TMB/0000755000176200001440000000000014641250002010666 5ustar liggesusersTMB/MD50000644000176200001440000006203714641250002011206 0ustar liggesusers589d5ef09ff69016c76c16d93bfc7d63 *DESCRIPTION b9d03d6f35333b54381881701b594645 *NAMESPACE fa77584ce16155c46c5e69174c2effe8 *NEWS f1c4a54a9212313eb4ff384ab4b245a8 *R/TMB.R 95ecf54795d9cef881726f2b39afb8bd *R/abi_table.R 0a8a19e6bc5dd48d053735256258e36a *R/benchmark.R b8cc540513343dbfade07876fe7848b3 *R/checker.R 6ce0ba48512cfa7824baad58adc436c2 *R/config.R 0de18d8b332daf5a55af20a482d98855 *R/dotCall.R 9e8f5d2786cedbab6aba9ad3aa26b527 *R/examples.R 04ed8003df5448ec456a90e6ce07cc8f *R/gdbsource.R aa45c5c2da1bda046b923066ae9e4ed5 *R/graph_transform.R 17042171104125770fd9900126983ecb *R/header_tool.R 56a1295a4739058af563a19d03bc45d0 *R/normalize.R 2a6af00bd9af32a5fbfae722647b4358 *R/sdreport.R f0a8e9eeabfb6ba0886e510ffe17b1cf *R/setupRStudio.R 2d124db373ceaf3988cfd6bd53f0083c *R/tmbprofile.R 34f335bfe3adb0536565aa747f241ca9 *R/tmbroot.R 9aaf0db05e1e4f8f6705e21c97a86af7 *R/utils.R 8fb198e9443ddd5bbcc0ea60142bd5e0 *R/validation.R 9da441e35d3cd61578f9091b640a85be *R/zzz.R eb282d3bb0c3c979198c80bb8713b2aa *inst/CITATION 24a129cdf0e26106e1c4f0fa86e1561a *inst/COPYRIGHTS be8e72009af7b2c27b1aec05c34c67be *inst/examples/ar1xar1.R 7b3318922db5fc0e236db8286058d319 *inst/examples/ar1xar1.cpp b5eb0691a2f04a5e8a3a4ff00cfed546 *inst/examples/linreg_parallel.R 597ae40bbf47d8a3a688c9b8137295e9 *inst/examples/linreg_parallel.cpp cafc553645337e587a317b2b05a5addf *inst/examples/randomregression.R 9a3cad6f5006de7b29f4e81ef5d2b255 *inst/examples/randomregression.cpp 4a93414fd3a6eddaeb81e3d48980ed5e *inst/examples/simple.R 5bfe7e3858e94c1cbdfe82a19e2c56a0 *inst/examples/simple.cpp c16463bf03af3da3c2dfc6a78cf1ce16 *inst/include/EigenWarnings/DisableStupidWarnings d4f69ae8aa7da50bf3bb09b1fd598731 *inst/include/Rstream.hpp cd417f602236c02cbe36af28c7c152e6 *inst/include/TMB.hpp 0eaf7a635884892151adad37884575e9 *inst/include/TMBad/TMBad.cpp 
7e607ccf607c167485fd542548859461 *inst/include/TMBad/TMBad.hpp 4d100ee9c1835ed1bc7d82d90e21290f *inst/include/TMBad/ad_blas.hpp 874a10de7bd0090b14fec2d7129f0c67 *inst/include/TMBad/checkpoint.hpp 25e8edd341c204ad49dbf2eff67fd13c *inst/include/TMBad/code_generator.hpp 20925597983a152bd719e16b1079abe2 *inst/include/TMBad/compile.hpp 05c6f4b2dff132d39560807329ab8f70 *inst/include/TMBad/compression.hpp 736ee93df9d8b6a817e750b513f4dfbb *inst/include/TMBad/config.hpp 11d7cb05b168cd082952ba8a2627b277 *inst/include/TMBad/eigen_numtraits.hpp 11d17faa33d4c5e7f6bead97c47c3ba0 *inst/include/TMBad/global.hpp 5e5751d29bf6657124c794c3c2df784a *inst/include/TMBad/graph2dot.hpp e3ea72e0669f0b68177acf2ab4ad1074 *inst/include/TMBad/graph_transform.hpp 11ed0f6676cccf23cd218a543dfe9b42 *inst/include/TMBad/integrate.hpp 78a2d08aa61056792cc75050c2c9fb88 *inst/include/TMBad/radix.hpp 84b60c784a28117d60040e3d3e5f4aa0 *inst/include/TMBad/tmbad_allow_comparison.hpp 36d27943c9a55a25bd2cfa3de32e2998 *inst/include/TMBad/vectorize.hpp 5f72b6e57eac7c7b51db3a459d7e2d5c *inst/include/Vectorize.hpp 1722ff25f1321ce2cc76bd1b211cde07 *inst/include/atomic_convolve.hpp 6c3c56ee7abc9018016414b4e73839c1 *inst/include/atomic_macro.hpp 6cccfc5826f53c22f3d8eeb28a30f869 *inst/include/atomic_math.hpp 315caa47e8fe25e4053348e4ac2b3b1b *inst/include/checkpoint_macro.hpp 1b4fb806a143a79d3034aa2080fddcd6 *inst/include/config.hpp 6e2a3b3bc4d28ec135dad5f39089c6be *inst/include/convenience.hpp 1ae134e358edf735638195edc679baa2 *inst/include/convert.hpp 23c76f5ff819f106b7b5a3016f6a671a *inst/include/cppad/CMakeLists.txt 496717792a2f73e7442031d80860f784 *inst/include/cppad/COPYING 5d6052d773ced6c5c8138e66b7cab3a9 *inst/include/cppad/CheckNumericType.h 8d98f263e9f82ebf25f24d54444923d1 *inst/include/cppad/CheckSimpleVector.h dad2f1fb33b3a0a9c93bf1874dab615c *inst/include/cppad/CppAD.h 46df8127d9421a01bf7fe7027499c6fd *inst/include/cppad/CppAD_vector.h 9154e443717b144ee790e8bd4b1323e6 *inst/include/cppad/ErrorHandler.h 
2d9ad14fdecf71b16c0882dc2529f8e3 *inst/include/cppad/LuFactor.h 0bd5f2802b48a4142277798883d1dffb *inst/include/cppad/LuInvert.h ec2ebb8ce4e33508114c9f865aa4ea82 *inst/include/cppad/LuSolve.h 7a7f423e186258ddae3e478a179c21f0 *inst/include/cppad/NearEqual.h 16a2ac02d4c549854a7b0ce18bfde453 *inst/include/cppad/OdeErrControl.h 6005ef0520340949e6e066a4204fc642 *inst/include/cppad/OdeGear.h fcafd4a42bebd806a9fa7b6462b34ec4 *inst/include/cppad/OdeGearControl.h 4d7270af9ef7d398a772ce8cceb57172 *inst/include/cppad/Poly.h 25424409605f05fc5cb117bed1bd3d6b *inst/include/cppad/PowInt.h 03e9910055dcc470946a90107f5e6914 *inst/include/cppad/RombergMul.h 4d3f438b1f2143837ed44eb4307b9039 *inst/include/cppad/RombergOne.h 9a2ec1b57859c304343e6e3f021d3583 *inst/include/cppad/Rosen34.h 6a2f3d6a62a5bdc315cd5ab19f09679d *inst/include/cppad/Runge45.h 9ca037ee938eb8036a4e08acac107c9f *inst/include/cppad/SpeedTest.h 781b232202223cbc422e3a738fa52b8b *inst/include/cppad/TrackNewDel.h c80a1b18583bfd6690cec8b2f478b029 *inst/include/cppad/base_require.hpp 291c59145c092bfa82a14b593a7b905f *inst/include/cppad/check_numeric_type.hpp 771a416d8f5c5a3db3343c6deed02bf3 *inst/include/cppad/check_simple_vector.hpp 5831ca7865d39c62507b115baf41574c *inst/include/cppad/configure.hpp 94c272e5bbb3db47e806909edba907c1 *inst/include/cppad/configure.hpp.in 03b8c187d213b1839ba4a4dec588f66e *inst/include/cppad/cppad.hpp f9bbc2f39373fc2cbf671afcafc7f9f9 *inst/include/cppad/elapsed_seconds.hpp a9b2e205016efc2ba661a28a09b2ee6a *inst/include/cppad/error_handler.hpp 68bdebc518f96b42cfcddfe7b49dfba5 *inst/include/cppad/example/base_adolc.hpp bc0648038820b845e04d2e4c538b274d *inst/include/cppad/example/cppad_eigen.hpp 37abc01f634bef10b18fce5fa3fefa39 *inst/include/cppad/example/eigen_plugin.hpp 89a1dbd1cda07aa0ceaf044b0ba28d4c *inst/include/cppad/example/matrix_mul.hpp 2d3e19e821c782a620e1525d47134e53 *inst/include/cppad/index_sort.hpp 70a956ce3b8b513c5b09cc3facd4db60 *inst/include/cppad/ipopt/solve.hpp 
cd60277a86b8ff454e188db277882ad1 *inst/include/cppad/ipopt/solve_callback.hpp ef6a1013ce71a227b11062b303045b7f *inst/include/cppad/ipopt/solve_result.hpp c88b9d5474365427cc5f2a9eebd24bc4 *inst/include/cppad/local/abort_recording.hpp eec1a9defc13c98552e08f6f9a2e3fbe *inst/include/cppad/local/abs.hpp 1a3332a208ff41e694cc8cbaef61ddce *inst/include/cppad/local/abs_op.hpp cf01ae3aaf4c87042577345ddb7967bb *inst/include/cppad/local/acos_op.hpp b0d993c700b39d02a23a47242d9aa699 *inst/include/cppad/local/ad.hpp 6cde33273fbdb74e8fbbdd9e1097b1a5 *inst/include/cppad/local/ad_assign.hpp 0d123c4a3b24ac3c77c450033c80e766 *inst/include/cppad/local/ad_binary.hpp af2d3ab04501d9f2090a0ea5922f5f42 *inst/include/cppad/local/ad_ctor.hpp 8e4bbdcd5e7283837773d89f05d0917d *inst/include/cppad/local/ad_fun.hpp 0af4cdc336831e7908f8b151eb631efa *inst/include/cppad/local/ad_io.hpp 352bb27cce466c5edbb7ab6b3e16aba8 *inst/include/cppad/local/ad_tape.hpp 73a58f315f9c54a37ce2df195563f033 *inst/include/cppad/local/ad_valued.hpp 6fd78851b3babc00717837c119cb0153 *inst/include/cppad/local/add.hpp 86e901a5ab85584cd8c1c196b8dc35a1 *inst/include/cppad/local/add_eq.hpp bb5c71858915761c8c7d080f3cf42759 *inst/include/cppad/local/add_op.hpp 27442953cb225989ffcac1f80d948939 *inst/include/cppad/local/arithmetic.hpp 111ac01ea2d7f9cea50f8ac14ae63cc4 *inst/include/cppad/local/asin_op.hpp 2b46dd38d2cebfe8031f674fae74cd22 *inst/include/cppad/local/atan2.hpp b5427a29c029d459726364d1baa12c69 *inst/include/cppad/local/atan_op.hpp 95c71ea3874da7201004e0d376f4a400 *inst/include/cppad/local/atomic_base.hpp 53793bed6e5442051fae4eca9c962955 *inst/include/cppad/local/base_complex.hpp 0b5b87b3c8c04746c626a1de173278d3 *inst/include/cppad/local/base_cond_exp.hpp c0373e2d9f6d4f111d17317437b7289d *inst/include/cppad/local/base_double.hpp ffc714bd69d99d6ffd1a7d0029849b6d *inst/include/cppad/local/base_float.hpp e28defec4dd8a2cb716f66eb73386a35 *inst/include/cppad/local/base_std_math.hpp 937e397b55084d56be20d4dc57b48bb5 
*inst/include/cppad/local/bender_quad.hpp a28c1dfb298ed706a1d8f79f16ecc284 *inst/include/cppad/local/bool_fun.hpp be88b1e9e125c5c8fe8322089d71faf6 *inst/include/cppad/local/bool_valued.hpp 6bf63c9b8214ec1f7f3a2e00e0218068 *inst/include/cppad/local/capacity_order.hpp 00e0fb03f1aecc6b13a0dd1c9fc20895 *inst/include/cppad/local/checkpoint.hpp dfb6a4d6219e01daab975c870d319f3c *inst/include/cppad/local/color_general.hpp ded869d847310cc2adc32c112e88ac3c *inst/include/cppad/local/color_symmetric.hpp 454520445e2ef19ca202049ebe5d9c9a *inst/include/cppad/local/comp_op.hpp 0c41bac89f01efad961e2464e3bccc2b *inst/include/cppad/local/compare.hpp 8cf6457c761b01577c405c13891f08fa *inst/include/cppad/local/compute_assign.hpp d703c3aa965322762a5d6b494eea52be *inst/include/cppad/local/cond_exp.hpp 4c765797a5187daa23eedba40f3ed4d4 *inst/include/cppad/local/cond_op.hpp ac24ca67731cca9aff7432c481f32c84 *inst/include/cppad/local/config.h.in f968c675c2b750101b0cc885308431bf *inst/include/cppad/local/convert.hpp 78a7bc936dbb8485f583913ed25bfeee *inst/include/cppad/local/cos_op.hpp 86471158b0bc289609b252771ae1a3d6 *inst/include/cppad/local/cosh_op.hpp b288df3c1d2697183988fe107fe3e528 *inst/include/cppad/local/cppad_assert.hpp 9424bf3f7950367f03efa1652288bff1 *inst/include/cppad/local/cppad_colpack.cpp a46a73e54a74593c85e3c375563ef4a1 *inst/include/cppad/local/cppad_colpack.hpp e1f1b8e85fd856c15650f96adb27e867 *inst/include/cppad/local/cskip_op.hpp b420b69a1dc6dd8768533b51442a5d5d *inst/include/cppad/local/csum_op.hpp 7e1b28a7cad1d4ba8c840a1e95b833da *inst/include/cppad/local/declare_ad.hpp 02a8e5aad9daccffcca39899b79dd1c5 *inst/include/cppad/local/define.hpp 8e41d324f9e0ed7caaf9606a42dd82cd *inst/include/cppad/local/dependent.hpp a6339e9bf1f4d33f5cab1eaf4aea4124 *inst/include/cppad/local/discrete.hpp a85b45c800d412831d738001372d7830 *inst/include/cppad/local/discrete_op.hpp 99584a30af00798e3ea75e39121a8755 *inst/include/cppad/local/div.hpp c8374d5cc22539151f53b73e7a642d66 
*inst/include/cppad/local/div_eq.hpp 72233c1f5a7135ee53141a072ec18c52 *inst/include/cppad/local/div_op.hpp eca975875eea29c1b8ef8f6bcfe2f6c9 *inst/include/cppad/local/drivers.hpp 176cd862d506c585f9690f8d5125b008 *inst/include/cppad/local/epsilon.hpp fc851910d38f757106f659dec4e26568 *inst/include/cppad/local/equal_op_seq.hpp 4db246a15a8eafa2cb39443bc2a243aa *inst/include/cppad/local/erf.hpp 7d83e2e88f556909d8cd3a545beb6e79 *inst/include/cppad/local/erf_op.hpp e40c89d5f6f619934afb3684fa785efb *inst/include/cppad/local/exp_op.hpp 2fbe063f232e4c8e0cbfa543f6b120eb *inst/include/cppad/local/for_jac_sweep.hpp bf86f4c21e300818f2a86fe4cb56bd10 *inst/include/cppad/local/for_one.hpp 0ed10ffe128e9c1a40c10b6a12a5c5e8 *inst/include/cppad/local/for_sparse_jac.hpp 22366b2da11628c4738216934a78d80d *inst/include/cppad/local/for_two.hpp 52988261e60d5995bca6abf459ea0abd *inst/include/cppad/local/forward.hpp 304364d54200641c6718cf2efe8ea93d *inst/include/cppad/local/forward0sweep.hpp 4f6a829b7d443d6f960097e33974d8e1 *inst/include/cppad/local/forward1sweep.hpp bc8caef8e6aff0746c237e6f560aad22 *inst/include/cppad/local/forward2sweep.hpp f5d512374c2470d69fe2f929f6e1e0cc *inst/include/cppad/local/fun_check.hpp c92cc9ca499cc00ca4e58ec9684b5c18 *inst/include/cppad/local/fun_construct.hpp 06f54f960c97c710e0d46ee8e917ea70 *inst/include/cppad/local/fun_eval.hpp 77407c03bc58cfb90f02769132b79663 *inst/include/cppad/local/hash_code.hpp 91b1555e4bd4a8e66580e089b065f680 *inst/include/cppad/local/hessian.hpp c9b5310bb9f48a7456cdc041d59ff0f3 *inst/include/cppad/local/identical.hpp e6e7b9aedc7578b1ab267722f9a66eea *inst/include/cppad/local/independent.hpp 1ab79299fd1800d9edd749fc1d705c78 *inst/include/cppad/local/integer.hpp e0479bd04e2f5a9c7aa5d2cf1ded78f3 *inst/include/cppad/local/jacobian.hpp c1b8ea5f3320bff801367303c0914ae1 *inst/include/cppad/local/kasper.hpp 284616db995e67c7edf31ba987775bfb *inst/include/cppad/local/limits.hpp f5e1cffa1fb5bea7bb331d636265563e *inst/include/cppad/local/load_op.hpp 
b4130da7bbc583fa678f28d832659085 *inst/include/cppad/local/log_op.hpp 85b4eb57dd50583aba6ee7dcd942db82 *inst/include/cppad/local/lu_ratio.hpp ecef2fa272d930b83349f6874a8c76a4 *inst/include/cppad/local/math_other.hpp d92d5cabeeaacf789cb48a68318470f3 *inst/include/cppad/local/mul.hpp 1ee6d5693ef6b64b3f465fe6355a98c5 *inst/include/cppad/local/mul_eq.hpp 8c6961e38840e4575fa07ef7e7840228 *inst/include/cppad/local/mul_op.hpp ed7b9f14822bd934175b6106ab05c761 *inst/include/cppad/local/near_equal_ext.hpp 2cce792465cda30e310f3d71a3cc044f *inst/include/cppad/local/num_skip.hpp 4bae1f0e0e33d1285205c99650c8b73a *inst/include/cppad/local/old_atomic.hpp 13c422225b5fddc48352e70a6ed78b88 *inst/include/cppad/local/omp_max_thread.hpp e7151e209f184859ff318fae3d367bfd *inst/include/cppad/local/op.hpp 66e9c7d0f34e897397ab4960074f64e5 *inst/include/cppad/local/op_code.hpp 113f2fe796cc5317fb15d913cd8a1a2b *inst/include/cppad/local/opt_val_hes.hpp 71e5e0328a06564cb2229a0f12174a82 *inst/include/cppad/local/optimize.hpp 2d129dae34c7a475880068792f4a6ad0 *inst/include/cppad/local/ordered.hpp 341b2e6b6c1747631f8728f4211c1545 *inst/include/cppad/local/par_var.hpp b378cd0a66097439c0c2357f1f6e543a *inst/include/cppad/local/parallel_ad.hpp 4d8dba17a05f24c37da9d975007bdc26 *inst/include/cppad/local/parameter_op.hpp 29a5bcb0c19d7e6650bcce7c2f85d310 *inst/include/cppad/local/player.hpp 7fe1339760b2498a6b9354f3957d758e *inst/include/cppad/local/pod_vector.hpp 6ab45e1fb56b1052d9c8f125830df6bd *inst/include/cppad/local/pow.hpp bb31706c71bf5647da25ad0cd8f69b80 *inst/include/cppad/local/pow_op.hpp 09238dce0c659ff9780e3ca4b557f43f *inst/include/cppad/local/print_for.hpp 32adbc41bd2f54bd537f0da1b7a53cd7 *inst/include/cppad/local/print_op.hpp 3a5e9c965f11bc23b201ce31863951a6 *inst/include/cppad/local/prototype_op.hpp 8c2dc4650525a1cee08d5e8124d462ef *inst/include/cppad/local/recorder.hpp 6f1611076701e93832293b36edd14b64 *inst/include/cppad/local/rev_hes_sweep.hpp 042deede81928bc16dc464372c11e327 
*inst/include/cppad/local/rev_jac_sweep.hpp 43118b714d743ea554a6d76f1d6dc8e8 *inst/include/cppad/local/rev_one.hpp 1adf05863d960e87bbd196e71a584eee *inst/include/cppad/local/rev_sparse_hes.hpp e5d6b5677c6c1963ea992d8c061bbe4a *inst/include/cppad/local/rev_sparse_jac.hpp 6bc82e2dc2d362c125004512eac2c0f0 *inst/include/cppad/local/rev_two.hpp 87bcf743ee22636e4560258375da75f2 *inst/include/cppad/local/reverse.hpp e6cf2edb4bb05f005ad70f2fd34602bb *inst/include/cppad/local/reverse_sweep.hpp e7c9373e5b1120b0130b21cae904c4cc *inst/include/cppad/local/sign.hpp 6d4c3a1f98b017a6f22d54bb276e7ce5 *inst/include/cppad/local/sign_op.hpp 7b1d1fe78ee4f44e77c2f3f88900ffa8 *inst/include/cppad/local/sin_op.hpp 324bcb33f11e83d922b10975ba8e533a *inst/include/cppad/local/sinh_op.hpp a26796365dd574e7ae1fb676b5b8caf0 *inst/include/cppad/local/sparse.hpp 3f1ef3459ccb2542aa8c13058df3ac7f *inst/include/cppad/local/sparse_binary_op.hpp 2f10857993b397b10504b5bdb318a89c *inst/include/cppad/local/sparse_hessian.hpp 1fab484b3732b2001a53a47eb8f7de12 *inst/include/cppad/local/sparse_jacobian.hpp ccc2397558b31950b043987c41869447 *inst/include/cppad/local/sparse_list.hpp 391c13cdee23d141c22f35874f5b4694 *inst/include/cppad/local/sparse_pack.hpp 9dde5bc4e71e383594f3ef8e2b277ec0 *inst/include/cppad/local/sparse_pattern.hpp 23b2dea1f727141958416c61a27882bd *inst/include/cppad/local/sparse_set.hpp 969faef671598b38dc25a3384783a07f *inst/include/cppad/local/sparse_unary_op.hpp 9a353eb97be219baab747d09ade344e1 *inst/include/cppad/local/sqrt_op.hpp a4f0bcd91eef71936bb272798fda88dc *inst/include/cppad/local/std_math_ad.hpp 3ad45dbba6cd7d00ae1f2d584d6e099f *inst/include/cppad/local/std_set.hpp 3fe52ac60b9d48d8cec2b7cccf0ddc4f *inst/include/cppad/local/store_op.hpp 8f44194dc81778d9202dcb6a2a4e8c24 *inst/include/cppad/local/sub.hpp eb72c8b56d5ecf4477e4abed680a415e *inst/include/cppad/local/sub_eq.hpp 0a3d9f0e77d2a98949ff46d65643e499 *inst/include/cppad/local/sub_op.hpp b2708f8bef32c00d87f04d83f493e428 
*inst/include/cppad/local/tan_op.hpp 8237668f922bd7369d2584895b1c8fe5 *inst/include/cppad/local/tanh_op.hpp 150472f1cdbed489690c523a20bb0f0e *inst/include/cppad/local/tape_link.hpp 92c2f3706dd950962ffc66d9ff815b47 *inst/include/cppad/local/test_vector.hpp a171dcfed372f3bcc49f3abac7413d8a *inst/include/cppad/local/testvector.hpp e3b1058ad33e8abab3c57d7c74925503 *inst/include/cppad/local/unary_minus.hpp f8cb0595fae35df17e2a7637b60ab256 *inst/include/cppad/local/unary_plus.hpp 09af3b277558e4e14076215f202d7710 *inst/include/cppad/local/undef.hpp 533d5cff5289f7f00dd238f9d7d8e13b *inst/include/cppad/local/user_ad.hpp acc8b86c6362d5f8bc70a9bf05764157 *inst/include/cppad/local/value.hpp 348b4fd631b19db32162adcf382e12a0 *inst/include/cppad/local/var2par.hpp e8cf7335073abf5e871bc7d1368ee81c *inst/include/cppad/local/vec_ad.hpp 220b0b1c7d8bf92fd2f92cef6610093f *inst/include/cppad/lu_factor.hpp a6d79e1ec324331074a865e82f37718b *inst/include/cppad/lu_invert.hpp 762854320abacde390c79b2a9e148516 *inst/include/cppad/lu_solve.hpp 098edbc35d4fae23f2b2c6e7377c0826 *inst/include/cppad/memory_leak.hpp fe2cdee7022a4954acd05be4167344a4 *inst/include/cppad/nan.hpp 1486672de8de78fa69feb1ec3c33bd2e *inst/include/cppad/near_equal.hpp 7048ffdd93977c6b248e721ce93c2398 *inst/include/cppad/ode_err_control.hpp 3232493d20851c713d1a3f003ca19feb *inst/include/cppad/ode_gear.hpp 750a7eb9d7851e0e53f5bfefe10dab81 *inst/include/cppad/ode_gear_control.hpp bb34b7f5d995d2b63e6c67cf8826b567 *inst/include/cppad/omp_alloc.hpp 7ab3508e35cff8c47f2a65aa76dd7e09 *inst/include/cppad/poly.hpp ffdf6cd4728cabc91aaab750484a8ef1 *inst/include/cppad/pow_int.hpp 0d34303592d393571ffaf9489d03b601 *inst/include/cppad/romberg_mul.hpp 73cdf88ace1a19f1e28b592b56a7ccdd *inst/include/cppad/romberg_one.hpp 805719a78683f5fc1daf069bedbb638c *inst/include/cppad/rosen_34.hpp 271544538ce56d5b57740e39ef3ac0b1 *inst/include/cppad/runge_45.hpp 2244a061bcca4ee376f191420e4315fc *inst/include/cppad/speed/det_33.hpp 
aa4b016505e5e5c19a26641ffaf52990 *inst/include/cppad/speed/det_by_lu.hpp db1e7cd61a4da644509b254c4381bc30 *inst/include/cppad/speed/det_by_minor.hpp bbcc516c2cbc6cdf7923f5adbbea7616 *inst/include/cppad/speed/det_grad_33.hpp 031cbbe681eb322ac86d7f688ff817e8 *inst/include/cppad/speed/det_of_minor.hpp 98edf14ec6105d66729c4e590256c361 *inst/include/cppad/speed/mat_sum_sq.hpp ce73494269311e976a8137ed2e3043d3 *inst/include/cppad/speed/ode_evaluate.hpp d240210ad9e473480541fd8b97614be6 *inst/include/cppad/speed/sparse_hes_fun.hpp 82cb08c63266acd7e10d1263f7bed12b *inst/include/cppad/speed/sparse_jac_fun.hpp 34455739b0171ff3778fd719696068b0 *inst/include/cppad/speed/uniform_01.hpp 3a2c097c6b91a5f929bf6a4b5e0d6653 *inst/include/cppad/speed_test.hpp d69dcf21744dca401515265eb37300bb *inst/include/cppad/thread_alloc.hpp 3d363be95f48df10039e506ae4e0a3b0 *inst/include/cppad/time_test.hpp 6030d3bff0a09beefc6ed747866d1b20 *inst/include/cppad/track_new_del.hpp 8082ce868a2fb453e09a23d18a5beb3d *inst/include/cppad/vector.hpp 6e4cad3bc080c35149c2533771c7b426 *inst/include/distributions_R.hpp fb91d053ca670d87de3155c2a53149ab *inst/include/dnorm.hpp 7cb6ef0ce67d83de587c5f6b73a1e314 *inst/include/dynamic_data.hpp 71cecf3c88d0a06ce14bdb1c70d94079 *inst/include/expm.hpp 325cfa2093bb253bc680a62f23b12539 *inst/include/fft.hpp 426b135e048a69cc2486a23b943a406a *inst/include/lgamma.hpp d41d8cd98f00b204e9800998ecf8427e *inst/include/precompile.hpp ec06900458e1f836bdcdd282d36e4fff *inst/include/start_parallel.hpp b09fb12f2a0b7c8e64583af58e6614a5 *inst/include/tiny_ad/atomic.hpp 3abc7594697713ba497872af1d0151b3 *inst/include/tiny_ad/bessel/bessel.h 277b8d52540cf2a36aef0b113cabbd1f *inst/include/tiny_ad/bessel/bessel.hpp a7478f4abea9d29b00797bccf1d3fb2b *inst/include/tiny_ad/bessel/bessel_i.cpp c2f59868f147580996eec7b9c5772722 *inst/include/tiny_ad/bessel/bessel_j.cpp 40e32c410ab2613ebcbdaa488fd83bfc *inst/include/tiny_ad/bessel/bessel_k.cpp 6cd58da4f6e8cd4d6444403a0bbe92a1 
*inst/include/tiny_ad/bessel/bessel_y.cpp bc43041037cf940fd58d27a01f5f99a3 *inst/include/tiny_ad/bessel/undefs.h 8cadcb150ab29a23c6744173e0852858 *inst/include/tiny_ad/beta/d1mach.cpp a1b9bc692c40cbb99ef68717f364c2dc *inst/include/tiny_ad/beta/dpq.h 1137c5808d2a3386b1c1bc323e996b45 *inst/include/tiny_ad/beta/i1mach.cpp 31b6ee7aafba5d9247a8bf7064f6f9c9 *inst/include/tiny_ad/beta/nmath.h 92209ca3053abcd475010015e737f0c8 *inst/include/tiny_ad/beta/pbeta.cpp 7aa5084cc84ff89ea16c72990f6f12f7 *inst/include/tiny_ad/beta/pbeta.hpp 13436e9bcd6f2badadcd158675f9ccc1 *inst/include/tiny_ad/beta/toms708.cpp 10bee0b4fe9c9e9add25d6c4b3a769b1 *inst/include/tiny_ad/beta/undefs.h 75bcbfba021021a2dd1ba5a7bbd7acca *inst/include/tiny_ad/compois/compois.hpp 87fb93a3b88ccb569e26ec31523e1ede *inst/include/tiny_ad/gamma/chebyshev.cpp 5414e74b84672ff1ff49d0917500e27f *inst/include/tiny_ad/gamma/gamma.cpp 7ceb076a5704a85a9bf7b3b2f60d25a9 *inst/include/tiny_ad/gamma/gamma.hpp f3b6489843704107641ce12c23ffbf90 *inst/include/tiny_ad/gamma/gamma_cody.cpp 1c7351509cc9cfd2402237a8deb3f151 *inst/include/tiny_ad/gamma/gammalims.cpp.not_used a7e97c5f9d542e34ef1260a963a4754d *inst/include/tiny_ad/gamma/lgamma.cpp e134dcdf5fa724b5c1d1dbdd03e4a910 *inst/include/tiny_ad/gamma/lgammacor.cpp 0bab865a845a214e5fd92bf4a93ffeca *inst/include/tiny_ad/gamma/stirlerr.cpp 6477636d15e69a8ffd1e9e3a209737b3 *inst/include/tiny_ad/gamma/undefs.h 8079597f39fea50c1bb5090dd3b331b8 *inst/include/tiny_ad/integrate/integrate.cpp 9319082a3bafaf97898d098d0a9a7605 *inst/include/tiny_ad/integrate/integrate.hpp ba0e5c9d4bcc8377403787e09dde1379 *inst/include/tiny_ad/mask.hpp bac55137b4e3a7a39ec2781f5357cc5c *inst/include/tiny_ad/robust/distributions.hpp 6adb743e17fbb4f52995536445a3f0d5 *inst/include/tiny_ad/tiny_ad/tiny_ad.hpp d5bd4e8231b4d9e9c908d52437e4b963 *inst/include/tiny_ad/tiny_ad/tiny_valarray.hpp bed275688206d95b0fd155d5c6b1bc96 *inst/include/tiny_ad/tiny_ad/tiny_vec.hpp 0a40a6854ee55c83099eae8c54db9831 
*inst/include/tiny_ad/tweedie/tweedie.cpp 65aa74eb2b6c7aea9a23753325879929 *inst/include/tiny_ad/tweedie/tweedie.hpp cfe8c8bd691ed7163f9f5331649944d0 *inst/include/tmb_core.hpp a46fde809b02002b302587fc1ff337db *inst/include/tmb_enable_header_only.hpp 5ad83fb51d885eaf2583324cf8c55e5b *inst/include/tmb_enable_precompile.hpp cba5dbd26e532656b13f717613adda93 *inst/include/tmbad_atomic_macro.hpp 896864fb1ea451068388ea287479682a *inst/include/tmbutils/R_inla.hpp a033a5cedea4b808637d71b5960c859b *inst/include/tmbutils/array.hpp e859bdd25a03f386c71d3fb94a723cbc *inst/include/tmbutils/autodiff.hpp c96c599ba5f0e8bb0dd0893a367851fe *inst/include/tmbutils/concat.hpp c7bb7424b1db4fbde76a1945629336a9 *inst/include/tmbutils/density.hpp 045bd91a3126d15b835a27c7e1fd306a *inst/include/tmbutils/getListElement.hpp 8d67ef0679cfb44547e221feabbacb1f *inst/include/tmbutils/interpol.hpp 7ec47588c1dcad646f724f9b6dc07810 *inst/include/tmbutils/kronecker.hpp a5d52f1782b16620d8bbcd7dff948f52 *inst/include/tmbutils/matexp.hpp 7f5a12ab4ab3efd827aab62579999fcd *inst/include/tmbutils/newton.hpp 4bae9e452a2191da9bbd9e88cb133bf7 *inst/include/tmbutils/order.hpp ff02e0a781e0e5b9023229f8c1d9019b *inst/include/tmbutils/romberg.hpp 74165a766bb2d9fa7a2cc09fdbf7698e *inst/include/tmbutils/simplicial_inverse_subset.hpp 8f80ff444aa47613fb460a8e85a4188e *inst/include/tmbutils/sparse_matrix_exponential.hpp 820d43d49b901be4db059ee51e284a10 *inst/include/tmbutils/splines.hpp 3e607b30aa2b69d50a481d37f10cf5c1 *inst/include/tmbutils/spmat.hpp 5dd9f4493291044e64998b2ac61daf84 *inst/include/tmbutils/supernodal_inverse_subset.hpp e7be1be77ff4c334771b462114d6b52c *inst/include/tmbutils/tmbutils.hpp 47fffe15f21dde554416ea62818d8787 *inst/include/tmbutils/tmbutils_extra.hpp 234f0579fd7f4950be20a74c9ec69095 *inst/include/tmbutils/vector.hpp 1fd33ea45585479c27b20327ac2256f9 *inst/include/tmbutils/vectorize.hpp 852feac7e8101790d3de80c6f0371db5 *inst/include/toggle_thread_safe_R.hpp cac34e6a00e1d301fadd4c4e5f4744d3 
*inst/template.cpp 585ff35e0e23841ed42e02e7a8d97d36 *man/FreeADFun.Rd fdbfdb249fd6f51b839f9f7aca5b86d1 *man/GK.Rd 5e05db54772d2e5a889ed7d96299ccbc *man/MakeADFun.Rd c1890fbd90543617a3dd7a37c7a111d8 *man/Rinterface.Rd f3cb41bb8a69add2ac41ef4130e288fe *man/SR.Rd 7af20f2f80c332927d3dd7f3d09c1f25 *man/TMB.Version.Rd 34a86ce50c73b59cc4bdb30169058536 *man/as.list.sdreport.Rd dba8487666ca309829273c6fda29ee86 *man/benchmark.Rd b3c3979d28fd2edeb9dd87cf5a56b8a7 *man/checkConsistency.Rd 443dff9ba715efff6085906c21a5df1e *man/compile.Rd e2ebe83f3d55f97fb97da7eba5481971 *man/config.Rd d6ae8026f7cbf442e7ef8cf7e4485913 *man/confint.tmbprofile.Rd ff112270c193b9e4a32bb2d8f81e1482 *man/dynlib.Rd bb279403622341fa8afdaff4020ea725 *man/gdbsource.Rd 2959d71fedaf6d61053a6af4b966fc4b *man/newton.Rd 2df5d692b73bc2abbdcef7aa31f2592a *man/newtonOption.Rd 5d42b2ff227a31661ae04fcddd2ad9b2 *man/normalize.Rd f80a1a2c1a57d7e157479d5e22fcc77b *man/oneStepPredict.Rd 4314d6fc66468b5dd45cd2b78a7dd7ad *man/openmp.Rd c724052dd366049d54bdbdd8e23b12a4 *man/plot.tmbprofile.Rd 953aeade34f36c16824a8318f494c7f2 *man/precompile.Rd 25ad4fde0a0b80fd88653f38fe0deadb *man/print.checkConsistency.Rd 3b5b5c89804a350f22a8633960d1a344 *man/print.sdreport.Rd d0fee90577a57ef202a879aed448911e *man/runExample.Rd bb9feb042c215118410ac6b97c2d245a *man/runSymbolicAnalysis.Rd 80399c6f7f069962f9bde3d63e0966bf *man/sdreport.Rd 06b97be5fa5240be0811e120593fbd7f *man/summary.checkConsistency.Rd d8a0d3277078f18136a86ebc15ff49b4 *man/summary.sdreport.Rd 9dd84471758f8d925c18d3744d72e71e *man/template.Rd 25ab1a0b79b4356fefa8780771d031f4 *man/tmbprofile.Rd 360c1a036606f6f1315e478a8e5559ab *man/tmbroot.Rd c29b28480a5bee583f9900da5d0adbbf *src/Makevars 9d60805278b72ef9f60edb396b85b77d *src/external_metis.c 412ff0f60a76105f734d3ac9445dd1cf *src/init.c befc9fe6d7fd47dbd1e1964357f17a0b *src/local_stubs.c 1a17f5100f394fe70f0b939d535346b5 *src/solve_subset.c b9f4aef376157c295074e9a9e0b514f2 *src/utils.c 
TMB/R/0000755000176200001440000000000014641202562011077 5ustar liggesusersTMB/R/graph_transform.R0000644000176200001440000000655414634646733014446 0ustar liggesusersTransformADFunObject <- function(ADFun, method, ... ) { .Call("TransformADFunObject", f = ADFun$ptr, control = list(method = as.character(method), ...), PACKAGE = ADFun$DLL) } ## Utility tape_print <- function(x, depth=0, method="tape", DLL=getUserDLL(), ...) { if (is.list(x)) x <- x$ptr control <- list(depth=as.integer(depth), method=as.character(method), ...) .Call("tmbad_print", x, control, PACKAGE=DLL) } op_table <- function(ADFun, name=TRUE, address=FALSE, input_size=FALSE, output_size=FALSE) { ntapes <- tape_print(ADFun, method="num_tapes", DLL=ADFun$DLL, i=as.integer(0)) ntapes <- max(1, ntapes) f <- function(i)tape_print(ADFun$ptr, method="op", DLL=ADFun$DLL, i=as.integer(i), name=as.integer(name), address=as.integer(address), input_size=as.integer(input_size), output_size=as.integer(output_size)) g <- function(i)data.frame(tape=i, opname=f(i), stringsAsFactors=FALSE) df <- do.call("rbind", lapply(seq_len(ntapes) - 1L, g)) table(opname = df$opname, tape = df$tape) } src_transform <- function(ADFun, flags = "-O3", ..., perm=TRUE) { if(.Platform$OS.type=="windows"){ ## Overload tempfile tempfile <- function(...){ ans <- base::tempfile(...) 
chartr("\\", "/", shortPathName(ans)) } } ntapes <- tape_print(ADFun, method="num_tapes", DLL=ADFun$DLL, i=as.integer(0)) ntapes <- max(1, ntapes) tapes <- seq.int(from=0, length.out=ntapes) control <- list(method="src") dll <- tempfile(fileext=paste0("_",tapes)) dll.cpp <- paste0(dll, ".cpp") ## Reorder graph if (perm) { TransformADFunObject( ADFun, method="reorder_sub_expressions", random_order=integer(0), max_period_size=1024L) } ## Write redefs forward <- paste0("forward", tapes) reverse <- paste0("reverse", tapes) redef <- function(i) { cat("#define forward", forward[i+1], "\n") cat("#define reverse", reverse[i+1], "\n") } ## Write source code for (i in tapes) { control$i <- i sink(dll.cpp[i+1]); redef(i); out <- .Call("tmbad_print", ADFun$ptr, control, PACKAGE = ADFun$DLL); sink(NULL) } ## Overload compile(dll.cpp, flags=flags, ..., libtmb=FALSE) dyn.load(dynlib(dll)[1]) dllinfo <- getLoadedDLLs()[[basename(dll[1])]] forward_compiled <- lapply(forward, function(x)getNativeSymbolInfo(x,PACKAGE=dllinfo)$address) reverse_compiled <- lapply(reverse, function(x)getNativeSymbolInfo(x,PACKAGE=dllinfo)$address) TransformADFunObject( ADFun, method="set_compiled", forward_compiled=forward_compiled, reverse_compiled=reverse_compiled) ## Unload compiled code when no longer needed finalizer <- function(ptr) { dyn.unload(dynlib(dll[1])) file.remove(dynlib(dll[1])) file.remove(paste0(dll, ".o")) file.remove(dll.cpp) } reg.finalizer(ADFun$ptr, finalizer) NULL } TMB/R/sdreport.R0000644000176200001440000005702414634646733013112 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen ## License: GPL-2 ##' After optimization of an AD model, \code{sdreport} is used to ##' calculate standard deviations of all model parameters, including ##' non linear functions of random effects and parameters specified ##' through the ADREPORT() macro from the user template. ##' ##' First, the Hessian wrt. the parameter vector (\eqn{\theta}) is ##' calculated. 
The parameter covariance matrix is approximated by ##' \deqn{V(\hat\theta)=-\nabla^2 l(\hat\theta)^{-1}} where \eqn{l} ##' denotes the log likelihood function (i.e. \code{-obj$fn}). If ##' \code{ignore.parm.uncertainty=TRUE} then the Hessian calculation ##' is omitted and a zero-matrix is used in place of ##' \eqn{V(\hat\theta)}. ##' ##' For non-random effect models the standard delta-method is used to ##' calculate the covariance matrix of transformed parameters. Let ##' \eqn{\phi(\theta)} denote some non-linear function of ##' \eqn{\theta}. Then \deqn{V(\phi(\hat\theta))\approx \nabla\phi ##' V(\hat\theta) \nabla\phi'} ##' ##' The covariance matrix of reported variables ##' \eqn{V(\phi(\hat\theta))} is returned by default. This can cause ##' high memory usage if many variables are ADREPORTed. Use ##' \code{getReportCovariance=FALSE} to only return standard errors. ##' In case standard deviations are not required one can completely skip ##' the delta method using \code{skip.delta.method=TRUE}. ##' ##' For random effect models a generalized delta-method is used. First ##' the joint covariance of random effect and parameter estimation error is approximated ##' by ##' \deqn{V \left( \begin{array}{cc} \hat u - u \cr \hat\theta - \theta \end{array} \right) \approx ##' \left( \begin{array}{cc} H_{uu}^{-1} & 0 \cr 0 & 0 \end{array} \right) + ##' J V(\hat\theta) J' ##' } ##' where \eqn{H_{uu}} denotes random effect block of the full joint ##' Hessian of \code{obj$env$f} and \eqn{J} denotes the Jacobian of ##' \eqn{\left( \begin{array}{cc}\hat u(\theta) \cr \theta \end{array} \right)} wrt. \eqn{\theta}. ##' Here, the first term represents the expected conditional variance ##' of the estimation error given the data and the second term represents the variance ##' of the conditional mean of the estimation error given the data. 
##' ##' Now the delta method can be applied on a general non-linear ##' function \eqn{\phi(u,\theta)} of random effects \eqn{u} and ##' parameters \eqn{\theta}: ##' \deqn{V\left(\phi(\hat u,\hat\theta) - \phi(u,\theta) \right)\approx \nabla\phi V \left( \begin{array}{cc} ##' \hat u - u \cr \hat\theta - \theta \end{array} \right) \nabla\phi'} ##' ##' The full joint covariance is not returned by default, because it ##' may require large amounts of memory. It may be obtained by ##' specifying \code{getJointPrecision=TRUE}, in which case \eqn{V ##' \left( \begin{array}{cc} \hat u - u \cr \hat\theta - \theta \end{array} \right) ^{-1} } will be part of the ##' output. This matrix must be manually inverted using ##' \code{solve(jointPrecision)} in order to get the joint covariance ##' matrix. Note, that the parameter order will follow the original ##' order (i.e. \code{obj$env$par}). ##' ##' Using \eqn{\phi(\hat u,\theta)} as estimator of ##' \eqn{\phi(u,\theta)} may result in substantial bias. This may be ##' the case if either \eqn{\phi} is non-linear or if the distribution ##' of \eqn{u} given \eqn{x} (data) is sufficiently non-symmetric. A ##' generic correction is enabled with \code{bias.correct=TRUE}. It is ##' based on the identity ##' \deqn{E_{\theta}[\phi(u,\theta)|x] = ##' \partial_\varepsilon\left(\log \int \exp(-f(u,\theta) + ##' \varepsilon \phi(u,\theta))\:du\right)_{|\varepsilon=0}} ##' stating that the conditional expectation can be written as a ##' marginal likelihood gradient wrt. a nuisance parameter ##' \eqn{\varepsilon}. ##' The marginal likelihood is replaced by its Laplace approximation. 
##' ##' If \code{bias.correct.control$sd=TRUE} the variance of the ##' estimator is calculated using ##' \deqn{V_{\theta}[\phi(u,\theta)|x] = ##' \partial_\varepsilon^2\left(\log \int \exp(-f(u,\theta) + ##' \varepsilon \phi(u,\theta))\:du\right)_{|\varepsilon=0}} ##' A further correction is added to this variance to account for the ##' effect of replacing \eqn{\theta} by the MLE \eqn{\hat\theta} ##' (unless \code{ignore.parm.uncertainty=TRUE}). ##' ##' Bias correction can be performed in chunks in order to reduce ##' memory usage or in order to only bias correct a subset of ##' variables. First option is to pass a list of indices as ##' \code{bias.correct.control$split}. E.g. a list ##' \code{list(1:2,3:4)} calculates the first four ADREPORTed ##' variables in two chunks. ##' The internal function \code{obj$env$ADreportIndex()} ##' gives an overview of the possible indices of ADREPORTed variables. ##' ##' Second option is to pass the number of ##' chunks as \code{bias.correct.control$nsplit} in which case all ##' ADREPORTed variables are bias corrected in the specified number of ##' chunks. ##' Also note that \code{skip.delta.method} may be necessary when bias ##' correcting a large number of variables. ##' ##' @title General sdreport function. ##' @param obj Object returned by \code{MakeADFun} ##' @param par.fixed Optional. Parameter estimate (will be known to \code{obj} when an optimization has been carried out). ##' @param hessian.fixed Optional. Hessian wrt. parameters (will be calculated from \code{obj} if missing). ##' @param getJointPrecision Optional. Return full joint precision matrix of random effects and parameters? ##' @param bias.correct logical indicating if bias correction should be applied ##' @param bias.correct.control a \code{list} of bias correction options; currently \code{sd}, \code{split} and \code{nsplit} are used - see details. ##' @param ignore.parm.uncertainty Optional. Ignore estimation variance of parameters?
##' @param getReportCovariance Get full covariance matrix of ADREPORTed variables? ##' @param skip.delta.method Skip the delta method? (\code{FALSE} by default) ##' @return Object of class \code{sdreport} ##' @seealso \code{\link{summary.sdreport}}, \code{\link{print.sdreport}}, \code{\link{as.list.sdreport}} ##' @examples ##' \dontrun{ ##' runExample("linreg_parallel", thisR = TRUE) ## Non-random effect example ##' sdreport(obj) } ##' ##' runExample("simple", thisR = TRUE) ## Random effect example ##' rep <- sdreport(obj) ##' summary(rep, "random") ## Only random effects ##' summary(rep, "fixed", p.value = TRUE) ## Only non-random effects ##' summary(rep, "report") ## Only report ##' ##' ## Bias correction ##' rep <- sdreport(obj, bias.correct = TRUE) ##' summary(rep, "report") ## Include bias correction sdreport <- function(obj,par.fixed=NULL,hessian.fixed=NULL,getJointPrecision=FALSE,bias.correct=FALSE, bias.correct.control=list(sd=FALSE, split=NULL, nsplit=NULL), ignore.parm.uncertainty = FALSE, getReportCovariance=TRUE, skip.delta.method=FALSE){ if(is.null(obj$env$ADGrad) & (!is.null(obj$env$random))) stop("Cannot calculate sd's without type ADGrad available in object for random effect models.") ## Make object to calculate ADREPORT vector obj2 <- MakeADFun(obj$env$data, obj$env$parameters, type = "ADFun", ADreport = TRUE, DLL = obj$env$DLL, silent = obj$env$silent) r <- obj$env$random ## Get full parameter (par), Fixed effects parameter (par.fixed) ## and fixed effect gradient (gradient.fixed) if(is.null(par.fixed)){ ## Parameter estimate not specified - use best encountered parameter par <- obj$env$last.par.best if(!is.null(r))par.fixed <- par[-r] else par.fixed <- par gradient.fixed <- obj$gr(par.fixed) } else { gradient.fixed <- obj$gr(par.fixed) ## <-- updates last.par par <- obj$env$last.par } ## In case of empty parameter vector: if(length(par.fixed)==0) ignore.parm.uncertainty <- TRUE ## Get Hessian wrt. 
fixed effects (hessian.fixed) and check if positive definite (pdHess). if(ignore.parm.uncertainty){ hessian.fixed <- NULL pdHess <- TRUE Vtheta <- matrix(0, length(par.fixed), length(par.fixed)) } else { if(is.null(hessian.fixed)){ hessian.fixed <- optimHess(par.fixed,obj$fn,obj$gr) ## Marginal precision of theta. } pdHess <- !is.character(try(chol(hessian.fixed),silent=TRUE)) Vtheta <- try(solve(hessian.fixed),silent=TRUE) if(is(Vtheta, "try-error")) Vtheta <- hessian.fixed * NaN } ## Get random effect block of the full joint Hessian (hessian.random) and its ## Cholesky factor (L) if(!is.null(r)){ hessian.random <- obj$env$spHess(par,random=TRUE) ## Conditional prec. of u|theta L <- obj$env$L.created.by.newton if(!is.null(L)){ ## Re-use symbolic factorization if exists updateCholesky(L,hessian.random) hessian.random@factors <- list(SPdCholesky=L) } } ## Get ADreport vector (phi) phi <- try(obj2$fn(par), silent=TRUE) ## NOTE_1: obj2 forward sweep now initialized ! if(is.character(phi) | length(phi)==0){ phi <- numeric(0) } ADGradForward0Initialized <- FALSE ADGradForward0Initialize <- function() { ## NOTE_2: ADGrad forward sweep now initialized ! 
obj$env$f(par, order = 0, type = "ADGrad") ADGradForward0Initialized <<- TRUE } doDeltaMethod <- function(chunk=NULL){ ## ======== Determine case ## If no random effects use standard delta method simpleCase <- is.null(r) if(length(phi)==0){ ## Nothing to report simpleCase <- TRUE } else { ## Something to report - get derivatives if(is.null(chunk)){ ## Do all at once Dphi <- obj2$gr(par) } else { ## Do *chunk* only ## Reduce to Dphi[chunk,] and phi[chunk] w <- rep(0, length(phi)) phiDeriv <- function(i){ w[i] <- 1 obj2$env$f(par, order=1, rangeweight=w, doforward=0) ## See NOTE_1 } Dphi <- t( sapply(chunk, phiDeriv) ) phi <- phi[chunk] } if(!is.null(r)){ Dphi.random <- Dphi[,r,drop=FALSE] Dphi.fixed <- Dphi[,-r,drop=FALSE] if(all(Dphi.random==0)){ ## Fall back to simple case simpleCase <- TRUE Dphi <- Dphi.fixed } } } ## ======== Do delta method ## Get covariance (cov) if(simpleCase){ if(length(phi)>0){ cov <- Dphi %*% Vtheta %*% t(Dphi) } else cov <- matrix(,0,0) } else { tmp <- solve(hessian.random,t(Dphi.random)) tmp <- as.matrix(tmp) term1 <- Dphi.random%*%tmp ## first term. 
if(ignore.parm.uncertainty){ term2 <- 0 } else { ## Use columns of tmp as direction for reverse mode sweep f <- obj$env$f w <- rep(0, length(par)) if(!ADGradForward0Initialized) ADGradForward0Initialize() reverse.sweep <- function(i){ w[r] <- tmp[,i] -f(par, order = 1, type = "ADGrad", rangeweight = w, doforward=0)[-r] } A <- t(do.call("cbind",lapply(seq_along(phi), reverse.sweep))) + Dphi.fixed term2 <- A %*% (Vtheta %*% t(A)) ## second term } cov <- term1 + term2 } ##list(phi=phi, cov=cov) cov } if (!skip.delta.method) { if (getReportCovariance) { ## Get all cov <- doDeltaMethod() sd <- sqrt(diag(cov)) } else { tmp <- lapply(seq_along(phi), doDeltaMethod) sd <- sqrt(as.numeric(unlist(tmp))) cov <- NA } } else { sd <- rep(NA, length(phi)) cov <- NA } ## Output ans <- list(value=phi,sd=sd,cov=cov,par.fixed=par.fixed, cov.fixed=Vtheta,pdHess=pdHess, gradient.fixed=gradient.fixed) ## ======== Calculate bias corrected random effects estimates if requested if(bias.correct){ epsilon <- rep(0,length(phi)) names(epsilon) <- names(phi) parameters <- obj$env$parameters parameters$TMB_epsilon_ <- epsilon ## Appends to list without changing attributes doEpsilonMethod <- function(chunk = NULL) { if(!is.null(chunk)) { ## Only do *chunk* mapfac <- rep(NA, length(phi)) mapfac[chunk] <- chunk parameters$TMB_epsilon_ <- updateMap(parameters$TMB_epsilon_, factor(mapfac) ) } obj3 <- MakeADFun(obj$env$data, parameters, random = obj$env$random, checkParameterOrder = FALSE, DLL = obj$env$DLL, silent = obj$env$silent) ## Get good initial parameters obj3$env$start <- c(par, epsilon) obj3$env$random.start <- expression(start[random]) ## Test if Hessian pattern is un-changed h <- obj$env$spHess(random=TRUE) h3 <- obj3$env$spHess(random=TRUE) pattern.unchanged <- identical(h@i,h3@i) & identical(h@p,h3@p) ## If pattern un-changed we can re-use symbolic Cholesky: if(pattern.unchanged){ if(!obj$env$silent) cat("Re-using symbolic Cholesky\n") obj3$env$L.created.by.newton <- L } else { if( 
.Call("have_tmb_symbolic", PACKAGE = "TMB") ) runSymbolicAnalysis(obj3) } if(!is.null(chunk)) epsilon <- epsilon[chunk] par.full <- c(par.fixed, epsilon) i <- (1:length(par.full)) > length(par.fixed) ## epsilon indices grad <- obj3$gr(par.full) Vestimate <- if(bias.correct.control$sd) { ## requireNamespace("numDeriv") hess <- numDeriv::jacobian(obj3$gr, par.full) -hess[i,i] + hess[i,!i] %*% Vtheta %*% hess[!i,i] } else matrix(NA) estimate <- grad[i] names(estimate) <- names(epsilon) list(value=estimate, sd=sqrt(diag(Vestimate)), cov=Vestimate) } nsplit <- bias.correct.control$nsplit if(is.null(nsplit)) { split <- bias.correct.control$split } else { split <- split(seq_along(phi), cut(seq_along(phi), nsplit)) } if( is.null( split ) ){ ## Get all ans$unbiased <- doEpsilonMethod() } else { tmp <- lapply(split, doEpsilonMethod) m <- if (bias.correct.control$sd) length(phi) else 1 ans$unbiased <- list(value = rep(NA, length(phi)), sd = rep(NA, m), cov = matrix(NA, m, m)) for(i in seq_along(split)) { ans$unbiased$value[ split[[i]] ] <- tmp[[i]]$value if (bias.correct.control$sd) { ans$unbiased$sd [ split[[i]] ] <- tmp[[i]]$sd ans$unbiased$cov [ split[[i]], split[[i]] ] <- tmp[[i]]$cov } } } } ## ======== Find marginal variances of all random effects i.e. phi(u,theta)=u if(!is.null(r)){ if(is(L,"dCHMsuper")){ ## Required by inverse subset algorithm diag.term1 <- solveSubset(L=L, diag=TRUE) if(ignore.parm.uncertainty){ diag.term2 <- 0 } else { f <- obj$env$f w <- rep(0, length(par)) if(!ADGradForward0Initialized) ADGradForward0Initialize() reverse.sweep <- function(i){ w[i] <- 1 f(par, order = 1, type = "ADGrad", rangeweight = w, doforward=0)[r] } nonr <- setdiff(seq_along(par), r) framework <- .Call("getFramework", PACKAGE=obj$env$DLL) if (framework != "TMBad") tmp <- sapply(nonr,reverse.sweep) else tmp <- f(par, order = 1, type = "ADGrad", keepx=nonr, keepy=r) ## TMBad only !!! 
if(!is.matrix(tmp)) ## Happens if length(r)==1 tmp <- matrix(tmp, ncol=length(nonr) ) A <- solve(hessian.random, tmp) diag.term2 <- rowSums((A %*% Vtheta)*A) } ans$par.random <- par[r] ans$diag.cov.random <- diag.term1 + diag.term2 if(getJointPrecision){ ## Get V(u,theta)^-1 if(length(par.fixed) == 0) { ans$jointPrecision <- hessian.random } else if (!ignore.parm.uncertainty) { G <- hessian.random %*% A G <- as.matrix(G) ## Avoid Matrix::cbind2('dsCMatrix','dgeMatrix') M1 <- cbind2(hessian.random,G) M2 <- cbind2(t(G), as.matrix(t(A)%*%G)+hessian.fixed ) M <- rbind2(M1,M2) M <- forceSymmetric(M,uplo="L") dn <- c(names(par)[r],names(par[-r])) dimnames(M) <- list(dn,dn) p <- invPerm(c(r,(1:length(par))[-r])) ans$jointPrecision <- M[p,p] } else { warning("ignore.parm.uncertainty ==> No joint precision available") } } } else { warning("Could not report sd's of full randomeffect vector.") } } ## Copy a few selected members of the environment 'env'. In ## particular we need the 'skeleton' objects that allow us to put ## results back in same shape as original parameter list. ans$env <- new.env(parent = emptyenv()) ans$env$parameters <- obj$env$parameters ans$env$random <- obj$env$random ans$env$ADreportDims <- obj2$env$ADreportDims class(ans) <- "sdreport" ans } ##' Extract parameters, random effects and reported variables along ##' with uncertainties and optionally Chi-square statistics. Bias ##' corrected quantities are added as additional columns if available. ##' ##' @title summary tables of model parameters ##' @param object Output from \code{\link{sdreport}} ##' @param select Parameter classes to select. Can be any subset of ##' \code{"fixed"} (\eqn{\hat\theta}), \code{"random"} (\eqn{\hat u}) or ##' \code{"report"} (\eqn{\phi(\hat u,\hat\theta)}) using notation as ##' \code{\link{sdreport}}. ##' @param p.value Add column with approximate p-values ##' @param ... 
##' @param ... Not used
##' @return matrix
##' @method summary sdreport
##' @S3method summary sdreport
summary.sdreport <- function(object, select = c("all", "fixed", "random", "report"),
                             p.value=FALSE, ...)
{
    ## *several* selections are allowed, e.g. c("fixed", "report")
    select <- match.arg(select, several.ok = TRUE)
    ## check if 'meth' (or "all") is among the 'select'ed ones :
    s.has <- function(meth) any(match(c(meth, "all"), select, nomatch=0L)) > 0L
    ans1 <- ans2 <- ans3 <- NULL
    ## Each class of quantities becomes a two-column (estimate, sd) matrix
    if(s.has("fixed"))
        ans1 <- cbind(object$par.fixed, sqrt(diag(object$cov.fixed)))
    if(s.has("random"))
        ans2 <- cbind(object$par.random, sqrt(as.numeric(object$diag.cov.random)))
    if(s.has("report"))
        ans3 <- cbind(object$value, object$sd)
    ans <- rbind(ans1, ans2, ans3)
    if(s.has("report")) {
        ## Bias corrected columns are only present if sdreport() was run
        ## with bias.correct=TRUE; pad the non-report rows with NA
        ans4 <- cbind("Est. (bias.correct)" = object$unbiased$value,
                      "Std. (bias.correct)" = object$unbiased$sd)
        if(!is.null(ans4))
            ans <- cbind(ans, rbind(NA * ans1, NA * ans2, ans4))
    }
    if(length(ans) && ncol(ans) > 0) {
        colnames(ans)[1:2] <- c("Estimate", "Std. Error")
        if(p.value) {
            ## Wald test of each quantity against zero
            ans <- cbind(ans, "z value" = (z <- ans[,"Estimate"] / ans[,"Std. Error"]))
            ans <- cbind(ans, "Pr(>|z^2|)" = pchisq(z^2, df=1, lower.tail=FALSE))
        }
    } else
        ## BUG FIX: warning() does not do sprintf-style '%s' substitution;
        ## extra character arguments are simply concatenated. Format the
        ## message explicitly so the selection actually appears in it.
        warning(sprintf("no or empty summary selected via 'select = %s'",
                        deparse(select)))
    ans
}

##' Print parameter estimates and give convergence diagnostic based on
##' gradient and Hessian.
##'
##' @title Print brief model summary
##' @param x Output from \code{\link{sdreport}}
##' @param ... Not used
##' @return NULL
##' @method print sdreport
##' @S3method print sdreport
print.sdreport <- function(x, ...) {
    cat("sdreport(.) result\n")
    print(summary(x, "fixed"))
    if(!x$pdHess) {
        cat("Warning:\nHessian of fixed effects was not positive definite.\n")
    }
    cat("Maximum gradient component:", max(abs(x$gradient.fixed)),"\n")
    invisible(x)
}

##' Get estimated parameters or standard errors in the same shape as
##' the original parameter list.
##'
##' This function converts the selected column \code{what} of
##' \code{summary(x, select = c("fixed", "random"), ...)} to the same
##' format as the original parameter list (re-ordered as the template
##' parameter order). The argument \code{what} is partially matched
##' among the column names of the summary table. The actual match is
##' added as an attribute to the output.
##'
##' @title Convert estimates to original list format.
##' @param x Output from \code{\link{sdreport}}.
##' @param what Select what to convert (Estimate / Std. Error).
##' @param report Get AD reported variables rather than model parameters ?
##' @param ... Passed to \code{\link{summary.sdreport}}.
##' @return List of same shape as original parameter list.
##' @method as.list sdreport
##' @S3method as.list sdreport
##' @examples
##' \dontrun{
##' example(sdreport)
##'
##' ## Estimates as a parameter list:
##' as.list(rep, "Est")
##'
##' ## Std Errors in the same list format:
##' as.list(rep, "Std")
##'
##' ## p-values in the same list format:
##' as.list(rep, "Pr", p.value=TRUE)
##'
##' ## AD reported variables as a list:
##' as.list(rep, "Estimate", report=TRUE)
##'
##' ## Bias corrected AD reported variables as a list:
##' as.list(rep, "Est. (bias.correct)", report=TRUE)
##' }
as.list.sdreport <- function(x, what = "", report=FALSE, ...) {
    if (what == "")
        return (x)
    if (!report) {
        ## Model parameters (fixed effects and random effects)
        ans <- x$env$parameters
        random <- x$env$random
        par <- numeric(length(x$par.fixed) + length(x$par.random))
        fixed <- rep(TRUE, length(par))
        if(length(random) > 0)
            fixed[random] <- FALSE
        ## Possible choices = summary table columns; partial match allowed
        opts <- colnames( summary(x, select = c("fixed", "random"), ...) )
        what <- match.arg(what, opts)
        if( any( fixed) ) par[ fixed] <- summary(x, select = "fixed",  ...)[ , what]
        if( any(!fixed) ) par[!fixed] <- summary(x, select = "random", ...)[ , what]
        ## Workaround utils::relist bug (?) for empty list items
        nonemp <- sapply(ans, function(x) length(x) > 0)
        nonempindex <- which(nonemp)
        skeleton <- as.relistable(ans[nonemp])
        li <- relist(par, skeleton)
        reshape <- function(x){
            ## Expand a map-collapsed parameter back to its full shape
            if(is.null(attr(x, "map")))
                return(x)
            y <- attr(x, "shape")
            ## Handle special case where parameters are mapped to a fixed
            ## value: only the "Estimate" column keeps the fixed values
            if (what != "Estimate") {
                y[] <- NA
            }
            f <- attr(x, "map")
            i <- which(f >= 0)
            y[i] <- x[f[i] + 1L]
            y
        }
        for(i in seq(skeleton)){
            ans[[nonempindex[i]]][] <- as.vector(li[[i]])
        }
        for(i in seq(ans)){
            ans[[i]] <- reshape(ans[[i]])
        }
    } else {
        ## AD reported variables
        ## Possible choices
        opts <- colnames( summary(x, select = "report", ...) )
        what <- match.arg(what, opts)
        par <- summary(x, select = "report", ...)[ , what]
        skeleton <- lapply(x$env$ADreportDims, function(dim) array(NA, dim))
        skeleton <- as.relistable(skeleton)
        ans <- relist(par, skeleton) ## Not keeping array dims !
        ans <- Map(array, ans, x$env$ADreportDims)
        class(ans) <- NULL
    }
    attr(ans, "check.passed") <- NULL
    attr(ans, "what") <- what
    ans
}
TMB/R/tmbroot.R0000644000176200001440000001247514634646733012727 0ustar liggesusers
##' Compute likelihood profile confidence intervals of a TMB object by
##' root-finding, in contrast to \code{\link{tmbprofile}}, which tries
##' to compute somewhat equally spaced values along the likelihood
##' profile (which is useful for visualizing the shape of the
##' likelihood surface), and then (via
##' \code{\link{confint.tmbprofile}}) extracting a critical value by
##' linear interpolation.
##'
##' @title Compute likelihood profile confidence intervals of a TMB object by root-finding
##' @inheritParams tmbprofile
##' @param target desired deviation from minimum log-likelihood.
##' Default is set to retrieve the 95% likelihood profile
##' confidence interval, if the objective function is a negative
##' log-likelihood function
##' @param parm.range lower and upper limits; if \code{NA},
##' a value will be guessed based on the parameter value and \code{sd.range}
##' @param sd.range in the absence of explicit \code{parm.range} values,
##' the range chosen will be the parameter value plus or minus \code{sd.range}
##' times the corresponding standard deviation.
##' May be specified as a two-element vector for different ranges below and
##' above the parameter value.
##' @param trace report information?
##' @param continuation use continuation method, i.e. set starting parameters for non-focal parameters to solutions from previous fits?
##' @return a two-element numeric vector containing the lower and upper limits (or \code{NA} if the target is not achieved in the range), with an attribute giving the total number of function iterations used
##' @examples
##' \dontrun{
##' runExample("simple",thisR=TRUE)
##' logsd0.ci <- tmbroot(obj,"logsd0")
##' }
tmbroot <- function (obj, name, target=0.5*qchisq(0.95,df=1), lincomb,
                     parm.range = c(NA,NA), sd.range = 7,
                     trace = FALSE, continuation = FALSE)
{
    ## continuation method works well for profiling, where
    ## each fit starts "close" to previous values, but may be
    ## counterproductive for root-finding, when we are jumping back
    ## and forth ...
    ## Save the parts of obj$env that the profiling below clobbers, and
    ## guarantee they are restored on exit.
    restore.on.exit <- c("last.par.best", "random.start", "value.best",
                         "last.par", "inner.control", "tracemgc")
    oldvars <- sapply(restore.on.exit, get, envir = obj$env, simplify = FALSE)
    restore.oldvars <- function() {
        for (var in names(oldvars)) assign(var, oldvars[[var]], envir = obj$env)
    }
    on.exit(restore.oldvars())
    par <- obj$env$last.par.best
    if (!is.null(obj$env$random))
        par <- par[-obj$env$random]
    ## Resolve the focal direction: either a named/indexed parameter or
    ## an explicit linear combination of parameters.
    if (missing(lincomb)) {
        if (missing(name))
            stop("No 'name' or 'lincomb' specified")
        stopifnot(length(name) == 1)
        if (is.numeric(name)) {
            lincomb <- as.numeric(1:length(par) == name)
            name <- names(par)[name]
        } else if (is.character(name)) {
            if (sum(names(par) == name) != 1)
                stop("'name' is not unique")
            lincomb <- as.numeric(names(par) == name)
        } else stop("Invalid name argument")
    } else {
        if (missing(name))
            name <- "parameter"
    }
    stopifnot(length(lincomb) == length(par))
    ## Change of basis: 'direction' spans the focal combination, the
    ## columns of C span the orthogonal (nuisance) directions.
    X <- Diagonal(length(lincomb))
    i <- which(lincomb != 0)[1]
    X[i, ] <- lincomb
    invX <- solve(X)
    direction <- invX[, i]
    C <- invX[, -i, drop = FALSE]
    that <- sum(lincomb * par)
    ## Profile objective: optimize the nuisance directions at offset x
    ## along the focal direction.
    f <- function(x) {
        par <- par + x * direction
        if (length(C)==0) {
            return(obj$fn(par))
        }
        newfn <- function(par0) {
            par <- par + as.vector(C %*% par0)
            obj$fn(par)
        }
        newgr <- function(par0) {
            par <- par + as.vector(C %*% par0)
            as.vector(obj$gr(par) %*% C)
        }
        obj$env$value.best <- Inf
        obj$env$inner.control$trace <- FALSE
        obj$env$tracemgc <- FALSE
        control <- list(step.min = 0.001)
        ans <- nlminb(start, newfn, newgr, control = control)
        if (continuation) start <<- ans$par
        conv <<- ans$convergence
        if (trace) cat("Profile value:", ans$objective, "\n")
        ans$objective
    }
    ## Robust wrapper: any failure in the inner optimization maps to NA
    f.original <- f
    f <- function(x) {
        y <- try(f.original(x), silent = TRUE)
        if (is(y, "try-error")) y <- NA
        y
    }
    ## Root function: zero where the profile crosses min + target
    g <- function(x) {
        return(f(x) - v.0 - target)
    }
    if (any(is.na(parm.range))) {
        sds <- sdreport(obj)
        sd0 <- drop(sqrt(lincomb %*% sds$cov.fixed %*% matrix(lincomb)))
        if (length(sd.range)==1) sd.range <- rep(sd.range, 2)
        parm.range[is.na(parm.range)] <-
            c(-1,1)*sd0*sd.range[is.na(parm.range)]
    }
    ## need to set start in order for f() to work ...
    ## FIXME: check convergence code ...
    conv <- 0
    start <- rep(0, length(par) - 1)
    v.0 <- f(0) ## need to set v.0 for g() ...
    ## Lower limit: only search if the range endpoint brackets the target
    lwr.x <- g(parm.range[1])
    if (is.na(lwr.x) || lwr.x < 0) {
        lwr <- list(root = NA, iter = 0)
    } else {
        lwr <- uniroot(g, interval = c(parm.range[1], 0))
    }
    ## reset for upper root-finding
    restore.oldvars()
    start <- rep(0, length(par) - 1)
    upr.x <- g(parm.range[2])
    if (is.na(upr.x) || upr.x < 0) {
        upr <- list(root = NA, iter = 0)
    } else {
        upr <- uniroot(g, interval = c(0, parm.range[2]))
    }
    ans <- c(lwr = that + lwr$root, upr = that + upr$root)
    attr(ans, "iter") <- lwr$iter + upr$iter
    return(ans)
}
TMB/R/gdbsource.R0000644000176200001440000000617114634646733013216 0ustar liggesusers
## Copyright (C) 2013-2015 Kasper Kristensen
## License: GPL-2

##' Source R-script through gdb to get backtrace.
##'
##' This function is useful for debugging templates.
##' If a script aborts e.g. due to an out-of-bound index operation
##' it should be fast to locate the line that caused the problem by
##' running \code{gdbsource(file)}.
##' Alternatively, If more detailed debugging is required, then
##' \code{gdbsource(file,TRUE)} will provide the full backtrace followed
##' by an interactive gdb session where the individual frames can be inspected.
##' Note that templates should be compiled without optimization and with debug
##' information in order to provide correct line numbers:
##' \itemize{
##' \item On Linux/OS X use \code{compile(cppfile,"-O0 -g")}.
##' \item On Windows use \code{compile(cppfile,"-O1 -g",DLLFLAGS="")} (lower
##' optimization level will cause errors).
##' }
##' @title Source R-script through gdb to get backtrace.
##' @param file Your R script
##' @param interactive Run interactive gdb session?
##' @return Object of class \code{backtrace}
gdbsource <- function(file,interactive=FALSE){
    if(!file.exists(file))
        stop("File '",file,"' not found")
    ## Windows needs a different driver (Rterm + gdb directly)
    if(.Platform$OS.type=="windows"){
        return(.gdbsource.win(file,interactive))
    }
    gdbscript <- tempfile()
    Rbin <- file.path(R.home('bin'), "R")
    if(interactive){
        ## Run the script under gdb, print the backtrace on abort, and
        ## leave the user in an interactive gdb session.
        gdbcmd <- c(paste("run --vanilla <",file),
                    "bt")
        gdbcmd <- paste(gdbcmd,"\n",collapse="")
        cat(gdbcmd,file=gdbscript)
        cmd <- paste(Rbin, "-d gdb --debugger-args=\"-x",gdbscript,"\"")
        system(cmd,intern=FALSE,ignore.stdout=FALSE,ignore.stderr=TRUE)
        return(NULL)
    } else {
        ## Non-interactive: capture the backtrace text and return it
        cat("run\nbt\nquit\n",file=gdbscript)
        cmd <- paste(Rbin, "--vanilla < ",file," -d gdb --debugger-args=\"-x",
                     gdbscript,"\"")
        txt <- system(cmd,intern=TRUE,ignore.stdout=FALSE,ignore.stderr=TRUE)
        attr(txt,"file") <- file
        class(txt) <- "backtrace"
        return(txt)
    }
}
## Windows case
.gdbsource.win <- function(file,interactive=FALSE){
    gdbscript <- tempfile()
    ## 'b abort' catches the R-level abort triggered by template errors
    txt <- paste("set breakpoint pending on\nb abort\nrun --vanilla -f",
                 file, "\nbt\n")
    cat(txt, file=gdbscript)
    cmd <- paste("gdb Rterm -x", gdbscript)
    if(interactive){
        cmd <- paste("start",cmd)
        shell(cmd)
        return(NULL)
    } else {
        txt <- system(cmd,intern=TRUE,ignore.stdout=FALSE,ignore.stderr=TRUE)
        attr(txt,"file") <- file
        class(txt) <- "backtrace"
        return(txt)
    }
}
##' If \code{gdbsource} is run non-interactively (the default) only
##' the relevant information will be printed.
##'
##' @title Print problematic cpp line number.
##' @param x Backtrace from \code{gdbsource}
##' @param ...
Not used ##' @rdname gdbsource ##' @method print backtrace ##' @S3method print backtrace ##' @return NULL print.backtrace <- function(x,...){ ## Both gdb and lldb use the output format ## ' at file.cpp:123' ## to specify the problematic source lines: pattern <- "\\ at\\ .*\\.cpp\\:[0-9]+" x <- grep(pattern, x, value=TRUE) if (length(x) == 0) x <- "Program returned without errors" else x <- c("Errors:", x) cat(paste(x,"\n")) } TMB/R/setupRStudio.R0000644000176200001440000001323614634646733013717 0ustar liggesusers## Experimental RStudio integration setupRStudio <- function(file = "~/.Rprofile", snipRfile = "~/.R/snippets/r.snippets", snipCppfile = "~/.R/snippets/c_cpp.snippets") { on.exit( message("Please re-start RStudio for the changes to take place.") ) code <- ' ######## TMB - setup RStudio setHook(packageEvent("TMB", "onLoad"), function(...) { if("tools:rstudio" %in% search()) { tmb.env <- asNamespace("TMB") compile.orig <- tmb.env$compile unlockBinding("compile", tmb.env) ## Rstudio handle compilation errors: rs.env <- as.environment("tools:rstudio") tmb.env$compile <- function(file,...) { .Call("rs_sourceCppOnBuild", file, FALSE, FALSE) status <- try( compile.orig(file, ...) ) succeeded <- (status == 0) .Call("rs_sourceCppOnBuildComplete", succeeded, "") if(!succeeded) stop("Compilation failed") status } ## Bind "sourceCpp" button to TMB compile rcpp.env <- asNamespace("Rcpp") unlockBinding("sourceCpp", rcpp.env) rcpp.env$sourceCpp <- tmb.env$compile ## Auto completion needs TMB and Eigen on system includes if (.Platform$OS.type=="windows") { ## Overload system.file system.file <- function(...){ ans <- base::system.file(...) 
chartr("\\\\", "/", shortPathName(ans)) } } definc <- Sys.getenv("CPLUS_INCLUDE_PATH") tmbinc <- system.file("include", package="TMB") eiginc <- system.file("include", package="RcppEigen") inc <- c(definc, tmbinc, eiginc) inc <- paste(inc[inc != ""], collapse=.Platform$path.sep) Sys.setenv(CPLUS_INCLUDE_PATH = inc) } } ) ' mess <- c("You are about to setup Rstudio with TMB.", "Changes will be added to the file:", "", file, "") invisible(lapply(mess, cat, "\n")) getYesOrNo <- function() { repeat { ans <- readline("OK? (yes/no) ") if (ans %in% c("yes", "no")) break; message("Please say 'yes' or 'no'") } ans } ans <- getYesOrNo() if(ans == "yes") { ## Create ~/.Rprofile if not exists if (!file.exists(file)) file.create(file) ## Read ~/.Rprofile and remove change if previously made oldlines <- readLines(file) codelines <- strsplit(code,"\n")[[1]][-1] begin <- which(head(codelines, 1) == oldlines)[1] if (!is.na(begin)) { if (begin > 1) begin <- begin - 1 end <- which(tail(codelines, 1) == oldlines) end <- min(end[end>begin]) oldlines <- oldlines[-(begin:end)] message("Removing old changes from ", file) writeLines(oldlines, file) } message("Adding changes to ", file) cat(code, file=file, append=TRUE) } ## Experimental RStudio TMB snippet integration ## Gavin Fay & Andrea Havron ## rsnips <- getRsnips() headers <- grep("snippet",rsnips) rsnips[-headers] <- paste0("\t",rsnips[-headers]) cppsnips <- getCppsnips() cheaders <- grep("snippet",cppsnips) cppsnips[-cheaders] <- paste0("\t",cppsnips[-cheaders]) mess <- c("", "You are about to setup snippets for TMB.", "Changes will be added to the files:", "", snipRfile, snipCppfile, "") invisible(lapply(mess, cat, "\n")) ans <- getYesOrNo() if(ans == "yes") { if (file.exists(snipRfile) && any( grepl(rsnips[headers][1], readLines(snipRfile)) ) ) { message("Skipping because changes seem to have been made already.") } else { dir.create(dirname(snipRfile), showWarnings=FALSE, recursive=TRUE) if (!file.exists(snipRfile)) 
file.create(snipRfile) cat(paste(rsnips, collapse="\n"), file=snipRfile, append=TRUE) dir.create(dirname(snipCppfile), showWarnings=FALSE, recursive=TRUE) if(!file.exists(snipCppfile))file.create(snipCppfile) cat(paste(cppsnips, collapse="\n"), file=snipCppfile, append=TRUE) } } invisible(NULL) } ## R snippets, paste as text, replace \$ with \\$ getRsnips <- function() { snips <- ' snippet tmb.template ## Load TMB `r require(TMB)` library(TMB) ## Make C++ file TMB::template("${1:model_name}.cpp") ## Compile and load the model compile("${1:model_name}.cpp") dyn.load(dynlib("${1:model_name}")) ## Data and parameters data <- list(x=rivers) parameters <- list(mu=0, logSigma=0) ## Make a function object obj <- MakeADFun(data, parameters, DLL="${1:model_name}") ## Call function minimizer opt <- nlminb(obj\\$par, obj\\$fn, obj\\$gr) ## Get parameter uncertainties and convergence diagnostics sdr <- sdreport(obj) sdr ' strsplit(snips, "\n")[[1]][-1] } ## R snippets, paste as text, replace \$ with \\$ ## FIXME: C++ snippets doesn't seem to work getCppsnips <- function() { snips <- ' snippet tmb.template #include template Type objective_function::operator() () { DATA_VECTOR(x); PARAMETER(mu); PARAMETER(logSigma); Type f = 0; f -= dnorm(x, mu, exp(logSigma), true).sum(); return f; } ' strsplit(snips, "\n")[[1]][-1] } TMB/R/TMB.R0000644000176200001440000024702614634646741011674 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen ## License: GPL-2 ## Utilities grepRandomParameters <- function(parameters,random){ r <- sort(unique(unlist(lapply(random,function(regexp)grep(regexp,names(parameters)))))) tmp <- lapply(parameters,function(x)x*0) tmp[r] <- lapply(tmp[r],function(x)x*0+1) which(as.logical(unlist(tmp))) } ## unlist name handling is extremely slow and we *almost* never use it ## New default: use.names=FALSE unlist <- function (x, recursive = TRUE, use.names = FALSE) { base::unlist(x, recursive, use.names) } ## Assign without losing other attributes than 'names' 
## (which may get overwritten when subsetting)
"keepAttrib<-" <- function(x, value){
    attr <- attributes(x)
    keep <- setdiff(names(attr), "names")
    x <- value
    attributes(x)[keep] <- attr[keep]
    x
}

## Associate a 'map' with *one* entry in a parameter list
updateMap <- function(parameter.entry, map.entry) {
    ## Shortened parameter: one value per factor level (NA levels drop out)
    ans <- tapply(parameter.entry, map.entry, mean)
    if(length(ans) == 0) ans <- as.numeric(ans) ## (zero-length case)
    ## Integer code used to fill short into original shape
    ## (0-based level index; -1 marks entries fixed via NA in the map)
    fnew <- unclass(map.entry)
    fnew[!is.finite(fnew)] <- 0L
    fnew <- fnew - 1L
    ## Output
    attr(ans,"shape") <- parameter.entry
    attr(ans,"map") <- fnew
    attr(ans,"nlevels") <- length(ans)
    ans
}

## Guess name of user's loaded DLL code
getUserDLL <- function(){
    dlls <- getLoadedDLLs()
    ## A TMB model DLL is recognized by exporting 'MakeADFunObject'
    isTMBdll <- function(dll)!is(try(getNativeSymbolInfo("MakeADFunObject",dll),TRUE),"try-error")
    TMBdll <- sapply(dlls, isTMBdll)
    if(sum(TMBdll) == 0)
        stop("There are no TMB models loaded (use 'dyn.load').")
    if(sum(TMBdll) > 1)
        stop("Multiple TMB models loaded. Failed to guess DLL name.")
    names(dlls[TMBdll])
}

## Un-exported functions that we need
.shlib_internal <- get(".shlib_internal", envir = asNamespace("tools"), inherits = FALSE)

## Update cholesky factorization ( of H+t*I ) avoiding copy overhead
## by writing directly to L(!).
updateCholesky <- function(L, H, t=0){
    .Call("tmb_destructive_CHM_update", L, H, t, PACKAGE="TMB")
}

solveCholesky <- function(L, x){
    .Call("tmb_CHMfactor_solve", L, x, PACKAGE="TMB")
}

## Test for invalid external pointer
isNullPointer <- function(pointer) {
    .Call("isNullPointer", pointer, PACKAGE="TMB")
}

## Add external pointer finalizer
registerFinalizer <- function(ADFun, DLL) {
    if (is.null(ADFun))
        return (NULL) ## ADFun=NULL used by sdreport
    ADFun$DLL <- DLL
    finalizer <- function(ptr) {
        if ( ! isNullPointer(ptr) ) {
            .Call("FreeADFunObject", ptr, PACKAGE=DLL)
        } else {
            ## Nothing to free
        }
    }
    reg.finalizer(ADFun$ptr, finalizer)
    ADFun
}

##' Sequential reduction configuration
##'
##' Helper function to specify an integration grid used by the
##' sequential reduction algorithm available through the argument
##' \code{integrate} to \code{MakeADFun}.
##' @param x Breaks defining the domain of integration
##' @param discrete Boolean defining integration wrt Lebesgue measure (\code{discrete=FALSE}) or counting measure \code{discrete=TRUE}.
SR <- function(x, discrete=FALSE) {
    if (is(x, "SR"))
        return (x)
    x <- as.numeric(x)
    if (is.unsorted(x))
        stop("'x' must be sorted")
    if (discrete) {
        ## Counting measure: unit weight at each break
        w <- rep(1., length(x))
    } else {
        ## Lebesgue measure: midpoint rule on each interval
        w <- diff(x)
        x <- head(x, -1) + w / 2
    }
    structure(list(x=x, w=w, method="marginal_sr"), class="SR")
}

##' Gauss Kronrod configuration
##'
##' Helper function to specify parameters used by the Gauss Kronrod
##' integration available through the argument \code{integrate} to
##' \code{MakeADFun}.
##' @param ... See source code
GK <- function(...) {
    ans <- list(dim=1, adaptive=FALSE, debug=FALSE)
    args <- list(...)
    ans[names(args)] <- args
    ans$method <- "marginal_gk"
    class(ans) <- "GK"
    ans
}

## TODO: Laplace approx config
LA <- function(...) {
    ans <- list(...)
    ans$method <- "laplace"
    class(ans) <- "LA"
    ans
}

## 'parse' MakeADFun argument 'integrate':
## keep only the entries of class 'name'
parseIntegrate <- function(arg, name) {
    i <- sapply(arg, function(x) is(x, name))
    arg[i]
}

##' Construct objective functions with derivatives based on the users C++ template.
##'
##' A call to \code{MakeADFun} will return an object that, based on the users DLL code (specified through \code{DLL}), contains functions to calculate the objective function
##' and its gradient. The object contains the following components:
##' \itemize{
##' \item \code{par} A default parameter.
##' \item \code{fn} The likelihood function.
##' \item \code{gr} The gradient function.
##' \item \code{report} A function to report all variables reported with the REPORT() macro in the user template. ##' \item \code{env} Environment with access to all parts of the structure. ##' } ##' and is thus ready for a call to an R optimizer, such as \code{nlminb} or \code{optim}. ##' Data (\code{data}) and parameters (\code{parameters}) are directly read by the user template via the macros beginning with DATA_ ##' and PARAMETER_. The order of the PARAMETER_ macros defines the order of parameters in the final objective function. ##' There are no restrictions on the order of random parameters, fixed parameters or data in the template. ##' @section Parameter mapping: ##' Optionally, a simple mechanism for collecting and fixing parameters from R is available through the \code{map} argument. A map is a named list ##' of factors with the following properties: ##' \itemize{ ##' \item names(map) is a subset of names(parameters). ##' \item For a parameter "p" length(map$p) equals length(parameters$p). ##' \item Parameter entries with NAs in the factor are fixed. ##' \item Parameter entries with equal factor level are collected to a common value. ##' } ##' More advanced parameter mapping, such as collecting parameters between different vectors etc., must be implemented from the template. ##' @section Specifying random effects: ##' Random effects are specified via the argument \code{random}: A component of the parameter list is marked as random if its name is matched ##' by any of the characters of the vector \code{random} (Regular expression match is performed if \code{regexp=TRUE}). ##' If some parameters are specified as random effects, these will ##' be integrated out of the objective function via the Laplace approximation. In this situation the functions \code{fn} and \code{gr} ##' automatically perform an optimization of random effects for each function evaluation. This is referred to as ##' the 'inner optimization'. 
##' Strategies for choosing initial values of the inner optimization can be controlled
##' via the argument \code{random.start}. The default is \code{expression(last.par.best[random])}
##' where \code{last.par.best} is an internal full parameter vector corresponding to the currently best
##' likelihood. An alternative choice could be \code{expression(last.par[random])} i.e. the random effect optimum of
##' the most recent - not necessarily best - likelihood evaluation. Further control of the inner optimization can
##' be obtained by the argument \code{inner.control} which is a list of control parameters for the inner optimizer
##' \code{newton}. Depending on the inner optimization problem type the following settings are recommended:
##' \enumerate{
##' \item Quasi-convex: \code{smartsearch=TRUE} (the default).
##' \item Strictly-convex: \code{smartsearch=FALSE} and \code{maxit=20}.
##' \item Quadratic: \code{smartsearch=FALSE} and \code{maxit=1}.
##' }
##' @section The model environment \code{env}:
##' Technically, the user template is processed several times by inserting
##' different types as template parameter, selected by argument \code{type}:
##' \itemize{
##' \item \code{"ADFun"} Run through the template with AD-types and produce a stack of operations representing the objective function.
##' \item \code{"Fun"} Run through the template with ordinary double-types.
##' \item \code{"ADGrad"} Run through the template with nested AD-types and produce a stack of operations representing the objective function gradient.
##' }
##' Each of these is represented by external pointers to C++ structures available in the environment \code{env}.
##'
##' Further objects in the environment \code{env}:
##' \itemize{
##' \item \code{validpar} Function defining the valid parameter region (by default no restrictions). If an invalid
##' parameter is inserted \code{fn} immediately returns NaN.
##' \item \code{parList} Function to get the full parameter vector of random and fixed effects in a convenient ##' list format. ##' \item \code{random} An index vector of random effect positions in the full parameter vector. ##' \item \code{last.par} Full parameter of the latest likelihood evaluation. ##' \item \code{last.par.best} Full parameter of the best likelihood evaluation. ##' \item \code{tracepar} Trace every likelihood evaluation ? ##' \item \code{tracemgc} Trace maximum gradient component of every gradient evaluation ? ##' \item \code{silent} Pass 'silent=TRUE' to all try-calls ? ##' } ##' @section The argument \code{intern}: ##' By passing \code{intern=TRUE} the entire Laplace approximation (including sparse matrix calculations) is done within the AD machinery on the C++ side. This requires the model to be compiled using the 'TMBad framework' - see \code{\link{compile}}. For any serious use of this option one should consider compiling with \code{supernodal=TRUE} - again see \code{\link{compile}} - in order to get performance comparable to R's matrix calculations. The benefit of the 'intern' LA is that it may be faster in some cases and that it provides an autodiff hessian (\code{obj$he}) wrt. the fixed effects which would otherwise not work for random effect models. Another benefit is that it gives access to fast computations with certain hessian structures that do not meet the usual sparsity requirement. A detailed list of options are found in the online doxygen documentation in the 'newton' namespace under the 'newton_config' struct. All these options can be passed from R via the `inner.control` argument. However, there are some drawbacks of running the LA on the C++ side. Notably, random effects are no longer visible in the model environment which may break assumptions on the layout of internal vectors (`par`, `last.par`, etc). In addition, model debugging becomes harder when calculations are moved to C++. 
##' @section Controlling tracing: ##' A high level of tracing information will be output by default when evaluating the objective function and gradient. ##' This is useful while developing a model, but may eventually become annoying. Disable all tracing by passing ##' \code{silent=TRUE} to the \code{MakeADFun} call. ##' @note Do not rely upon the default arguments of any of the functions in the model object \code{obj$fn}, \code{obj$gr}, \code{obj$he}, \code{obj$report}. I.e. always use the explicit form \code{obj$fn(obj$par)} rather than \code{obj$fn()}. ##' ##' @title Construct objective functions with derivatives based on a compiled C++ template. ##' @param data List of data objects (vectors, matrices, arrays, factors, sparse matrices) required by the user template (order does not matter and un-used components are allowed). ##' @param parameters List of all parameter objects required by the user template (both random and fixed effects). ##' @param map List defining how to optionally collect and fix parameters - see details. ##' @param type Character vector defining which operation stacks are generated from the users template - see details. ##' @param random Character vector defining the random effect parameters. See also \code{regexp}. ##' @param profile Parameters to profile out of the likelihood (this subset will be appended to \code{random} with Laplace approximation disabled). ##' @param random.start Expression defining the strategy for choosing random effect initial values as function of previous function evaluations - see details. ##' @param hessian Calculate Hessian at optimum? ##' @param method Outer optimization method. ##' @param inner.method Inner optimization method (see function "newton"). ##' @param inner.control List controlling inner optimization. ##' @param MCcontrol List controlling importance sampler (turned off by default). ##' @param ADreport Calculate derivatives of macro ADREPORT(vector) instead of objective_function return value? 
##' @param atomic Allow tape to contain atomic functions? ##' @param LaplaceNonZeroGradient Allow Taylor expansion around non-stationary point? ##' @param DLL Name of shared object file compiled by user (without the conventional extension, \file{.so}, \file{.dll}, \dots). ##' @param checkParameterOrder Optional check for correct parameter order. ##' @param regexp Match random effects by regular expressions? ##' @param silent Disable all tracing information? ##' @param intern Do Laplace approximation on C++ side ? See details (Experimental - may change without notice) ##' @param integrate Specify alternative integration method(s) for random effects (see details) ##' @param ... Currently unused. ##' @return List with components (fn, gr, etc) suitable for calling an R optimizer, such as \code{nlminb} or \code{optim}. MakeADFun <- function(data, parameters, map=list(), type=c("ADFun","Fun","ADGrad"[!intern && (!is.null(random) || !is.null(profile)) ] ), random=NULL, profile=NULL, random.start=expression(last.par.best[random]), hessian=FALSE,method="BFGS", inner.method="newton", inner.control=list(maxit=1000), MCcontrol=list(doMC=FALSE,seed=123,n=100), ADreport=FALSE, atomic=TRUE, LaplaceNonZeroGradient=FALSE, ## Experimental feature: Allow expansion around non-stationary point DLL=getUserDLL(), checkParameterOrder=TRUE, ## Optional check regexp=FALSE, silent=FALSE, intern=FALSE, integrate=NULL, ...){ ## Check that DLL is loaded if ( ! DLL %in% names(getLoadedDLLs()) ) { stop(sprintf("'%s' was not found in the list of loaded DLLs. 
Forgot to dyn.load(dynlib('%s')) ?", DLL, DLL)) } env <- environment() ## This environment if(!is.list(data)) stop("'data' must be a list") ok <- function(x)(is.matrix(x)|is.vector(x)|is.array(x))&(is.numeric(x)|is.logical(x)) ok.data <- function(x)ok(x)|is.factor(x)|is(x,"sparseMatrix")|is.list(x)|(is.character(x)&length(x)==1) check.passed <- function(x){ y <- attr(x,"check.passed") if(is.null(y)) FALSE else y } if(!check.passed(data)){ if(!all(sapply(data,ok.data))){ cat("Problem with these data entries:\n") print(which(!sapply(data,ok.data))) stop("Only numeric matrices, vectors, arrays, ", "factors, lists or length-1-characters ", "can be interfaced") } } if(!check.passed(parameters)){ if(!all(sapply(parameters,ok))){ cat("Problem with these parameter entries:\n") print(which(!sapply(parameters,ok))) stop("Only numeric matrices, vectors and arrays ", "can be interfaced") } } if(length(data)){ dataSanitize <- function(x){ if(is.list(x)) return( lapply(x, dataSanitize) ) if(is(x,"sparseMatrix")){ ## WAS: x <- as(x, "dgTMatrix") x <- as( as(x, "TsparseMatrix"), "generalMatrix") } else if (is.character(x)) { ## Do nothing } else { if(is.factor(x))x <- unclass(x)-1L ## Factors are passed as 0-based integers !!! storage.mode(x) <- "double" } x } if(!check.passed(data)){ data <- lapply(data,dataSanitize) } attr(data,"check.passed") <- TRUE } if(length(parameters)){ parameterSanitize <- function(x){ storage.mode(x) <- "double" x } if(!check.passed(parameters)){ parameters <- lapply(parameters,parameterSanitize) } attr(parameters,"check.passed") <- TRUE } if(checkParameterOrder){ ## For safety, check that parameter order match the parameter order in user template. ## If not, permute parameter list with a warning. 
## Order in which parameters were requested: parNameOrder <- getParameterOrder(data, parameters, new.env(), DLL=DLL) if(!identical(names(parameters),parNameOrder)){ if(!silent) cat("Order of parameters:\n") if(!silent) print(names(parameters)) if(!silent) cat("Not matching template order:\n") if(!silent) print(parNameOrder) keepAttrib( parameters ) <- parameters[parNameOrder] if(!silent) cat("Your parameter list has been re-ordered.\n(Disable this warning with checkParameterOrder=FALSE)\n") } } ## Prepare parameter mapping. ## * A parameter map is a factor telling which parameters should be grouped ## * NA values are untouched: So user can e.g. set them to zero ## * NOTE: CURRENTLY ONLY WORKS ON PARAMETER_ARRAY() !!! if(length(map)>0){ ok <- all(names(map)%in%names(parameters)) if(!ok)stop("Names in map must correspond to parameter names") ok <- all(sapply(map,is.factor)) if(!ok)stop("map must contain factors") ok <- sapply(parameters[names(map)],length)==sapply(map,length) if(!all(ok))stop("A map factor length must equal parameter length") param.map <- lapply(names(map), function(nam) { updateMap(parameters[[nam]], map[[nam]]) }) ## Now do the change: keepAttrib( parameters[names(map)] ) <- param.map } lrandom <- function() { ans <- logical(length(par)) ans[random] <- TRUE ans } lfixed <- function() { !lrandom() } ## Utility to get back parameter list in original shape parList <- function(x=par[lfixed()],par=last.par){ ans <- parameters nonemp <- sapply(ans,function(x)length(x)>0) ## Workaround utils::relist bug for empty list items nonempindex <- which(nonemp) skeleton <- as.relistable(ans[nonemp]) par[lfixed()] <- x li <- relist(par,skeleton) reshape <- function(x){ if(is.null(attr(x,"map")))return(x) y <- attr(x,"shape") f <- attr(x,"map") i <- which(f>=0) y[i] <- x[f[i]+1] y } for(i in seq(skeleton)){ ans[[nonempindex[i]]][] <- as.vector(li[[i]]) } ## MM: ans[] <- lapply(ans, reshape) # _____________________ for(i in seq(ans)){ ans[[i]] <- reshape(ans[[i]]) } 
ans } type <- match.arg(type, eval(type), several.ok = TRUE) #if("ADFun"%in%type)ptrADFun <- .Call("MakeADFunObject",data,parameters) else ptrADFun <- NULL reportenv <- new.env() par <- NULL last.par.ok <- last.par <- last.par1 <- last.par2 <- last.par.best <- NULL value.best <- Inf ADFun <- NULL Fun <- NULL ADGrad <- NULL tracepar <- FALSE validpar <- function(x)TRUE tracemgc <- TRUE ## dummy assignments better than "globalVariables(....)" L.created.by.newton <- skipFixedEffects <- spHess <- altHess <- NULL ## Disable all tracing information beSilent <- function(){ tracemgc <<- FALSE inner.control$trace <<- FALSE silent <<- TRUE cf <- config(DLL=DLL) i <- grep("^trace.",names(cf)) cf[i] <- 0 cf$DLL <- DLL do.call(config, cf) NULL } if(silent)beSilent() ## Getting shape of ad reported variables ADreportDims <- NULL ADreportIndex <- function() { lngt <- sapply(ADreportDims, prod) offset <- head( cumsum( c(1, lngt) ) , -1) ans <- lapply(seq_along(lngt), function(i) array(seq(from = offset[i], length.out = lngt[i]), ADreportDims[[i]] )) names(ans) <- names(ADreportDims) ans } ## All external pointers are created in function "retape" and can be re-created ## by running retape() if e.g. the number of openmp threads is changed. ## set.defaults: reset internal parameters to their default values. .random <- random retape <- function(set.defaults = TRUE){ omp <- config(DLL=DLL) ## Get current OpenMP configuration random <<- .random ## Restore original 'random' argument if(atomic){ ## FIXME: Then no reason to create ptrFun again later ? ## User template contains atomic functions ==> ## Have to call "double-template" to trigger tape generation Fun <<- MakeDoubleFunObject(data, parameters, reportenv, DLL=DLL) ## Hack: unlist(parameters) only guarantied to be a permutation of the parameter vecter. 
out <- EvalDoubleFunObject(Fun, unlist(parameters), get_reportdims = TRUE) ADreportDims <<- attr(out, "reportdims") } if(is.character(profile)){ random <<- c(random, profile) } if(is.character(random)){ if(!regexp){ ## Default: do exact match if(!all(random %in% names(parameters))){ cat("Some 'random' effect names does not match 'parameter' list:\n") print(setdiff(random,names(parameters))) cat("(Note that regular expression match is disabled by default)\n") stop() } if(any(duplicated(random))){ cat("Duplicates in 'random' - will be removed\n") random <<- unique(random) } tmp <- lapply(parameters,function(x)x*0) tmp[random] <- lapply(tmp[random],function(x)x*0+1) random <<- which(as.logical(unlist(tmp))) if(length(random)==0) random <<- NULL } if(regexp){ ## Original regular expression match random <<- grepRandomParameters(parameters,random) if(length(random)==0){ cat("Selected random effects did not match any model parameters.\n") random <<- NULL } } if(is.character(profile)){ ## Convert 'profile' to a pointer into random (represented ## as logical index vector): tmp <- lapply(parameters,function(x)x*0) tmp[profile] <- lapply(tmp[profile],function(x)x*0+1) profile <<- match( which(as.logical(unlist(tmp))) , random ) if(length(profile)==0) random <<- NULL if(any(duplicated(profile))) stop("Profile parameter vector not unique.") tmp <- rep(0L, length(random)) tmp[profile] <- 1L profile <<- tmp } if (set.defaults) { par <<- unlist(parameters) } } if("ADFun"%in%type){ ## autopar? => Tape with single thread if (omp$autopar) openmp(1, DLL=DLL) ADFun <<- MakeADFunObject(data, parameters, reportenv, ADreport=ADreport, DLL=DLL) ## autopar? 
=> Restore OpenMP number of threads if (omp$autopar) openmp(omp$nthreads, DLL=DLL) if (!is.null(integrate)) { nm <- sapply(parameters, length) nmpar <- rep(names(nm), nm) for (i in seq_along(integrate)) { I <- integrate[i] ## Special case: joint integration list if (is.null(names(I)) || names(I) == "") { I <- I[[1]] } ok <- all(names(I) %in% nmpar[random]) if (!ok) stop("Names to be 'integrate'd must be among the random parameters") w <- which(nmpar[random] %in% names(I)) ## Argument 'which' is common to all methods arg_which <- I[[1]]$which if ( ! is.null(arg_which) ) w <- w[arg_which] method <- sapply(I, function(x) x$method) ok <- all(duplicated(method)[-1]) if (!ok) stop("Grouping only allowed for identical methods") method <- method[1] cfg <- NULL if (method == "marginal_sr") { ## SR has special support for joint integration fac <- factor(nmpar[random[w]], levels=names(I)) cfg <- list( grid = I, random2grid = fac ) } else { ## For other methods we use the first ## (FIXME: Test no contradicting choices) cfg <- I[[1]] } stopifnot (is.list(cfg)) ## Integrate parameter subset out of the likelihood TransformADFunObject(ADFun, method = method, random_order = random[w], config = cfg, mustWork = 1L) ## Find out what variables have been integrated ## (only GK might not integrate all random[w]) activeDomain <- as.logical(info(ADFun)$activeDomain) random_remove <- random[w][!activeDomain[random[w]]] ## Integrated parameters must no longer be present TransformADFunObject(ADFun, method="remove_random_parameters", random_order = random_remove, mustWork = 1L) ## Adjust 'random' and 'par' accordingly attr(ADFun$ptr, "par") <- attr(ADFun$ptr, "par")[-random_remove] par_mask <- rep(FALSE, length(attr(ADFun$ptr, "par"))) par_mask[random] <- TRUE par <<- par[-random_remove] nmpar <- nmpar[-random_remove] par_mask <- par_mask[-random_remove] random <<- which(par_mask) if (length(random) == 0) { random <<- NULL type <<- setdiff(type, "ADGrad") } ## Run tape optimizer if 
(config(DLL=DLL)$optimize.instantly) { TransformADFunObject(ADFun, method = "optimize", mustWork = 1L) } } } if (intern) { cfg <- inner.control if (is.null(cfg$sparse)) cfg$sparse <- TRUE cfg <- lapply(cfg, as.double) TransformADFunObject(ADFun, method = "laplace", config = cfg, random_order = random, mustWork = 1L) TransformADFunObject(ADFun, method="remove_random_parameters", random_order = random, mustWork = 1L) ## FIXME: Should be done by above .Call attr(ADFun$ptr,"par") <- attr(ADFun$ptr,"par")[-random] ## par <<- par[-random] random <<- NULL ## Run tape optimizer if (config(DLL=DLL)$optimize.instantly) { TransformADFunObject(ADFun, method = "optimize", mustWork = 1L) } } if (set.defaults) { par <<- attr(ADFun$ptr,"par") last.par <<- par last.par1 <<- par last.par2 <<- par last.par.best <<- par value.best <<- Inf } } if (omp$autopar && !ADreport) { ## Experiment ! TransformADFunObject(ADFun, method = "parallel_accumulate", num_threads = as.integer(openmp(DLL=DLL)), mustWork = 0L) } if (length(random) > 0) { ## Experiment ! TransformADFunObject(ADFun, method = "reorder_random", random_order = random, mustWork = 0L) } if("Fun"%in%type) { Fun <<- MakeDoubleFunObject(data, parameters, reportenv, DLL=DLL) } if("ADGrad"%in%type) { retape_adgrad(lazy = TRUE) } ## Skip fixed effects from the full hessian ? ## * Probably more efficient - especially in terms of memory. ## * Only possible if a taped gradient is available - see function "ff" below. env$skipFixedEffects <- !is.null(ADGrad) delayedAssign("spHess", sparseHessianFun(env, skipFixedEffects=skipFixedEffects ), assign.env = env) }## end{retape} ## Lazy / Full adgrad ? 
retape_adgrad <- function(lazy = TRUE) { ## * Use already taped function value f = ADFun$ptr ## * In random effects case we only need the 'random' part of the gradient if (!lazy) random <- NULL ADGrad <<- MakeADGradObject(data, parameters, reportenv, random=random, f=ADFun$ptr, DLL=DLL) } retape(set.defaults = TRUE) ## Has atomic functions been generated for the tapes ? usingAtomics <- function().Call("usingAtomics", PACKAGE=DLL) .data <- NULL f <- function(theta=par, order=0, type="ADdouble", cols=NULL, rows=NULL, sparsitypattern=0, rangecomponent=1, rangeweight=NULL, dumpstack=0, doforward=1, do_simulate=0, set_tail=0, keepx=NULL, keepy=NULL) { if(isNullPointer(ADFun$ptr)) { if(silent)beSilent() ## Loaded or deep copied object: Only restore external ## pointers. Don't touch last.par/last.par.best etc: retape(set.defaults = FALSE) } ## User has changed the data => Next forward pass must traverse whole graph ! data_changed <- !identical(.data, data) ## Fast to check if identical (i.e. most of the time) if (data_changed) { .data <<- data ## Shallow copy (fast) } switch(type, "ADdouble" = { res <- EvalADFunObject(ADFun, theta, order=order, hessiancols=cols, hessianrows=rows, sparsitypattern=sparsitypattern, rangecomponent=rangecomponent, rangeweight=rangeweight, dumpstack=dumpstack, doforward=doforward, set_tail=set_tail, data_changed=data_changed) last.par <<- theta if(order==1)last.par1 <<- theta if(order==2)last.par2 <<- theta }, "double" = { res <- EvalDoubleFunObject(Fun, theta, do_simulate=do_simulate) }, "ADGrad" = { res <- EvalADFunObject(ADGrad, theta, order=order, hessiancols=cols, hessianrows=rows, sparsitypattern=sparsitypattern, rangecomponent=rangecomponent, rangeweight=rangeweight, dumpstack=dumpstack, doforward=doforward, set_tail=set_tail, keepx=keepx, keepy=keepy, data_changed=data_changed) }, stop("invalid 'type'")) # end{ switch() } res } ## end{ f } h <- function(theta=par, order=0, hessian, L, ...) 
{ if(order == 0) { ##logdetH <- determinant(hessian)$mod logdetH <- 2*determinant(L, sqrt=TRUE)$modulus ans <- f(theta,order=0) + .5*logdetH - length(random)/2*log(2*pi) if(LaplaceNonZeroGradient){ grad <- f(theta,order=1)[random] ans - .5* sum(grad * as.numeric( solveCholesky(L, grad) )) } else ans } else if(order == 1) { if(LaplaceNonZeroGradient)stop("Not correct for LaplaceNonZeroGradient=TRUE") ##browser() e <- environment(spHess) solveSubset <- function(L).Call("tmb_invQ",L,PACKAGE="TMB") solveSubset2 <- function(L).Call("tmb_invQ_tril_halfdiag",L,PACKAGE="TMB") ## FIXME: The following two lines are not efficient: ## 1. ihessian <- tril(solveSubset(L)) ## 2. diag(ihessian) <- .5*diag(ihessian) ## Make option to solveSubset to return lower triangular part ## with diagonal halved. As it is now the output of solveSubset is ## symm _with upper storage_ (!) (side effect of cholmod_ptranspose) ## therefore tril takes long time. Further, "diag<-" is too slow. ## FIXED! : ihessian <- solveSubset2(L) ## Profile case correction (1st order case only) if(!is.null(profile)){ ## Naive way: ## ihessian[profile,] <- 0 ## ihessian[,profile] <- 0 ## However, this would modify sparseness pattern and also not ## account for 'ihessian' being permuted: perm <- L@perm+1L ihessian <- .Call("tmb_sparse_izamd", ihessian, profile[perm], 0.0, PACKAGE="TMB") } ## General function to lookup entries A subset B. ## lookup.old <- function(A,B){ ## A <- as(tril(A),"dtTMatrix") ## B <- as(tril(B),"dtTMatrix") ## match(paste(A@i,A@j),paste(B@i,B@j)) ## } ## General function to lookup entries A in B[r,r] assuming pattern of A ## is subset of pattern of B[r,r]. 
lookup <- function(A,B,r=NULL){ A <- tril(A); B <- tril(B) B@x[] <- seq.int(length.out=length(B@x)) ## Pointers to full B matrix (Can have up to 2^31-1 non-zeros) if(!is.null(r)){ ## Goal is to get: ## B <- forceSymmetric(B) ## B <- B[r,r,drop=FALSE] ## However the internal Matrix code for ## "B[r,r,drop=FALSE]" creates temporary "dgCMatrix" ## thereby almost doubling the number of non-zeros. Need ## solution that works with max (2^31-1) non-zeros: B <- .Call("tmb_half_diag", B, PACKAGE="TMB") B <- tril( B[r,r,drop=FALSE] ) + tril( t(B)[r,r,drop=FALSE] ) } m <- .Call("match_pattern", A, B, PACKAGE="TMB") ## Same length as A@x with pointers to B@x B@x[m] } if(is.null(e$ind1)){ ## hessian: Hessian of random effect part only. ## ihessian: Inverse subset of hessian (same dim but larger pattern!). ## Hfull: Pattern of full hessian including fixed effects. if (!silent) cat("Matching hessian patterns... ") iperm <- invPerm(L@perm+1L) e$ind1 <- lookup(hessian,ihessian,iperm) ## Same dimensions e$ind2 <- lookup(hessian,e$Hfull,random) ## Note: dim(Hfull)>dim(hessian) ! if (!silent) cat("Done\n") } w <- rep(0,length.out=length(e$Hfull@x)) w[e$ind2] <- ihessian@x[e$ind1] ## Reverse mode evaluate ptr in rangedirection w ## now gives .5*tr(Hdot*Hinv) !! ## return as.vector( f(theta,order=1) ) + EvalADFunObject(e$ADHess, theta, order=1, rangeweight=w) }## order == 1 else stop(sprintf("'order'=%d not yet implemented", order)) } ## end{ h } ff <- function(par.fixed=par[-random], order=0, ...) { names(par.fixed) <- names(par[-random]) f0 <- function(par.random,order=0,...){ par[random] <- par.random par[-random] <- par.fixed res <- f(par,order=order,set_tail=random[1],...) 
switch(order+1,res,res[random],res[random,random]) } ## sparse hessian H0 <- function(par.random){ par[random] <- par.random par[-random] <- par.fixed #spHess(par)[random,random,drop=FALSE] spHess(par,random=TRUE,set_tail=random[1]) } if(inner.method=="newton"){ #opt <- newton(eval(random.start),fn=f0,gr=function(x)f0(x,order=1), # he=function(x)f0(x,order=2)) opt <- try( do.call("newton",c(list(par=eval(random.start), fn=f0, gr=function(x)f0(x,order=1), ##he=function(x)f0(x,order=2)), he=H0,env=env), inner.control) ), silent=silent ) if (inherits(opt, "try-error") || !is.finite(opt$value)) { if (order==0) return(NaN) if (order==1) stop("inner newton optimization failed during gradient calculation") stop("invalid 'order'") } } else { opt <- optim(eval(random.start),fn=f0,gr=function(x)f0(x,order=1), method=inner.method,control=inner.control) } par[random] <- opt$par par[-random] <- par.fixed ## Use alternative Hessian for log determinant? altHessFlag <- !is.null(altHess) if (altHessFlag) { altHess(TRUE) ## Enable alternative hessian on.exit(altHess(FALSE)) } ## HERE! 
- update hessian and cholesky if(!skipFixedEffects){ ## old way hess <- spHess(par) ## Full hessian hessian <- hess[random,random] ## Subset } else { hessian <- spHess(par,random=TRUE) } ## Profile case correction (0 and 1st order) if( !is.null(profile) ){ ## Naive way: ## hessian[profile, ] <- 0 ## hessian[, profile] <- 0 ## diag(hessian)[profile] <- 1 ## However, this would modify sparseness pattern: hessian <- .Call("tmb_sparse_izamd", hessian, profile, 1.0, PACKAGE="TMB") } ## Update Cholesky: if(inherits(env$L.created.by.newton,"dCHMsuper")){ L <- env$L.created.by.newton ##.Call("destructive_CHM_update",L,hessian,as.double(0),PACKAGE="Matrix") updateCholesky(L,hessian) } else L <- Cholesky(hessian,perm=TRUE,LDL=FALSE,super=TRUE) if(order==0){ res <- h(par,order=0,hessian=hessian,L=L) ## Profile case correction if(!is.null(profile)){ res <- res + sum(profile)/2*log(2*pi) } if(is.finite(res)){ if(res=1){ vec <- exp(I-M) p <- vec/sum(vec) i <- (p>0) p <- p[i] I1 <- apply(samples[,i,drop=FALSE],2,eval.target,order=1)[-random,,drop=FALSE] gr <- as.vector(I1 %*% p) if(order==1)return(gr) ## I1I1 <- t(apply(I1,1,function(x)x%*%t(x))) ## I2 <- t(apply(samples,1,function(x)eval.target(x,order=2)[-random,-random])) ## h <- colMeans(vec*(-I1I1+I2))/mean(vec)+as.vector(gr)%*%t(as.vector(gr)) ## if(order==2)return(h) } if(!is.null(phi)){ phival <- apply(samples,2,phi) if(is.null(dim(phival)))phival <- t(phival) p <- exp(I-M); p <- p/sum(p) ans <- phival %*% p return(ans) } value <- -log(mean(exp(I-M)))-M ci <- 1.96*sd(exp(I-M))/sqrt(n) attr(value,"confint") <- -log(mean(exp(I-M))+c(lower=ci,upper=-ci))-M if(keep){ attr(value,"samples") <- samples attr(value,"nlratio") <- -I } value } report <- function(par=last.par){ f(par,order=0,type="double") as.list(reportenv) } simulate <- function(par = last.par, complete = FALSE){ f(par, order = 0, type = "double", do_simulate = TRUE) sim <- as.list(reportenv) if(complete){ ans <- data ans[names(sim)] <- sim } else { ans <- sim } 
ans } ## return : list( ## Default parameter vector par = par[lfixed()], ## Objective function fn = function(x = last.par[lfixed()], ...) { if (tracepar) { cat("par:\n"); print(x) } if (!validpar(x)) return(NaN) if (is.null(random)) { ans <- f(x,order=0) if (!ADreport) { if (is.finite(ans) && ans < value.best) { last.par.best <<- x; value.best <<- ans } } } else { ans <- try({ if(MCcontrol$doMC){ ff(x, order=0) MC(last.par, n=MCcontrol$n, seed=MCcontrol$seed, order=0) } else ff(x,order=0) }, silent=silent) if (is.character(ans)) ans <- NaN } ans }, ## Gradient of objective function gr = function(x = last.par[lfixed()], ...) { if (is.null(random)) { ans <- f(x, order=1) } else { ans <- try( { if (MCcontrol$doMC) { ff(x,order=0) MC(last.par, n=MCcontrol$n, seed=MCcontrol$seed, order=1) } else ff(x,order=1) }, silent=silent) if(is.character(ans)) ans <- rep(NaN, length(x)) } if (tracemgc) cat("outer mgc: ", max(abs(ans)), "\n") ans }, ## Hessian of objective function he = function(x = last.par[lfixed()], atomic=usingAtomics()) { if (is.null(random)) { ## If no atomics on tape we have all orders implemented: if(!atomic) return( f(x,order=2) ) ## Otherwise, get Hessian as 1st order derivative of gradient: if(is.null(ADGrad)) retape_adgrad() return( f(x, type="ADGrad", order=1) ) } else { stop("Hessian not yet implemented for models with random effects.") } }, ## Other methods and flags hessian=hessian, method=method, retape=retape, env=env, report=report, simulate=simulate,...) }## end{ MakeADFun } ##' Free memory allocated on the C++ side by \code{MakeADFun}. ##' ##' @note ##' This function is normally not needed. ##' @details ##' An object returned by \code{MakeADFun} contains pointers to ##' structures allocated on the C++ side. These are managed by R's ##' garbage collector which for the most cases is sufficient. 
##' However,
##' because the garbage collector is unaware of the C++ object sizes,
##' it may fail to release memory to the system as frequently as
##' necessary. In such cases one can manually call
##' \code{FreeADFun(obj)} to release the resources.
##' @section Memory management:
##' Memory allocated on the C++ side by \code{MakeADFun} is
##' represented by external pointers. Each such pointer has an
##' associated 'finalizer' (see \code{reg.finalizer}) that deallocates
##' the external pointer when \code{gc()} decides the pointer is no
##' longer needed. Deallocated pointers are recognized on the R
##' side as external null pointers \code{<pointer: (nil)>}. This is
##' important as it provides a way to prevent the finalizers from
##' freeing pointers that have already been deallocated \emph{even if
##' the deallocation C-code has been unloaded}.
##' The user DLL maintains a list of all external pointers on the C
##' side. Three events can reduce the list:
##' \itemize{
##' \item Garbage collection of an external pointer that is no longer needed (triggers corresponding finalizer).
##' \item Explicit deallocation of external pointers using \code{FreeADFun()} (corresponding finalizers are untriggered but harmless).
##' \item Unload/reload of the user's DLL deallocates all external pointers (corresponding finalizers are untriggered but harmless).
##' }
##' @title Free memory allocated on the C++ side by \code{MakeADFun}.
##' @param obj Object returned by \code{MakeADFun}
##' @return NULL
##' @examples
##' runExample("simple", thisR = TRUE) ## Create 'obj'
##' FreeADFun(obj) ## Free external pointers
##' obj$fn() ## Re-allocate external pointers
FreeADFun <- function(obj) {
    ## Free one external pointer - skipped if already deallocated
    ## (double free would be fatal)
    free <- function(ADFun) {
        if (! is.null(ADFun) ) {
            if ( ! isNullPointer(ADFun$ptr) ) {
                .Call("FreeADFunObject", ADFun$ptr, PACKAGE = obj$env$DLL)
            }
        }
    }
    ## Free the operation stacks stored in the model environment
    free(obj$env$Fun)
    free(obj$env$ADFun)
    free(obj$env$ADGrad)
    ## The sparse hessian tape lives inside the 'spHess' closure
    ADHess <- environment(obj$env$spHess)$ADHess
    free(ADHess)
    return(NULL)
}

## Collapse C++ source lines to one string, strip '/* ... */' and
## '//' style comments, and split back into lines.
.removeComments <- function(x){
    x <- paste(x,collapse="\n")
    remlong <- function(x)gsub("/\\*.*?\\*/","",x)
    remshort <- function(x)gsub("//[^\n]*\n","\n",x)
    x <- remshort(remlong(x))
    strsplit(x,"\n")[[1]]
}

## Does the C++ template use TMB's parallel constructs
## (a 'PARALLEL_' macro or a 'parallel_accumulator') outside comments ?
isParallelTemplate <- function(file){
    code <- readLines(file)
    code <- .removeComments(code)
    length(grep("^[ \t]*PARALLEL_",code))>0 ||
        length(grep("^[ \t]*parallel_accumulator",code))>0
}

## Query the 'openmp' attribute of the DLL's framework information
isParallelDLL <- function(DLL) {
    attr( .Call("getFramework", PACKAGE = DLL), "openmp")
}

##' Control number of OpenMP threads used by a TMB model.
##'
##' This function controls the number of parallel threads used by a TMB model compiled with OpenMP.
##' The number of threads is part of the configuration list \code{config()} of the DLL.
##' The value only affects parallelization of the DLL. It does \emph{not} affect BLAS/LAPACK specific parallelization which has to be specified elsewhere.
##'
##' When a DLL is loaded, the number of threads is set to 1 by default.
##' To activate parallelization you have to explicitly call \code{openmp(nthreads)} after loading the DLL. Calling \code{openmp(max=TRUE)} should normally pick up the environment variable \code{OMP_NUM_THREADS}, but this may be platform dependent.
##'
##' An experimental option \code{autopar=TRUE} can be set to parallelize models automatically. This requires the model to be compiled with \code{framework="TMBad"} and \code{openmp=TRUE} without further requirements on the C++ code. If the C++ code already has explicit parallel constructs these will be ignored if automatic parallelization is enabled.
##' @title Control number of OpenMP threads used by a TMB model.
##' @param n Requested number of threads, or \code{NULL} to just read the current value.
##' @param max Logical; Set n to OpenMP runtime value 'omp_get_max_threads()'?
##' @param autopar Logical; use automatic parallelization - see details.
##' @param DLL DLL of a TMB model.
##' @return Number of threads.
openmp <- function(n=NULL, max=FALSE, autopar=NULL, DLL=getUserDLL()) {
    ## Upgrade the request to the OpenMP runtime maximum if asked to
    if (max) {
        n <- .Call("omp_num_threads", NULL, PACKAGE="TMB")
    }
    ## A concrete thread count was requested (or derived above): store it
    if (!is.null(n)) {
        config(nthreads=n, DLL=DLL)
    }
    ## Only touch the autopar flag when the caller supplied a logical
    if (is.logical(autopar)) {
        config(autopar=autopar, DLL=DLL)
    }
    ## Read back the active thread count and decorate the result
    nthreads <- config(DLL=DLL)$nthreads
    names(nthreads) <- DLL
    attr(nthreads, "autopar") <- as.logical(config(DLL=DLL)$autopar)
    nthreads
}

##' Compile a C++ template into a shared object file. OpenMP flag is set if the template is detected to be parallel.
##'
##' TMB relies on R's built in functionality to create shared libraries independent of the platform.
##' A template is compiled by \code{compile("template.cpp")}, which will call R's makefile with appropriate
##' preprocessor flags.
##' Compiler and compiler flags can be stored in a configuration file. In order of precedence either via
##' the file pointed at by R_MAKEVARS_USER or the file ~/.R/Makevars if it exists.
##' Additional configuration variables can be set with the \code{flags} and \code{...} arguments, which will override any
##' previous selections.
##'
##' @section Using a custom SuiteSparse installation:
##' Sparse matrix calculations play an important role in TMB. By default TMB uses a small subset of \code{SuiteSparse} available through the R package \code{Matrix}. This is sufficient for most use cases, however for some very large models the following extra features are worth considering:
##'
##' \itemize{
##' \item Some large models benefit from an extended set of graph reordering algorithms (especially METIS) not part of \code{Matrix}. It is common that these orderings can provide quite big speedups.
##' \item Some large models need sparse matrices with number of nonzeros exceeding the current 32 bit limitation of \code{Matrix}. Normally such cases will result in the cholmod error 'problem too large'. \code{SuiteSparse} includes 64 bit integer routines to address this problem. ##' } ##' ##' Experimental support for linking to a \emph{custom} \code{SuiteSparse} installation is available through two arguments to the \code{\link{compile}} function. The first argument \code{supernodal=TRUE} tells TMB to use the supernodal Cholesky factorization from the system wide \code{SuiteSparse} on the C++ side. This will affect the speed of the Laplace approximation when run internally (using arguments \code{intern} or \code{integrate} to \code{\link{MakeADFun}}). ##' ##' The second argument \code{longint=TRUE} tells TMB to use 64 bit integers for sparse matrices on the C++ side. This works in combination with \code{supernodal=TRUE} from Eigen version 3.4. ##' ##' On Windows a \code{SuiteSparse} installation can be obtained using the \code{Rtools} package manager. Start 'Rtools Bash' terminal and run: ##' \preformatted{ ##' pacman -Sy ##' pacman -S mingw-w64-{i686,x86_64}-suitesparse ##' } ##' ##' On Linux one should look for the package \code{libsuitesparse-dev}. ##' ##' @section Selecting the AD framework: ##' TMB supports two different AD libraries 'CppAD' and 'TMBad' selected via the argument \code{framework} which works as a switch to set one of two C++ preprocessor flags: 'CPPAD_FRAMEWORK' or 'TMBAD_FRAMEWORK'. The default value of \code{framework} can be set from R by \code{options("tmb.ad.framework")} or alternatively from the shell via the environment variable 'TMB_AD_FRAMEWORK'. Packages linking to TMB should set one of the two C++ preprocessor flags in Makevars. ##' ##' @section Order of compiler generated atomic functions: ##' The argument \code{max.order} controls the maximum derivative order of special functions (e.g. \code{pbeta}) generated by the compiler. 
By default the value is set to 3 which is sufficient to obtain the Laplace approximation (order 2) and its derivatives (order 3). However, sometimes a higher value may be needed. For example \code{framework='TMBad'} allows one to calculate the Hessian of the Laplace approximation, but that requires 4th order derivatives of special functions in use. A too small value will cause the runtime error 'increase TMB_MAX_ORDER'. Note that compilation time and binary size increases with \code{max.order}. ##' ##' @title Compile a C++ template to DLL suitable for MakeADFun. ##' @param file C++ file. ##' @param flags Character with compile flags. ##' @param safebounds Turn on preprocessor flag for bound checking? ##' @param safeunload Turn on preprocessor flag for safe DLL unloading? ##' @param openmp Turn on openmp flag? Auto detected for parallel templates. ##' @param libtmb Use precompiled TMB library if available (to speed up compilation)? ##' @param libinit Turn on preprocessor flag to register native routines? ##' @param tracesweep Turn on preprocessor flag to trace AD sweeps? (Silently disables \code{libtmb}) ##' @param framework Which AD framework to use ('TMBad' or 'CppAD') ##' @param supernodal Turn on preprocessor flag to use supernodal sparse Cholesky/Inverse from system wide suitesparse library ##' @param longint Turn on preprocessor flag to use long integers for Eigen's SparseMatrix StorageIndex ##' @param eigen.disable.warnings Turn on preprocessor flag to disable nuisance warnings. Note that this is not allowed for code to be compiled on CRAN. ##' @param max.order Maximum derivative order of compiler generated atomic special functions - see details. ##' @param ... Passed as Makeconf variables. 
##' @seealso \code{\link{precompile}}
compile <- function(file,flags="",safebounds=TRUE,safeunload=TRUE,
                    openmp=isParallelTemplate(file[1]),libtmb=TRUE,
                    libinit=TRUE,tracesweep=FALSE,framework=getOption("tmb.ad.framework"),
                    supernodal=FALSE,longint=FALSE,
                    eigen.disable.warnings=TRUE,
                    max.order=NULL,
                    ...){
    framework <- match.arg(framework, c("CppAD", "TMBad"))
    ## Handle extra list(...) arguments plus modifications
    dotargs <- list(...)
    CPPFLAGS <- PKG_LIBS <- CLINK_CPPFLAGS <- NULL ## Visible binding (CRAN)
    ## Append text to the Makeconf variable named by the (unevaluated) LHS
    ## symbol, e.g. CPPFLAGS %+=% "-DFOO" updates dotargs$CPPFLAGS.
    '%+=%' <- function(VAR, x) {
        VAR <- deparse(substitute(VAR))
        dotargs[[VAR]] <<- paste(dotargs[[VAR]], x)
    }
    if(.Platform$OS.type=="windows"){
        ## Overload system.file
        ## (short path names with forward slashes survive Makevars quoting)
        system.file <- function(...){
            ans <- base::system.file(...)
            chartr("\\", "/", shortPathName(ans))
        }
    }
    ## Quoted variant for paths that may contain spaces
    qsystem.file <- function(...) {
        paste0('"', system.file(...), '"')
    }
    ## Cannot use the pre-compiled library when enabling sweep tracing
    if (tracesweep) libtmb <- FALSE
    ## libtmb existence
    debug <- length(grep("-O0", flags)) && length(grep("-g", flags))
    fpath <- system.file(paste0("libs", Sys.getenv("R_ARCH")), package="TMB")
    f <- paste0(fpath, "/libTMB", if (openmp) "omp" else if (debug) "dbg", ".cpp")
    libtmb <- libtmb && file.exists(f)
    if(libtmb) file <- c(file, f)
    ## Function to create temporary makevars, Note:
    ## * R_MAKEVARS_USER overrules all other Makevars in tools:::.shlib_internal
    oldmvuser <- mvuser <- Sys.getenv("R_MAKEVARS_USER",NA)
    ## Restore (or unset) the caller's R_MAKEVARS_USER on exit
    if(is.na(oldmvuser)){
        on.exit(Sys.unsetenv("R_MAKEVARS_USER"))
    } else {
        on.exit(Sys.setenv(R_MAKEVARS_USER=oldmvuser))
    }
    if(is.na(mvuser) && file.exists(f <- path.expand("~/.R/Makevars")))
        mvuser <- f
    if(!is.na(mvuser)){
        cat("Note: Using Makevars in",mvuser,"\n")
    }
    ## Write a temporary Makevars consisting of the user's Makevars (if any)
    ## followed by our name=value overrides; point R_MAKEVARS_USER at it.
    makevars <- function(...){
        file <- tempfile()
        args <- unlist(list(...), use.names=TRUE)
        txt <- paste(names(args),args,sep="=")
        if(!is.na(mvuser)){
            if(file.exists(mvuser)){
                txt <- c(readLines(mvuser),txt)
            }
        }
        writeLines(txt,file)
        Sys.setenv(R_MAKEVARS_USER=file)
        file
    }
    ## Check that libname is valid C entry.
    libname <- sub("\\.[^\\.]*$","",basename(file[1]))
    if(safeunload){
        valid <- c(letters[1:26],LETTERS[1:26],0:9,"_")
        invalid <- setdiff(unique(strsplit(libname,"")[[1]]),valid)
        if(length(invalid)>0){
            cat("Your library name has invalid characters:\n")
            print(invalid)
            cat("It is recommended to replace invalid characters by underscore.\n")
            cat("Alternatively compile with safeunload=FALSE (not recommended).\n")
            stop()
        }
    }
    ## On windows the DLL must be unloaded before compiling
    if(.Platform$OS.type=="windows"){
        tr <- try(dyn.unload(dynlib(libname)),silent=TRUE)
        if(!is(tr,"try-error"))cat("Note: Library",paste0("'",dynlib(libname),"'"),"was unloaded.\n")
    }
    ## Includes and preprocessor flags specific for the template
    ## (the "-DFLAG"[logical] idiom drops the flag when the logical is FALSE)
    useRcppEigen <- !file.exists( system.file("include/Eigen",package="TMB") )
    useContrib <- file.exists( system.file("include/contrib",package="TMB") )
    ppflags <- paste(paste0("-I",qsystem.file("include",package="TMB")),
                     paste0("-I",qsystem.file("include",package="RcppEigen"))[useRcppEigen],
                     paste0("-I",qsystem.file("include/contrib",package="TMB"))[useContrib],
                     "-DTMB_SAFEBOUNDS"[safebounds],
                     "-DTMB_EIGEN_DISABLE_WARNINGS"[eigen.disable.warnings],
                     paste0("-DLIB_UNLOAD=R_unload_",libname)[safeunload],
                     "-DWITH_LIBTMB"[libtmb],
                     paste0("-DTMB_LIB_INIT=R_init_",libname)[libinit],
                     "-DCPPAD_FORWARD0SWEEP_TRACE"[tracesweep],
                     paste0("-D",toupper(framework),"_FRAMEWORK")
                     )
    ## *Very* primitive guess of suitesparse configuration
    ## (If wrong set supernodal=FALSE and tweak manually)
    if (supernodal) {
        if (framework != "TMBad")
            stop("'supernodal=TRUE' only works when framework='TMBad'")
        CPPFLAGS %+=% "-DTMBAD_SUPERNODAL -DEIGEN_USE_BLAS -DEIGEN_USE_LAPACKE"
        PKG_LIBS %+=% if (.Platform$OS.type=="windows") "-lcholmod -lcolamd -lamd -lsuitesparseconfig -lopenblas $(SHLIB_OPENMP_CXXFLAGS)" else "-lcholmod"
        CLINK_CPPFLAGS %+=% if (.Platform$OS.type=="windows") "" else "-I/usr/include/suitesparse"
    }
    ## Long integer support
    if (longint) {
        CPPFLAGS %+=% if (.Platform$OS.type=="windows") "-DTMB_SPARSE_STORAGE_INDEX='long long'" else "-DTMB_SPARSE_STORAGE_INDEX='long int'"
    }
    ## TMB_MAX_ORDER
    if (!is.null(max.order)) {
        CPPFLAGS %+=% paste0("-DTMB_MAX_ORDER=", max.order)
    }
    ## Makevars specific for template
    mvfile <- makevars(PKG_CPPFLAGS=ppflags,
                       PKG_LIBS=paste( "$(SHLIB_OPENMP_CXXFLAGS)"[openmp] ),
                       PKG_CXXFLAGS="$(SHLIB_OPENMP_CXXFLAGS)"[openmp],
                       CXXFLAGS=flags[flags!=""], ## Optionally override cxxflags
                       dotargs
                       )
    on.exit(file.remove(mvfile),add=TRUE)
    status <- .shlib_internal(file) ## Was: tools:::.shlib_internal(file)
    if(status!=0) stop("Compilation failed")
    status
}

##' Precompile the TMB library
##'
##' Precompilation can be used to speed up compilation of
##' templates. It is only necessary to run \code{precompile()} once,
##' typically right after installation of TMB. The function
##' \emph{prepares} TMB for precompilation, while the actual
##' pre-compilation takes place the first time you compile a model
##' after running \code{precompile()}.
##'
##' Note that the precompilation requires write access to the TMB
##' package folder. Three versions of the library will be prepared:
##' Normal, parallel and a debugable version.
##'
##' Precompilation works the same way on all platforms. The only known
##' side-effect of precompilation is that it increases the file size
##' of the generated binaries.
##' @title Precompile the TMB library in order to speed up compilation of templates.
##' @param all Precompile all or just the core parts of TMB ?
##' @param clean Remove precompiled libraries ?
##' @param trace Trace precompilation process ?
##' @param get.header Create files 'TMB.h' and 'TMB.cpp' in current working directory to be used as part of a project?
##' @param ... Not used.
##' @examples
##' \dontrun{
##' ## Prepare precompilation
##' precompile()
##' ## Perform precompilation by running a model
##' runExample(all = TRUE)
##' }
precompile <- function(all=TRUE, clean=FALSE, trace=TRUE, get.header=FALSE, ...){
    ## Restore the caller's working directory whatever happens below
    owdir <- getwd()
    on.exit(setwd(owdir))
    if (get.header) {
        ## Project mode: emit 'TMB.h' / 'TMB.cpp' into the current directory.
        ## TMB.h
        outfile <- paste(getwd(), "TMB.h", sep="/")
        code <- c(
            "#ifndef TMB_H",
            "#define TMB_H",
            "#ifdef TMB_PRECOMPILE",
            "#define TMB_PRECOMPILE_ATOMICS"[all],
            "#else",
            "#define HAVE_PRECOMPILED_ATOMICS"[all],
            "#define WITH_LIBTMB",
            "#endif",
            "#include <TMB.hpp>",
            precompileSource()[all],
            "#endif")
        writeLines(code, outfile)
        if(trace) message(outfile, " generated")
        ## TMB.cpp
        outfile <- paste(getwd(), "TMB.cpp", sep="/")
        code <- c(
            "#define TMB_PRECOMPILE",
            '#include "TMB.h"' )
        writeLines(code, outfile)
        if(trace) message(outfile, " generated")
    } else {
        ## Package mode: (re)generate precompilation sources inside the
        ## installed TMB package folder (requires write access).
        folder <- system.file(paste0("libs", Sys.getenv("R_ARCH")), package="TMB")
        setwd(folder)
        if(clean){
            f <- dir(pattern = "^libTMB")
            if(length(f) && trace) cat("Removing:", f, "\n")
            file.remove(f)
            ## Truncate precompile.hpp rather than removing it
            f <- system.file(paste0("include/precompile.hpp"), package="TMB")
            file.create(f)
            return(NULL)
        }
        ## Cleanup before applying changes:
        precompile(clean = TRUE)
        ## Precompile frequently used classes:
        outfile <- paste0(system.file("include", package="TMB"), "/precompile.hpp")
        if(all) writeLines(precompileSource(), outfile)
        ## Common source for the three library flavours (normal/omp/dbg)
        code <- c(
            "#undef TMB_LIB_INIT",
            "#undef LIB_UNLOAD",
            "#undef WITH_LIBTMB",
            "#undef TMB_PRECOMPILE_ATOMICS",
            "#define TMB_PRECOMPILE_ATOMICS 1",
            "#pragma message \"Running TMB precompilation...\""[trace],
            "#include <TMB.hpp>"
        )
        writeLines(code, "libTMB.cpp")
        writeLines(code, "libTMBomp.cpp")
        writeLines(code, "libTMBdbg.cpp")
        if(trace) message("Precompilation sources generated")
    }
}

##' Add the platform dependent dynlib extension. In order for examples
##' to work across platforms DLLs should be loaded by
##' \code{dyn.load(dynlib("name"))}.
##'
##' @title Add dynlib extension
##' @param name Library name without extension
##' @return Character
dynlib <- function(name)paste0(name,.Platform$dynlib.ext)

##' Create a cpp template to get started.
##'
##' This function generates a C++ template with a header and include
##' statement. Here is a brief overview of the C++ syntax used to code
##' the objective function. For a full reference see the Doxygen
##' documentation (more information at the package URL).
##'
##' Macros to read data and declare parameters:
##' \tabular{lll}{
##' \bold{Template Syntax} \tab \bold{C++ type} \tab \bold{R type} \cr
##' DATA_VECTOR(name) \tab vector<Type> \tab vector \cr
##' DATA_MATRIX(name) \tab matrix<Type> \tab matrix \cr
##' DATA_SCALAR(name) \tab Type \tab numeric(1) \cr
##' DATA_INTEGER(name) \tab int \tab integer(1) \cr
##' DATA_FACTOR(name) \tab vector<int> \tab factor \cr
##' DATA_IVECTOR(name) \tab vector<int> \tab integer \cr
##' DATA_SPARSE_MATRIX(name) \tab Eigen::SparseMatrix<Type> \tab dgTMatrix \cr
##' DATA_ARRAY(name) \tab array<Type> \tab array \cr
##' PARAMETER_MATRIX(name) \tab matrix<Type> \tab matrix \cr
##' PARAMETER_VECTOR(name) \tab vector<Type> \tab vector \cr
##' PARAMETER_ARRAY(name) \tab array<Type> \tab array \cr
##' PARAMETER(name) \tab Type \tab numeric(1) \cr
##' }
##'
##' Basic calculations:
##' \tabular{ll}{
##' \bold{Template Syntax} \tab \bold{Explanation} \cr
##' REPORT(x) \tab Report x back to R \cr
##' ADREPORT(x) \tab Report x back to R with derivatives \cr
##' vector<Type> v(n1); \tab R equivalent of v=numeric(n1) \cr
##' matrix<Type> m(n1,n2); \tab R equivalent of m=matrix(0,n1,n2) \cr
##' array<Type> a(n1,n2,n3); \tab R equivalent of a=array(0,c(n1,n2,n3)) \cr
##' v+v,v-v,v*v,v/v \tab Pointwise binary operations \cr
##' m*v \tab Matrix-vector multiply \cr
##' a.col(i) \tab R equivalent of a[,,i] \cr
##' a.col(i).col(j) \tab R equivalent of a[,j,i] \cr
##' a(i,j,k) \tab R equivalent of a[i,j,k] \cr
##' exp(v) \tab Pointwise math \cr
##' m(i,j) \tab R equivalent of m[i,j] \cr
##' v.sum() \tab R equivalent of sum(v) \cr
##' m.transpose() \tab R equivalent of t(m) \cr
##' }
##'
##' Some distributions are available as C++ templates with syntax close to R's distributions:
##' \tabular{ll}{
##' \bold{Function header} \tab \bold{Distribution} \cr
##' dnbinom2(x,mu,var,int give_log=0) \tab Negative binomial with mean and variance \cr
##' dpois(x,lambda,int give_log=0) \tab Poisson distribution as in R \cr
##' dlgamma(y,shape,scale,int give_log=0) \tab log-gamma distribution \cr
##' dnorm(x,mean,sd,int give_log=0) \tab Normal distribution as in R \cr
##' }
##' @title Create cpp template to get started.
##' @param file Optional name of cpp file.
##' @examples
##' template()
template <- function(file=NULL){
    ## Read the canned template shipped with the package
    x <- readLines(system.file("template.cpp",package="TMB"))
    if(!is.null(file)){
        ## Never overwrite an existing user file
        if(file.exists(file))stop("File '",file,"' exists")
        writeLines(x,file)
    } else cat(paste(x,collapse="\n"))
}

##' Create a skeleton of required R-code once the cpp template is ready.
##'
##' @title Create minimal R-code corresponding to a cpp template.
##' @param file cpp template file.
##' @examples
##' file <- system.file("examples/simple.cpp", package = "TMB")
##' Rinterface(file)
Rinterface <- function(file){
    ## DLL name = file name without directory and extension
    lib <- sub("\\.[^\\.]*$", "", basename(file))
    src <- .removeComments(readLines(file))
    ## Render a character vector of names as the text of an R list() call
    items2list <- function(items){
        if (length(items) == 0) return("list(),")
        paste0("list(\n", paste(paste0(" ", items, "= "), collapse=",\n"), "\n ),")
    }
    ## Pull the macro argument out of every line matching 'pattern'
    extract <- function(pattern){
        sub(pattern, "\\1", grep(pattern, src, value=TRUE))
    }
    dataitems      <- extract("^[ ]*DATA_.*?\\((.*?)\\).*")
    parameteritems <- extract("^[ ]*PARAMETER.*?\\((.*?)\\).*")
    qlib <- paste0("\"", lib, "\"")
    skeleton <- c("library(TMB)",
                  paste0("dyn.load(dynlib(", qlib, "))"),
                  "MakeADFun(",
                  paste0(" data=", items2list(dataitems)),
                  paste0(" parameters=", items2list(parameteritems)),
                  paste0(" DLL=", qlib),
                  ")\n")
    cat(paste(skeleton, collapse="\n"))
}

## Recommended settings:
## * General non-convex case: smartsearch=TRUE
## * Strictly convex case: smartsearch=FALSE and maxit=20
## * Quadratic case: smartsearch=FALSE and maxit=1

##' Generalized newton optimizer used for the inner optimization problem.
##'
##' If \code{smartsearch=FALSE} this function performs an ordinary newton optimization
##' on the function \code{fn} using an exact sparse hessian function.
##' A fixed stepsize may be controlled by \code{alpha} so that the iterations are
##' given by:
##' \deqn{u_{n+1} = u_n - \alpha f''(u_n)^{-1}f'(u_n)}
##'
##' If \code{smartsearch=TRUE} the hessian is allowed to become negative definite
##' preventing ordinary newton iterations.
In this situation the newton iterations are performed on
##' a modified objective function defined by adding a quadratic penalty around the expansion point \eqn{u_0}:
##' \deqn{f_{t}(u) = f(u) + \frac{t}{2} \|u-u_0\|^2}{f_t(u) = f(u) + t/2 |u-u_0|^2}
##' This function's hessian ( \eqn{f''(u)+t I} ) is positive definite for \eqn{t} sufficiently
##' large. The value \eqn{t} is updated at every iteration: If the hessian is positive definite \eqn{t} is
##' decreased, otherwise increased. Detailed control of the update process can be obtained with the
##' arguments \code{ustep}, \code{power} and \code{u0}.
##' @title Generalized newton optimizer.
##' @param par Initial parameter.
##' @param fn Objective function.
##' @param gr Gradient function.
##' @param he Sparse hessian function.
##' @param trace Print tracing information?
##' @param maxit Maximum number of iterations.
##' @param tol Convergence tolerance.
##' @param alpha Newton stepsize in the fixed stepsize case.
##' @param smartsearch Turn on adaptive stepsize algorithm for non-convex problems?
##' @param mgcmax Refuse to optimize if the maximum gradient component is too steep.
##' @param super Supernodal Cholesky?
##' @param silent Be silent?
##' @param ustep Adaptive stepsize initial guess between 0 and 1.
##' @param power Parameter controlling adaptive stepsize.
##' @param u0 Parameter controlling adaptive stepsize.
##' @param grad.tol Gradient convergence tolerance.
##' @param step.tol Stepsize convergence tolerance.
##' @param tol10 Try to exit if last 10 iterations not improved more than this.
##' @param env Environment for cached Cholesky factor.
##' @param ... Currently unused.
##' @return List with solution similar to \code{optim} output.
##' @seealso \code{\link{newtonOption}}
newton <- function (par,fn,gr,he,
                    trace = 1, maxit = 100, tol = 1e-8, alpha = 1,
                    smartsearch = TRUE, mgcmax = 1e60, super = TRUE,
                    silent = TRUE,
                    ustep = 1, ## Start out optimistic: Newton step
                    power=.5, ## decrease=function(u)const*u^power
                    u0=1e-4, ## Increase u=0 to this value
                    grad.tol = tol, step.tol = tol,
                    tol10 = 1e-3, ## Try to exit if last 10 iterations not improved much
                    env=environment(), ...)
{
    ## Test if a Cholesky factor is present inside the environment of "he" function.
    ## If not - create one...
    if(is.null(L <- env$L.created.by.newton)) {
        h.pattern <- he(par)
        ## Make sure Cholesky is succesful
        h.pattern@x[] <- 0
        diag(h.pattern) <- 1
        L <- env$L.created.by.newton <- Cholesky(h.pattern, super=super)
    }
    ## Solve h %*% x = g by updating the cached factor L in place
    chol.solve <- function(h,g){
        ##.Call("destructive_CHM_update",L,h,as.double(0),PACKAGE="Matrix")
        updateCholesky(L,h)
        as.vector(solveCholesky(L,g))
    }
    ## optimize <- stats::optimize
    nam <- names(par)
    par <- as.vector(par)
    g <- h <- NULL
    ## pd.check: Quick test for hessian being positive definite
    iterate <- function(par,pd.check=FALSE) {
        if(pd.check){
            if(is.null(h))return(TRUE)
            h <<- he(par) ## Make sure hessian is updated
            PD <- updateCholesky(L, h)
            return( PD )
        }
        g <<- as.vector(gr(par))
        if(any( !is.finite(g) ))stop("Newton dropout because inner gradient had non-finite components.")
        if(is.finite(mgcmax) && max(abs(g)) > mgcmax)
            stop("Newton dropout because inner gradient too steep.")
        ## NOTE(review): the following span appears corrupted in this copy of
        ## the file - text between '<' and '>' characters (comparisons such as
        ## a '< grad.tol' convergence test and the body of the smartsearch
        ## penalty function f(t)) seems to have been stripped by an
        ## HTML-sanitizing step. Restore from the upstream TMB sources before
        ## use; kept verbatim here.
        if(max(abs(g))-m)){ ## h+t*I negative definite
            ustep <<- min(ustep,invphi(-m))
            return(NaN)
        }
        }
        ## Passed...
        ## Now do more expensive check...
        ##ok <- !is.character(try( .Call("destructive_CHM_update",L,h,as.double(t),PACKAGE="Matrix") , silent=silent))
        ok <- updateCholesky(L, h, t)
        if(!ok)return(NaN)
        dp <- as.vector(solveCholesky(L,g))
        p <<- par-dp
        ans <- fn(p)
        if(gradient)attr(ans,"gradient") <- sum(solveCholesky(L,dp)*gr(p))
        ans
        }
        ## Adaptive stepsize algorithm (smartsearch)
        phi <- function(u)1/u-1
        invphi <- function(x)1/(x+1)
        fu <- function(u){f(phi(u))}
        ## ========== Functions controling the algorithm
        ## Important requirements:
        ## 1. increase(u) and decrease(u) takes values in [0,1]
        ## 2. increase(u)>u and decrease(u)<u
        ## 3. increase(u)->1 when u->1
        ## 4. decrease(u)->0 when u->0
        ## Properties of algorithm:
        ## * ustep must converge towards 1 (because 1 <==> Positive definite hessian)
        ## power<1 - controls the boundary *repulsion*
        increase <- function(u)u0+(1-u0)*u^power
        ##decrease <- function(u)1-increase(1-u)
        ## Solve problem with accuracy when u apprach 0
        decrease <- function(u)ifelse(u>1e-10,1-increase(1-u),(1-u0)*power*u)
        ##plot(increase,0,1,ylim=c(0,1));plot(decrease,0,1,add=TRUE);abline(0,1)
        ustep <<- increase(ustep)
        repeat{
            fu.value <- fu(ustep)
            if(is.finite(fu.value)){
                eps <- sqrt(.Machine$double.eps)
                if(fu.value>fnpar+eps){
                    if(ustep<=0)break ## Avoid trap
                    ustep <<- decrease(ustep)
                } else break
            } else {
                if(ustep<=0)break ## Avoid trap
                ustep <<- decrease(ustep)
            }
        }
        if(trace>=1)cat("value:", fu.value,"mgc:",max(abs(g)), "ustep:", ustep ,"\n")
        return(p)
        }
        dpar <- chol.solve(h,g) ## ordinary newton
        if(trace>=1)cat("mgc:",max(abs(g)) ,"\n")
        par - alpha * dpar
    }
    norm <- function(x) sqrt(sum(x^2))
    fn.history <- numeric(maxit)
    fail <- 0
    ## Main iteration loop with early-exit heuristic
    for (i in seq_len(maxit)){
        parold <- par
        if(trace>=1)cat("iter:",i," ")
        par <- iterate(par)
        fn.history[i] <- fn(par)
        if(i>10){
            tail10 <- tail(fn.history[1:i],10)
            improve10 <- tail10[1] - tail10[length(tail10)]
            ## NOTE(review): corrupted - presumably 'if(improve10 < tol10){ if(trace>=1)...'
            if(improve10=1)cat("Not improving much - will try early exit...")
            pd <- iterate(par,pd.check=TRUE)
            if(trace>=1)cat("PD hess?:",pd,"\n")
            if(pd)break
            fail <- fail+1
        }
    }
    ## NOTE(review): corrupted - presumably a '< step.tol' step test and a 'fail>5' guard
    if(norm(par-parold)5){
        stop("Newton drop out: Too many failed attempts.")
    }
    }
    pd <- iterate(par,pd.check=TRUE)
    if(!pd)stop("Newton failed to find minimum.")
    names(par) <- nam
    value <- fn(par)
    g <- gr(par)
    if(trace>=1)cat("mgc:",max(abs(g)),"\n")
    list(par=par,value=value,gradient=g,hessian=h,iterations=i)
}

##' Inner-problem options can be set for a model object using this
##' function.
##'
##' @title Set newton options for a model object.
##' @param obj Object from \code{\link{MakeADFun}} for which to change settings.
##' @param ... Parameters for the \code{\link{newton}} optimizer to set.
##' @return List of updated parameters.
newtonOption <- function(obj,...){
    if(!is.environment(obj$env)){
        stop("First argument to 'newtonOption' must be a model object (output from MakeADFun)")
    }
    x <- list(...)
    ## Only options that are formal arguments of newton() may be set
    validOpts <- setdiff(names(formals(newton)),
                         c("par","fn","gr","he","env","..."))
    inValidOpts <- setdiff(names(x), validOpts)
    if(length(inValidOpts) > 0){
        stop("Invalid newton option(s):", paste0(" '",inValidOpts,"'"))
    }
    obj$env$inner.control[names(x)] <- x
    invisible( obj$env$inner.control )
}

## Build a function evaluating the sparse Hessian of the joint likelihood.
## Returns NULL when the model has no random effects.
sparseHessianFun <- function(obj, skipFixedEffects=FALSE) {
    r <- obj$env$random
    if (length(r) == 0)
        return (NULL)
    skip <- if(skipFixedEffects) {
        ## Assuming that random effects comes first in parameter list, we can set
        ## skip <- as.integer(length(obj$env$par)-length(r)) ## ==number of fixed effects
        seq_along(obj$env$par)[-r]
    } else {
        integer(0) ## <-- Empty integer vector
    }
    ## ptr.list
    ADHess <- MakeADHessObject(obj$env$data,
                               obj$env$parameters,
                               obj$env$reportenv,
                               gf=obj$env$ADGrad$ptr,
                               skip=skip, ## <-- Skip this index vector of parameters
                               DLL=obj$env$DLL)
    ## Experiment !
    TransformADFunObject(ADHess,
                         method = "reorder_random",
                         random_order = r,
                         mustWork = 0L)
    ev <- function(par, set_tail=0) {
        EvalADFunObject(ADHess, par, set_tail = set_tail)
    }
    n <- as.integer(length(obj$env$par))
    ## Assemble the (lower-triangular, symmetric) sparsity pattern once
    M <- new("dsTMatrix",
             i = as.integer(attr(ADHess$ptr,"i")),
             j = as.integer(attr(ADHess$ptr,"j")),
             x = ev(obj$env$par),
             Dim = c(n,n),
             uplo = "L")
    Hfull <- as(M, "CsparseMatrix") ## WAS: as(M,"dsCMatrix")
    Hrandom <- Hfull[r,r,drop=FALSE]
    ## before returning the function, remove unneeded variables from the environment:
    rm(skip, n, M)
    function(par = obj$env$par, random=FALSE, set_tail=0) {
        if(!random) {
            Hfull@x[] <- ev(par)
            Hfull
        }
        else if(skipFixedEffects) {
            ## Overwrite x-slot of cached Hrandom in place (C side)
            .Call("setxslot", Hrandom, ev(par), PACKAGE="TMB")
        } else {
            Hfull@x[] <- ev(par, set_tail=set_tail)
            Hfull[r,r]
        }
    }
}

## Debugging utility: Check sparse hessian.
## By comparing with gradient differentiated in random direction.
checkSparseHessian <- function(obj,par=obj$env$last.par,
                               w = rnorm(length(par)), ## random direction
                               plot=TRUE,...){
    r <- obj$env$random
    w[-r] <- 0
    res1 <- obj$env$f(par, order = 1, type = "ADGrad", rangeweight = w)[r]
    res2 <- (obj$env$spHess(par)%*%w)[r]
    res <- list(x=res1,y=res2)
    if(plot){
        plot(res,...)
        abline(0,1,col="red")
    }
    invisible(res)
}

##' Aggressively tries to reduce fill-in of sparse Cholesky factor by
##' running a full suite of ordering algorithms. NOTE: requires a
##' specialized installation of the package. More information is
##' available at the package URL.
##'
##' @title Run symbolic analysis on sparse Hessian
##' @param obj Output from \code{MakeADFun}
##' @return NULL
runSymbolicAnalysis <- function(obj){
    ok <- .Call("have_tmb_symbolic",PACKAGE="TMB")
    if(!ok){
        cat("note: tmb_symbolic not installed\n")
        return(NULL)
    }
    h <- obj$env$spHess(random=TRUE)
    h@x[] <- 0
    diag(h) <- 1
    L <- .Call("tmb_symbolic",h,PACKAGE="TMB")
    obj$env$L.created.by.newton <- L
    NULL
}

## url: Can be local or remote zipfile
## skip.top.level: Skips the top level of unzipped directory.
## Install a user-contributed include archive into TMB's 'include/contrib'
## folder. 'url' may be a local zip file or a remote one (downloaded).
install.contrib <- function(url, skip.top.level = FALSE) {
    ## Restore working directory on exit (we chdir into the temp folder below)
    owd <- getwd()
    on.exit(setwd(owd))
    contrib.folder <- paste0(system.file("include",package="TMB"),
                             "/contrib" )
    if( !file.exists( contrib.folder ) ) {
        dir.create(contrib.folder)
    }
    zipfile <- tempfile(fileext = ".zip")
    if(file.exists(url)) {
        ## Local zip file
        file.copy(url, zipfile)
    } else {
        ## Remote zipfile
        download.file(url, destfile = zipfile)
    }
    tmp.folder <- tempfile()
    dir.create(tmp.folder)
    df <- unzip(zipfile, list=TRUE)
    unzip(zipfile, exdir = tmp.folder)
    setwd(tmp.folder)
    ## If unzipped archive is a single folder then strip "-master" from name
    ## (typical for GitHub 'download zip' archives)
    if(length(dir()) == 1) {
        if(file_test("-d", dir())) {
            file.rename(dir(), sub("-master$","",dir()))
        }
        if(skip.top.level) setwd(dir())
    }
    file.copy(dir(), contrib.folder, recursive=TRUE)
    ## Clean up temporaries
    file.remove(zipfile)
    unlink(tmp.folder, recursive=TRUE)
    cat("NOTE:",contrib.folder,"\n")
    dir(contrib.folder)
}

##' Version information on API and ABI.
##'
##' The R interface to \code{TMB} roughly consists of two components: (1) The 'API' i.e. R functions documented in this manual and (2) C-level entry points, here referred to as the 'ABI', which controls the C++ code. The latter can be shown by \code{getDLLRegisteredRoutines(DLL)} where \code{DLL} is the shared library generated by the \link{compile} function (or by a package linking to \code{TMB}).
##' A DLL compiled with one version of \code{TMB} can be used with another version of \code{TMB} provided that the 'ABI' is the same. We therefore define the 'ABI version' as the oldest ABI compatible version. This number can then be used to tell if re-compilation of a DLL is necessary after updating \code{TMB}.
##' @return List with components \code{package} (API version) and \code{abi} (ABI version) inspired by corresponding function in the \code{Matrix} package.
TMB.Version <- function() { list(package=packageVersion("TMB"), abi=abi()) } TMB/R/checker.R0000644000176200001440000003110214634646733012641 0ustar liggesusers##' Check consistency of various parts of a TMB implementation. ##' Requires that user has implemented simulation code for the data and ##' optionally random effects. (\emph{Beta version; may change without ##' notice}) ##' ##' This function checks that the simulation code of random effects and ##' data is consistent with the implemented negative log-likelihood ##' function. It also checks whether the approximate \emph{marginal} ##' score function is central indicating whether the Laplace ##' approximation is suitable for parameter estimation. ##' ##' Denote by \eqn{u} the random effects, \eqn{\theta} the parameters ##' and by \eqn{x} the data. The main assumption is that the user has ##' implemented the joint negative log likelihood \eqn{f_{\theta}(u,x)} ##' satisfying ##' \deqn{\int \int \exp( -f_{\theta}(u,x) ) \:du\:dx = 1} ##' It follows that the joint and marginal score functions are central: ##' \enumerate{ ##' \item \eqn{E_{u,x}\left[\nabla_{\theta}f_{\theta}(u,x)\right]=0} ##' \item \eqn{E_{x}\left[\nabla_{\theta}-\log\left( \int \exp(-f_{\theta}(u,x))\:du \right) \right]=0} ##' } ##' For each replicate of \eqn{u} and \eqn{x} joint and marginal ##' gradients are calculated. Appropriate centrality tests are carried ##' out by \code{\link{summary.checkConsistency}}. An asymptotic ##' \eqn{\chi^2} test is used to verify the first identity. Power of ##' this test increases with the number of simulations \code{n}. The ##' second identity holds \emph{approximately} when replacing the ##' marginal likelihood with its Laplace approximation. A formal test ##' would thus fail eventually for large \code{n}. Rather, the gradient ##' bias is transformed to parameter scale (using the estimated ##' information matrix) to provide an estimate of parameter bias caused ##' by the Laplace approximation. 
##'
##' @section Simulation/re-estimation:
##' A full simulation/re-estimation study is performed when \code{estimate=TRUE}.
##' By default \link[stats]{nlminb} will be used to perform the minimization, and output is stored in a separate list component 'estimate' for each replicate.
##' Should a custom optimizer be needed, it can be passed as a user function via the same argument (\code{estimate}).
##' The function (\code{estimate}) will be called for each simulation as \code{estimate(obj)} where \code{obj} is the simulated model object.
##' Current default corresponds to \code{estimate = function(obj) nlminb(obj$par,obj$fn,obj$gr)}.
##' @title Check consistency and Laplace accuracy
##' @param obj Object from \code{MakeADFun}
##' @param par Parameter vector (\eqn{\theta}) for simulation. If
##' unspecified use the best encountered parameter of the object.
##' @param hessian Calculate the hessian matrix for each replicate ?
##' @param estimate Estimate parameters for each replicate ?
##' @param n Number of simulations
##' @param observation.name Optional; Name of simulated observation
##' @return List with gradient simulations (joint and marginal)
##' @seealso \code{\link{summary.checkConsistency}}, \code{\link{print.checkConsistency}}
##' @examples
##' \dontrun{
##' runExample("simple")
##' chk <- checkConsistency(obj)
##' chk
##' ## Get more details
##' s <- summary(chk)
##' s$marginal$p.value ## Laplace exact for Gaussian models
##' }
checkConsistency <- function(obj,
                             par = NULL,
                             hessian = FALSE,
                             estimate = FALSE,
                             n = 100,
                             observation.name = NULL
                             ) {
    ## Optimizer: either user supplied function or the nlminb default.
    if (!is.logical(estimate)) {
        Optimizer <- match.fun(estimate)
        estimate <- TRUE
    } else {
        Optimizer <- function(obj) nlminb(obj$par, obj$fn, obj$gr)
    }
    ## Args to construct copy of 'obj'
    args <- as.list(obj$env)[intersect(names(formals(MakeADFun)), ls(obj$env))]
    ## Determine parameter and full parameter to use
    r0 <- r <- obj$env$random
    if( is.null(par) ) {
        ## Default case: Optimization has been carried out by user
        if (is.null(obj$env$last.par.best)) {
            stop("'par' not specified.")
        }
        parfull <- obj$env$last.par.best
        if( any(r) ) par <- parfull[-r] else par <- parfull
    } else {
        ## Custom case: User specifies parameter vector (fixed effects)
        parfull <- obj$env$par
        if( any(r) ) parfull[-r] <- par else parfull <- par
    }
    ## Get names of random effects (excluding profiled parameters)
    if(any(obj$env$profile)) {
        r0 <- r[ ! as.logical(obj$env$profile) ]
        names.profile <- unique(names(parfull[r[as.logical(obj$env$profile)]]))
    } else {
        names.profile <- NULL
    }
    names.random <- unique(names(parfull[r0]))
    ## Use 'parfull' for new object
    args$parameters <- obj$env$parList(par, par = parfull)
    ## Fix all profiled parameters (map them to NA factors)
    map.profile <- lapply(args$parameters[names.profile], function(x)factor(x*NA))
    args$map <- c(args$map, map.profile)
    ## Find randomeffects character
    args$random <- names.random
    args$regexp <- FALSE
    ## Are we in 'fast' (no retape) mode ?
    ## 'fast' mode moves the observation into the parameter vector so each
    ## simulation replicate can be evaluated without re-taping the objective.
    fast <- !is.null(observation.name)
    if (fast) {
        ## Move data -> parameters
        ## Note: We really do need to know 'observation.name'. There
        ## could be other (deterministic) items in 'data'...
        args$parameters <- c(args$data[observation.name], args$parameters)
        args$data[observation.name] <- NULL
    }
    ## Create new object
    newobj <- do.call("MakeADFun", args)
    newobj0 <- newobj ## backup
    if (fast) {
        ## Logical index of observation entries within the parameter vector
        parobs <- names(newobj$par) %in% observation.name
        ## NOTE: Simulation is stored as part of 'newobj$env$par'
        expandpar <- function(par) {
            ## Includes par fixed *and* simulation:
            ans <- newobj0$env$par[newobj0$env$lfixed()]
            ans[!parobs] <- par
            ans
        }
        ## FIXME: No 'obj$he()' in this object
        ## Wrapper object whose fn/gr operate on the reduced (non-observation)
        ## parameter vector only.
        newobj <- list(fn=function(x)newobj0$fn(expandpar(x)),
                       gr=function(x)newobj0$gr(expandpar(x))[!parobs],
                       par=newobj0$par[!parobs],
                       env=newobj0$env
                       )
    }
    ## One replicate: simulate data (and possibly random effects), update the
    ## model object accordingly, and return joint/marginal gradients at 'par'.
    doSim <- function(...) {
        simdata <- newobj0$simulate(newobj0$env$par, complete=TRUE)
        if (!fast) {
            newobj$env$data <- simdata
        }
        ## Check that random effects have been simulated
        haveRandomSim <- all( names.random %in% names(simdata) )
        ## Set good inner starting values
        if (haveRandomSim) {
            if (fast) {
                for (nm in names.random) {
                    newobj$env$par[names(newobj$env$par) == nm] <- simdata[[nm]]
                }
            } else {
                newobj$env$parameters[names.random] <- simdata[names.random]
            }
        }
        ## FIXME: Mapped random effects not supported (yet) for 'fast' approach
        if (!fast && haveRandomSim) {
            ## Snippet taken from MakeADFun to account for mapped parameters:
            map <- args$map[names(args$map) %in% names.random]
            if (length(map) > 0) {
                param.map <- lapply(names(map),
                                    function(nam) {
                                        updateMap(newobj$env$parameters[[nam]], map[[nam]])
                                    })
                keepAttrib(newobj$env$parameters[names(map)]) <- param.map
            }
        }
        if (fast) {
            ## Set simulated data
            for (nm in observation.name) {
                newobj$env$par[names(newobj$env$par) == nm] <- simdata[[nm]]
            }
            ## Set inits
            newobj$env$last.par.best <- newobj$env$par
            newobj$env$value.best <- Inf
        } else {
            ## This approach *must* redo Cholesky
            newobj$env$L.created.by.newton <- NULL
            newobj$env$retape()
        }
        ans <- list()
        if (haveRandomSim) {
            ## Joint gradient wrt the fixed parameters (random effect and, in
            ## fast mode, observation entries are stripped off).
            ans$gradientJoint <- newobj$env$f(order=1)
            if(!is.null(newobj$env$random))
                ans$gradientJoint <- ans$gradientJoint[-newobj$env$random]
            if (fast)
                ans$gradientJoint <- ans$gradientJoint[!parobs]
        }
        ## Marginal (Laplace) gradient
        ans$gradient <- newobj$gr(par)
        if (hessian) ans$hessian <- optimHess(par, newobj$fn, newobj$gr)
        if (estimate) {
            newobj$par <- par ## just in case...
            ans$objective.true <- newobj$fn(par)
            ans$estimate <- try(Optimizer(newobj))
        }
        ans
    }
    ans <- lapply(seq_len(n), doSim)
    attr(ans, "par") <- par
    class(ans) <- "checkConsistency"
    ans
}
##' Summarize output from \code{\link{checkConsistency}}
##'
##' @title Summarize output from \code{\link{checkConsistency}}
##' @param object Output from \code{\link{checkConsistency}}
##' @param na.rm Logical; Remove failed simulations ?
##' @param ...
Not used
##' @return List of diagnostics
##' @method summary checkConsistency
##' @S3method summary checkConsistency
summary.checkConsistency <- function(object, na.rm=FALSE, ...) {
    out <- list()
    out$par <- attr(object, "par")
    ## Collect a named component across replicates; columns = replicates.
    collect <- function(comp) {
        do.call("cbind", lapply(object, function(rep) as.vector(rep[[comp]])))
    }
    out$gradientJoint <- collect("gradientJoint")
    out$gradient <- collect("gradient")
    ## Centrality test of a score matrix: chi-square test that the mean
    ## score is zero, plus bias estimate on parameter scale.
    scoreTest <- function(scores) {
        if (!is.matrix(scores)) return( list(p.value=NA, bias=NA) )
        if (na.rm) {
            bad <- as.logical( colSums( !is.finite(scores) ) )
            scores <- scores[, !bad, drop=FALSE]
        }
        meanScore <- rowMeans(scores)
        npar <- length(meanScore)
        nsim <- ncol(scores)
        bias <- p.value <- NULL
        if (nsim < npar) {
            stop("Too few simulations ", nsim, " compared to number of parameters ", npar)
        }
        ## Variance of score = Information
        infoMat <- var(t(scores))
        invInfo <- try(solve(infoMat), silent=TRUE)
        if (inherits(invInfo, "try-error")) {
            warning("Failed to invert information matrix")
            bias <- attr(object, "par") * NA
            p.value <- NA
        } else {
            z <- sqrt(nsim) * meanScore
            stat <- as.vector( t(z) %*% invInfo %*% z )
            p.value <- 1 - pchisq(stat, df=npar)
            bias <- -invInfo %*% meanScore
        }
        bias <- as.vector(bias)
        names(bias) <- names(attr(object, "par"))
        list(p.value=p.value, bias=bias)
    }
    out$joint <- scoreTest(out$gradientJoint)
    out$marginal <- scoreTest(out$gradient)
    ## Optional simulation/re-estimation study
    if (!is.null(object[[1]]$estimate)) {
        collectEst <- function(comp) {
            do.call("cbind", lapply(object, function(rep) as.vector(rep$estimate[[comp]])))
        }
        est <- list()
        parMat <- t(collectEst("par"))
        colnames(parMat) <- names(out$par)
        est$par <- as.data.frame(parMat)
        est$objective <- drop(collectEst("objective"))
        est$deviance <- 2 * ( drop(collect("objective.true")) - est$objective )
        est$deviance.p.value <- ks.test(est$deviance, "pchisq", df = length(out$par))$p.value
        out$convergence <- drop(collectEst("convergence")) ## Set it
        out$estimate <- est
    }
    out
}
##' Print diagnostics output from \code{\link{checkConsistency}}
##'
##' @title Print output from \code{\link{checkConsistency}}
##' @param x Output from \code{\link{checkConsistency}}
##' @param ... Not used
##' @return NULL
##' @method print checkConsistency
##' @S3method print checkConsistency
print.checkConsistency <- function(x, ...) {
    s <- summary(x)
    cat("Parameters used for simulation:\n")
    print(s$par)
    cat("\n")
    cat("Test correct simulation (p.value):\n")
    print(s$joint$p.value)
    alpha <- .05 ## FIXME: Perhaps make option
    simOk <- ( s$joint$p.value > alpha )
    s$sim.ok <- simOk
    ## Verdict on the simulation check
    verdict <-
        if (is.na(simOk)) "Full simulation was not available\n"
        else if (!simOk)  "Simulation does *not* appear to be correct !!!\n"
        else              "Simulation appears to be correct\n"
    cat(verdict)
    ## Check Laplace:
    cat("\n")
    cat("Estimated parameter bias:\n")
    print(s$marginal$bias)
    ## Estimate info:
    if (!is.null(s$estimate)) {
        cat("\n")
        cat("summary(.)$estimate contains:\n")
        print(names(s$estimate))
    }
    invisible(x)
}
if(FALSE) {
    library(TMB)
    runExample("sam", exfolder="../../tmb_examples")
    set.seed(123)
    qw <- checkConsistency(obj, opt$par, n=100)
    print.checkConsistency(qw)
    runExample("ar1_4D", exfolder="../../tmb_examples")
    set.seed(123)
    qw <- checkConsistency(obj, opt$par, n=100)
    qw
}
TMB/R/benchmark.R0000644000176200001440000000676714634646733013176 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen
## License: GPL-2
##' Benchmark parallel templates
##'
##' By default this function will perform timings of the most critical
##' parts of an AD model, specifically
##' \enumerate{
##' \item Objective function of evaluated template.
##' \item Gradient of evaluated template.
##' \item Sparse hessian of evaluated template.
##' \item Cholesky factorization of sparse hessian.
##' }
##' (for pure fixed effect models only the first two).
##' Expressions to time can be overwritten by the user (\code{expr}).
##' A \code{plot} method is available for Parallel benchmarks.
##'
##' @title Benchmark parallel templates
##' @param obj Object from \code{MakeADFun}
##' @param n Number of replicates to obtain reliable results.
##' @param expr Optional expression to benchmark instead of default.
##' @param cores Optional vector of cores.
##' @examples
##' \dontrun{
##' runExample("linreg_parallel",thisR=TRUE) ## Create obj
##' ben <- benchmark(obj,n=100,cores=1:4)
##' plot(ben)
##' ben <- benchmark(obj,n=10,cores=1:4,expr=expression(do.call("optim",obj)))
##' plot(ben)
##' }
benchmark <- function(obj,n=10,expr=NULL,cores=NULL){
  ## Multi-core case is delegated to the parallel driver below.
  if(!is.null(cores)){
    return(parallelBenchmark(obj,n=n,cores=cores,expr=expr))
  }
  if(is.null(expr)){
    ## Expressions to time
    expr <- expression(
        template.likelihood = obj$env$f(order=0),
        template.gradient = obj$env$f(order=1),
        template.sparse.hessian = obj$env$spHess(random=TRUE),
        cholesky=updateCholesky(L,h)
    )
  }
  else if(is.null(names(expr)))
      ## Name user expressions by their deparsed first line
      names(expr) <- vapply(expr, function(.) deparse(.)[[1L]], "")
  ## Wrap each expression in a repetition loop; '.data' is cleared each
  ## iteration (presumably to defeat caching between calls - TODO confirm).
  addLoopToExpression <- function(y)
      substitute(for (i in seq_len(n)) { obj$env$.data <- NULL; EE }, list(EE=y))
  expr <- lapply(expr, addLoopToExpression)
  if(!is.null(obj$env$random)){
    h <- obj$env$spHess() ## Avoid timing the construction
    h@x[] <- 0
    diag(h) <- 1
    L <- Cholesky(h,super=TRUE)
  } else {
    ## Pure fixed effect model: no sparse hessian / Cholesky timings
    expr$template.sparse.hessian <- NULL
    expr$cholesky <- NULL
  }
  ans <- lapply(expr,function(x)system.time(eval(x)))
  ans <- do.call("rbind",ans)
  ## Keep only the 'elapsed' column
  as.data.frame(ans)[c(3)]
}
## Internal helper function: re-run 'benchmark' for each requested number
## of OpenMP cores (re-taping the object after each core-count change).
parallelBenchmark <- function(obj,n,cores=1:4,...){
  ans <- lapply(cores,function(nc){
    openmp(nc)
    obj$env$retape()
    benchmark(obj,n=n,cores=NULL,...)
  })
  ans <- t(do.call("cbind",ans))
  rownames(ans) <- cores
  names(dimnames(ans)) <- c("ncores","")
  ans <- t(ans)
  class(ans) <- "parallelBenchmark"
  ans
}
##' Plot result of parallel benchmark
##'
##' @title Plot result of benchmark
##' @param x Object to plot
##' @param type Plot type
##' @param ... Further plot arguments
##' @param show Plot relative speedup or relative time?
##' @param legendpos Position of legend
##' @return NULL
##' @rdname benchmark
##' @method plot parallelBenchmark
##' @S3method plot parallelBenchmark
plot.parallelBenchmark <- function(x,type="b",...,show=c("speedup","time"),legendpos="topleft"){
  show <- match.arg(show)
  ## Order columns by increasing core count
  ncores <- as.numeric(colnames(x))
  x <- x[,order(ncores),drop=FALSE]
  ncores <- sort(ncores)
  if(show=="time"){
    ## Times relative to the single-core column, with ideal 1/x curve overlaid
    matplot(ncores,t(x/x[,1]),ylab="Time (relative to one core)",type=type,...)
    plot(function(x)1/x,min(ncores),max(ncores),add=TRUE,col="grey")
  }
  if(show=="speedup"){
    ## Speedup relative to one core, with ideal linear speedup overlaid
    matplot(ncores,t(x[,1]/x),type=type,ylab="Speedup (relative to one core)",...)
    abline(0,1,col="grey")
  }
  if(!is.null(legendpos)){
    n <- nrow(x)
    if(is.null(rownames(x)))rownames(x) <- 1:n
    legend(legendpos,legend=rownames(x),col=1:n,lty=1:n)
  }
  NULL
}
TMB/R/normalize.R0000644000176200001440000000716314634646733013237 0ustar liggesusers##' Normalize process likelihood using the Laplace approximation.
##'
##' If the random effect likelihood contribution of a model has been
##' implemented without proper normalization (i.e. lacks the normalizing
##' constant), then this function can perform the adjustment
##' automatically. In order for this to work, the model must include a
##' flag that disables the data term so that the un-normalized random effect
##' (negative log) density is returned from the model template.
##' Automatic process normalization may be useful if either the
##' normalizing constant is difficult to implement, or if its calculation
##' involves so many operations that it becomes infeasible to include in
##' the AD machinery.
##'
##' @param obj Model object from \code{MakeADFun} without proper normalization of the random effect likelihood.
##' @param flag Flag to disable the data term from the model.
##' @param value Value of 'flag' that signifies to not include the data term.
##' @return Modified model object that can be passed to an optimizer.
normalize <- function(obj, flag, value=0) {
    obj1 <- obj ## Data included
    tracemgc <- obj1$env$tracemgc
    obj1$env$tracemgc <- FALSE
    ## Deep copy: 'obj0' becomes the flagged (data term disabled) object, so
    ## the normalized objective is obtained as obj1$fn - obj0$fn below.
    obj0 <- unserialize(serialize(obj, NULL))
    obj0$env$L.created.by.newton <- NULL ## Can't use same Cholesky object
    if (missing(flag) ||
        ! is.character(flag) ||
        ! all( flag %in% names(obj0$env$data) ) ) {
        stop("'flag' must be a character vector naming one or more data items.")
    }
    if ( (length(flag)==1) && length(value) > 0) {
        ## Examples: flag="flag" and value=0 ("flag" ~ INTEGER)
        ##           flag="keep" and value=0 ("keep" ~ DATA_VECTOR_INDICATOR)
        obj0$env$data[[flag]][] <- value
    } else {
        ## Examples: flag="obs" and value=numeric(0) ("obs" ~ DATA_VECTOR)
        ##           flag=c("obs1","obs2") and value=list(numeric(0),numeric(0))
        if (!is.list(value)) value <- list(value)
        obj0$env$data[flag] <- value
    }
    obj0$retape()
    newobj <- list()
    newobj$par <- obj1$par
    newobj$env <- obj1$env
    ## Workaround: Insert NAs in invalid hessian block H[fixed, fixed]
    ## if accessed by e.g. 'sdreport':
    random <- NULL ## CRAN check: no visible binding
    ## Patch 'f' inside the shared environment so first order 'ADGrad'
    ## evaluations flag the fixed-effect entries as invalid (NA).
    local({
        f_old <- f
        f <- function(...) {
            args <- list(...)
            ans <- f_old(...)
            if ((args$order == 1) &&
                (args$type == "ADGrad") &&
                is.vector(args$rangeweight) ) {
                if ( ! all( args$rangeweight[-random] == 0 ) ) {
                    ans[-random] <- NA
                }
            }
            ans
        }
    }, newobj$env)
    ## Normalized objective = full objective minus flagged objective; keep
    ## track of the best encountered value/parameter as MakeADFun does.
    newobj$fn <- function(x = newobj$par) {
        env <- newobj$env
        value.best <- env$value.best
        last.par.best <- env$last.par.best
        ans <- obj1$fn(x) - obj0$fn(x)
        last.par <- env$last.par
        if (is.finite(ans)) {
            if (ans < value.best) {
                env$last.par.best <- last.par
                env$value.best <- ans
            }
        }
        ans
    }
    newobj$gr <- function(x = newobj$par) {
        ans <- obj1$gr(x) - obj0$gr(x)
        if (tracemgc) cat("outer mgc: ", max(abs(ans)), "\n")
        ans
    }
    newobj$he <- function(x = newobj$par) {
        ans <- obj1$he(x) - obj0$he(x)
        ans
    }
    newobj$report <- obj1$env$report
    newobj$simulate <- obj1$env$simulate
    ## Both underlying objects must be re-taped together
    newobj$retape <- function(...) {
        obj1$retape(...); obj0$retape(...)
    }
    ## Ensure all slots are present:
    nm.diff <- setdiff(names(obj1), names(newobj))
    newobj[nm.diff] <- obj1[nm.diff]
    newobj
}
TMB/R/zzz.R0000644000176200001440000000366114641202777012075 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen
## License: GPL-2

## .First.lib <- function(lib, pkg) {
##     library.dynam("TMB", pkg, lib)
## }

## https://github.com/lme4/lme4/issues/768
## https://github.com/kaskr/adcomp/issues/387
## Matrix ABI version as reported by the installed 'Matrix' package
## (0 for Matrix versions predating the 'abi' component).
get_abi_version <- function() {
    if (utils::packageVersion("Matrix") < "1.6-2")
        return(numeric_version("0"))
    Matrix::Matrix.Version()[["abi"]]
}
## ABI version recorded at TMB build time (baked in at install)
.Matrix.abi.build.version <- get_abi_version()
## Warn if the currently installed Matrix ABI differs from the one TMB
## was built against; returns TRUE when consistent.
checkMatrixPackageVersion <- function(warn=TRUE) {
    cur_version <- get_abi_version()
    built_version <- .Matrix.abi.build.version
    result_ok <- cur_version == built_version
    if (!result_ok) {
        warning(
            "Package version inconsistency detected.\n",
            "TMB was built with Matrix ABI version ", built_version, "\n",
            "Current Matrix ABI version is ", cur_version, "\n",
            "Please re-install 'TMB' from source using install.packages('TMB', type = 'source') ",
            "or ask CRAN for a binary version of 'TMB' matching CRAN's 'Matrix' package"
        )
    }
    return(result_ok)
}
.onLoad <- function(lib, pkg) {
    library.dynam("TMB", pkg, lib)
    checkMatrixPackageVersion(getOption("TMB.check.Matrix", TRUE))
    ## Select AD framework (CppAD or TMBad) used by TMB::compile
    tmb.ad.framework <- getOption("tmb.ad.framework", NULL)
    if (is.null(tmb.ad.framework))
        tmb.ad.framework <- Sys.getenv("TMB_AD_FRAMEWORK", "CppAD")
    options("tmb.ad.framework" = tmb.ad.framework)
}
.onUnload <- function(libpath) {
    library.dynam.unload("TMB", libpath)
}
## .LastLib <- function(libpath)
## {
##     library.dynam.unload("TMB", libpath)
## }
## Pre-build the 'simple' example DLL on first attach if missing.
.onAttach <- function(lib, pkg) {
    exfolder <- system.file("examples", package = "TMB")
    dll <- paste0(exfolder, Sys.getenv("R_ARCH"), "/simple", .Platform$dynlib.ext)
    if(!file.exists(dll)) runExample("simple", dontrun=TRUE, eigen.disable.warnings=FALSE)
}
TMB/R/examples.R0000644000176200001440000000504514634646733013056 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen
## License: GPL-2
##' Compile and run a test example (\code{runExample()} shows all available examples).
##'
##' @title Run one of the test examples.
##' @param name Character name of example.
##' @param all Run all the test examples?
##' @param thisR Run inside this R?
##' @param clean Cleanup before compile?
##' @param exfolder Alternative folder with examples.
##' @param dontrun Build only (don't run) and remove temporary object files ?
##' @param subarch Build in sub-architecture specific folder ?
##' @param ... Passed to \code{\link{compile}}.
runExample <- function(name=NULL,all=FALSE,thisR=TRUE,
                       clean=FALSE,exfolder=NULL,
                       dontrun=FALSE, subarch=TRUE,...){
  cwd <- getwd()
  on.exit(setwd(cwd))
  if(is.null(exfolder))exfolder <- system.file("examples",package="TMB")
  setwd(exfolder)
  arch <- Sys.getenv("R_ARCH")
  if(arch != "" && subarch){
    ## Build in a sub-architecture specific copy of the example sources
    arch <- sub("/", "", arch)
    if( !file.exists(arch) ){
      dir.create(arch)
      ## NOTE(review): pattern looks like a glob but 'dir' treats it as a
      ## regex, so it may match more files than intended - verify.
      file.copy(dir(pattern="*.[cpp|R]"), arch)
    }
    setwd(arch)
  }
  ## An example is valid when both a '<name>.R' and '<name>.cpp' exist
  validExamples <- function(){
    f1 <- sub("\\.[^\\.]*$","",dir(pattern=".R$"))
    f2 <- sub("\\.[^\\.]*$","",dir(pattern=".cpp$"))
    intersect(f1,f2)
  }
  exnames <- validExamples()
  cppnames <- paste0(exnames,".cpp")
  ## Format info as text: first '//' comment line of each .cpp becomes the
  ## description shown in the example listing.
  M <- max(nchar(exnames))
  info <- sapply(cppnames,function(x){readLines(x)[[1]]})
  info[substring(info,1,2)!="//"] <- ""
  info <- sub("^// *","",info)
  ## '@' is used as a temporary placeholder so strwrap keeps the alignment
  tmp <- gsub(" ","@",format(paste("\"",exnames,"\"",":",sep=""),width=M+4))
  info <- paste(tmp,info,sep="")
  info <- strwrap(info, width = 60, exdent = M+4)
  info <- gsub("@"," ",info)
  if(all){
    ## Recurse over all examples
    lapply(exnames,runExample,
           thisR=thisR,clean=clean,exfolder=exfolder,
           dontrun=dontrun,subarch=subarch,...)
    return(invisible(NULL))
  }
  if(is.null(name)){
    ## No example requested: print the listing and return the names
    txt <- paste("Examples in " ,"\'",exfolder,"\':","\n\n",sep="")
    cat(txt)
    writeLines(info)
    return(invisible(exnames))
  }
  if(clean){
    cat("Cleanup:\n")
    file.remove(dynlib(name))
    file.remove(paste0(name,".o"))
  }
  if(!file.exists(dynlib(name))){
    cat("Building example",name,"\n")
    time <- system.time(compile(paste0(name,".cpp"),...))
    cat("Build time",time["elapsed"],"seconds\n\n")
    if(dontrun)file.remove(paste0(name,".o"))
  }
  if(!dontrun){
    cat("Running example",name,"\n")
    if(!thisR){
      system(paste("R --vanilla < ",name,".R",sep=""))
    } else {
      source(paste(name,".R",sep=""),echo=TRUE)
    }
  }
}
TMB/R/utils.R0000644000176200001440000000160014634646733012375 0ustar liggesusers## Get *subset* of inverse of sparse matrix Q.
## Subset is guarantied to contain pattern of Q.
## Q: Sparse positive definite matrix.
## L: Cholesky factor of Q.
## diag: Return just diagonal of inverse ?
solveSubset <- function(Q,
                        L = Matrix::Cholesky(Q, super=TRUE, perm=TRUE),
                        diag = FALSE) {
    stopifnot( is(L, "dCHMsuper") )
    invQ <- .Call("tmb_invQ", L, PACKAGE = "TMB")
    ## Undo the fill-reducing permutation used by the Cholesky factor
    iperm <- Matrix::invPerm(L@perm + 1L)
    if (diag) {
        invQ <- diag(invQ)[iperm]
    } else {
        invQ <- invQ[iperm, iperm, drop=FALSE]
    }
    invQ
}
## Get information on ADFun object pointer
## NOTE(review): the 'DLL' argument is immediately overwritten by
## ADFun$DLL, so the parameter (and its getUserDLL default) is
## effectively unused - verify intent.
info <- function(ADFun, DLL = getUserDLL()) {
    ptr <- ADFun$ptr
    DLL <- ADFun$DLL
    ok <- is(ptr, "externalptr") && !isNullPointer(ptr)
    if (!ok) stop("'ptr' is not a valid external pointer")
    .Call("InfoADFunObject", ptr, PACKAGE=DLL)
}
TMB/R/dotCall.R0000644000176200001440000000732014634646733012624 0ustar liggesusers## -----------------------------------------------------------------------------
## Fixed R-API to .Call within MakeADFun
## -----------------------------------------------------------------------------
## General notes:
## - Some TMB functionality (DATA_UPDATE) implicitly assumes that 'env'
##   can be found as the enclosing environment (parent.env) of
##   'reportenv' (!).
It follows that reportenv must always be passed ## by the caller. getParameterOrder <- function(data, parameters, reportenv, DLL) { control <- NULL .Call("getParameterOrder", data, parameters, reportenv, control, PACKAGE=DLL) } ## ----------------------------------------------------------------------------- ## Constructors: MakeDoubleFunObject <- function(data, parameters, reportenv, DLL) { control <- NULL ans <- .Call("MakeDoubleFunObject", data, parameters, reportenv, control, PACKAGE=DLL) ans <- registerFinalizer(ans, DLL) ans } MakeADFunObject <- function(data, parameters, reportenv, ADreport=FALSE, DLL) { control <- list( report = as.integer(ADreport) ) ans <- .Call("MakeADFunObject", data, parameters, reportenv, control, PACKAGE=DLL) ans <- registerFinalizer(ans, DLL) ans } MakeADGradObject <- function(data, parameters, reportenv, random=NULL, f=NULL, DLL) { control <- list( f=f ) if (!is.null(random)) control$random <- as.integer(random) ans <- .Call("MakeADGradObject", data, parameters, reportenv, control, PACKAGE=DLL) ans <- registerFinalizer(ans, DLL) ans } ## gf (optional) = already calculated gradient object. ## skip (optional) = index vector of parameters to skip. 
## Sparse hessian object; 'skip' indexes parameters excluded from the
## sparsity pattern, 'gf' optionally reuses an existing gradient object.
MakeADHessObject <- function(data, parameters, reportenv, gf=NULL, skip=integer(0), DLL) {
    ctrl <- list(gf=gf, skip=as.integer(skip))
    ptr <- .Call("MakeADHessObject2", data, parameters, reportenv, ctrl, PACKAGE=DLL)
    registerFinalizer(ptr, DLL)
}

## -----------------------------------------------------------------------------
## Evaluators

## Evaluate a double fun object at 'theta'. Optionally run the template's
## simulate blocks or query REPORT() dimensions.
EvalDoubleFunObject <- function(Fun, theta, do_simulate=FALSE, get_reportdims=FALSE) {
    ctrl <- list(do_simulate    = as.integer(do_simulate),
                 get_reportdims = as.integer(get_reportdims))
    .Call("EvalDoubleFunObject", Fun$ptr, as.double(theta), ctrl, PACKAGE=Fun$DLL)
}

## Evaluate an AD function object at 'theta'. The control list is passed
## straight to the C++ side; element names must therefore stay fixed.
EvalADFunObject <- function(ADFun, theta,
                            order=0,
                            hessiancols=NULL,
                            hessianrows=NULL,
                            sparsitypattern=FALSE,
                            rangecomponent=1,
                            rangeweight=NULL,
                            dumpstack=FALSE,
                            doforward=TRUE,
                            set_tail=FALSE,
                            keepx=NULL,
                            keepy=NULL,
                            data_changed=FALSE) {
    ## NULL rangeweight stays NULL; otherwise coerce to double
    wt <- if (is.null(rangeweight)) NULL else as.double(rangeweight)
    ctrl <- list(order           = as.integer(order),
                 hessiancols     = as.integer(hessiancols),
                 hessianrows     = as.integer(hessianrows),
                 sparsitypattern = as.integer(sparsitypattern),
                 rangecomponent  = as.integer(rangecomponent),
                 rangeweight     = wt,
                 dumpstack       = as.integer(dumpstack),
                 doforward       = as.integer(doforward),
                 set_tail        = as.integer(set_tail),
                 keepx           = as.integer(keepx),
                 keepy           = as.integer(keepy),
                 data_changed    = as.integer(data_changed))
    .Call("EvalADFunObject", ADFun$ptr, theta, ctrl, PACKAGE=ADFun$DLL)
}
TMB/R/config.R0000644000176200001440000000271014634646733012505 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen
## License: GPL-2
##' Get or set internal configuration variables of user's DLL.
##'
##' A model compiled with the \code{TMB} C++ library has several
##' configuration variables set by default. The variables can be read
##' and modified using this function. The meaning of the variables can
##' be found in the Doxygen documentation.
##'
##' @title Get or set internal configuration variables
##' @param ...
Variables to set
##' @param DLL Name of user's DLL. Auto-detected if missing.
##' @return List with current configuration
##' @examples
##' \dontrun{
##' ## Load library
##' dyn.load(dynlib("mymodel"))
##' ## Read the current settings
##' config(DLL="mymodel")
##' ## Reduce memory peak of a parallel model by creating tapes in serial
##' config(tape.parallel=0, DLL="mymodel")
##' obj <- MakeADFun(..., DLL="mymodel")
##' }
config <- function(...,DLL=getUserDLL()){
    new <- list(...)
    ## Get: the DLL fills environment 'e' with the current settings
    e <- new.env()
    .Call("TMBconfig",e,as.integer(1),PACKAGE=DLL)
    conf <- eapply(e,as.integer)
    ## Set: overwrite requested entries and push the full list back
    conf[names(new)] <- new
    conf <- lapply(conf,as.integer)
    ## Trick: evaluating environment() with list 'conf' as the evaluation
    ## frame yields an environment containing conf's variables
    e <- local(environment(),conf)
    .Call("TMBconfig",e,as.integer(2),PACKAGE=DLL)
    ## Get: re-read so the returned list reflects what the DLL accepted
    e <- new.env()
    .Call("TMBconfig",e,as.integer(1),PACKAGE=DLL)
    conf <- eapply(e,as.integer)
    ## Backwards compatibility (don't break if members unavailable)
    if (is.null(conf$autopar)) conf$autopar <- FALSE
    if (is.null(conf$nthreads)) conf$nthreads <- 1
    conf
}
TMB/R/abi_table.R0000644000176200001440000000026114634646741013140 0ustar liggesusers## Generated by abi_table.sh
## TMB versions at which the ABI changed (ascending order is required
## for the lookup in 'abi' below).
abi.break <- numeric_version(
    c("1.0", "1.7.10", "1.7.17", "1.8.0", "1.9.7")
)
## ABI version = latest break point not exceeding the installed version
abi <- function() abi.break[sum(abi.break <= packageVersion("TMB"))]
TMB/R/validation.R0000644000176200001440000010602214634646733013373 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen
## License: GPL-2
##' Calculate one-step-ahead (OSA) residuals for a latent variable
##' model. (\emph{Beta version; may change without notice})
##'
##' Given a TMB latent variable model this function calculates OSA
##' standardized residuals that can be used for goodness-of-fit
##' assessment. The approach is based on a factorization of the joint
##' distribution of the \emph{observations} \eqn{X_1,...,X_n} into
##' successive conditional distributions.
##' Denote by ##' \deqn{F_n(x_n) = P(X_n \leq x_n | X_1 = x_1,...,X_{n-1}=x_{n-1} )} ##' the one-step-ahead CDF, and by ##' \deqn{p_n(x_n) = P(X_n = x_n | X_1 = x_1,...,X_{n-1}=x_{n-1} )} ##' the corresponding point probabilities (zero for continuous distributions). ##' In case of continuous observations the sequence ##' \deqn{\Phi^{-1}(F_1(X_1))\:,...,\:\Phi^{-1}(F_n(X_n))} ##' will be iid standard normal. These are referred to as the OSA residuals. ##' In case of discrete observations draw (unit) uniform variables ##' \eqn{U_1,...,U_n} and construct the randomized OSA residuals ##' \deqn{\Phi^{-1}(F_1(X_1)-U_1 p_1(X_1))\:,...,\:\Phi^{-1}(F_n(X_n)-U_n p_n(X_n))} ##' These are also iid standard normal. ##' ##' @section Choosing the method: ##' The user must specify the method used to calculate the residuals - see detailed list of method descriptions below. ##' We note that all the methods are based on approximations. While the default 'oneStepGaussianoffMode' often represents a good compromise between accuracy and speed, it cannot be assumed to work well for all model classes. ##' As a rule of thumb, if in doubt whether a method is accurate enough, you should always compare with the 'oneStepGeneric' which is considered the most accurate of the available methods. ##' \describe{ ##' \item{method="fullGaussian"}{ ##' This method assumes that the joint distribution of data \emph{and} ##' random effects is Gaussian (or well approximated by a ##' Gaussian). It does not require any changes to the user ##' template. However, if used in conjunction with \code{subset} ##' and/or \code{conditional} a \code{data.term.indicator} is required ##' - see the next method. ##' } ##' \item{method="oneStepGeneric"}{ ##' This method calculates the one-step conditional probability ##' density as a ratio of Laplace approximations. 
The approximation is ##' integrated (and re-normalized for improved accuracy) using 1D ##' numerical quadrature to obtain the one-step CDF evaluated at each ##' data point. The method works in the continuous case as well as the ##' discrete case (\code{discrete=TRUE}). ##' ##' It requires a specification of a \code{data.term.indicator} ##' explained in the following. Suppose the template for the ##' observations given the random effects (\eqn{u}) looks like ##' \preformatted{ ##' DATA_VECTOR(x); ##' ... ##' nll -= dnorm(x(i), u(i), sd(i), true); ##' ... ##' } ##' ##' Then this template can be augmented with a ##' \code{data.term.indicator = "keep"} by changing the template to ##' \preformatted{ ##' DATA_VECTOR(x); ##' DATA_VECTOR_INDICATOR(keep, x); ##' ... ##' nll -= keep(i) * dnorm(x(i), u(i), sd(i), true); ##' ... ##' } ##' ##' The new data vector (\code{keep}) need not be passed from \R. It ##' automatically becomes a copy of \code{x} filled with ones. ##' ##' Some extra parameters are essential for the method. ##' Pay special attention to the integration domain which must be set either via \code{range} (continuous case) or \code{discreteSupport} (discrete case). Both of these can be set simultanously to specify a mixed continuous/discrete distribution. For example, a non-negative distribution with a point mass at zero (e.g. the Tweedie distribution) should have \code{range=c(0,Inf)} and \code{discreteSupport=0}. ##' Several parameters control accuracy and appropriate settings are case specific. By default, a spline is fitted to the one-step density before integration (\code{splineApprox=TRUE}) to reduce the number of density evaluations. However, this setting may have negative impact on accuracy. The spline approximation can then either be disabled or improved by noting that \code{...} arguments are passed to \link{tmbprofile}: Pass e.g. \code{ystep=20, ytol=0.1}. 
##' Finally, it may be useful to look at the one step predictive distributions on either log scale (\code{trace=2}) or natural scale (\code{trace=3}) to determine which alternative methods might be appropriate. ##' } ##' \item{method="oneStepGaussian"}{ ##' This is a special case of the generic method where the one step ##' conditional distribution is approximated by a Gaussian (and can ##' therefore be handled more efficiently). ##' } ##' \item{method="oneStepGaussianOffMode"}{ ##' This is an approximation of the "oneStepGaussian" method that ##' avoids locating the mode of the one-step conditional density. ##' } ##' \item{method="cdf"}{ ##' The generic method can be slow due to the many function ##' evaluations used during the 1D integration (or summation in the ##' discrete case). The present method can speed up this process but ##' requires more changes to the user template. The above template ##' must be expanded with information about how to calculate the ##' negative log of the lower and upper CDF: ##' \preformatted{ ##' DATA_VECTOR(x); ##' DATA_VECTOR_INDICATOR(keep, x); ##' ... ##' nll -= keep(i) * dnorm(x(i), u(i), sd(i), true); ##' nll -= keep.cdf_lower(i) * log( pnorm(x(i), u(i), sd(i)) ); ##' nll -= keep.cdf_upper(i) * log( 1.0 - pnorm(x(i), u(i), sd(i)) ); ##' ... ##' } ##' ##' The specialized members \code{keep.cdf_lower} and ##' \code{keep.cdf_upper} automatically become copies of \code{x} ##' filled with zeros. ##' } ##' } ##' ##' @title Calculate one-step-ahead (OSA) residuals for a latent variable model. ##' @param obj Output from \code{MakeADFun}. ##' @param observation.name Character naming the observation in the template. ##' @param data.term.indicator Character naming an indicator data variable in the template (not required by all methods - see details). ##' @param method Method to calculate OSA (see details). ##' @param subset Index vector of observations that will be added one by one during OSA. 
By default \code{1:length(observations)} (with \code{conditional} subtracted). ##' @param conditional Index vector of observations that are fixed during OSA. By default the empty set. ##' @param discrete Logical; Are observations discrete? (assumed FALSE by default). ##' @param discreteSupport Possible outcomes of discrete part of the distribution (\code{method="oneStepGeneric"} and \code{method="cdf"} only). ##' @param range Possible range of continuous part of the distribution (\code{method="oneStepGeneric"} only). ##' @param seed Randomization seed (discrete case only). If \code{NULL} the RNG seed is untouched by this routine (recommended for simulation studies). ##' @param parallel Run in parallel using the \code{parallel} package? ##' @param trace Logical; Trace progress? More options available for \code{method="oneStepGeneric"} - see details. ##' @param reverse Do calculations in opposite order to improve stability? (currently enabled by default for \code{oneStepGaussianOffMode} method only) ##' @param splineApprox Represent one-step conditional distribution by a spline to reduce number of density evaluations? (\code{method="oneStepGeneric"} only). ##' @param ... Control parameters for OSA method ##' @return \code{data.frame} with OSA \emph{standardized} residuals ##' in column \code{residual}. In addition, depending on the method, the output ##' includes selected characteristics of the predictive distribution (current row) given past observations (past rows), notably the \emph{conditional} ##' \describe{ ##' \item{mean}{Expectation of the current observation} ##' \item{sd}{Standard deviation of the current observation} ##' \item{Fx}{CDF at current observation} ##' \item{px}{Density at current observation} ##' \item{nll}{Negative log density at current observation} ##' \item{nlcdf.lower}{Negative log of the lower CDF at current observation} ##' \item{nlcdf.upper}{Negative log of the upper CDF at current observation} ##' } ##' \emph{given past observations}. 
##' If column \code{randomize} is present, it indicates that randomization has been applied for the row. ##' @examples ##' ######################## Gaussian case ##' runExample("simple") ##' osa.simple <- oneStepPredict(obj, observation.name = "x", method="fullGaussian") ##' qqnorm(osa.simple$residual); abline(0,1) ##' ##' \dontrun{ ##' ######################## Poisson case (First 100 observations) ##' runExample("ar1xar1") ##' osa.ar1xar1 <- oneStepPredict(obj, "N", "keep", method="cdf", discrete=TRUE, subset=1:100) ##' qqnorm(osa.ar1xar1$residual); abline(0,1) ##' } oneStepPredict <- function(obj, ## Names of data objects (not all are optional) observation.name = NULL, data.term.indicator = NULL, method=c( "oneStepGaussianOffMode", "fullGaussian", "oneStepGeneric", "oneStepGaussian", "cdf"), subset = NULL, conditional = NULL, discrete = NULL, discreteSupport = NULL, range = c(-Inf, Inf), seed = 123, parallel = FALSE, trace = TRUE, reverse = (method == "oneStepGaussianOffMode"), splineApprox = TRUE, ... ){ if (missing(observation.name)) stop("'observation.name' must define a data component") if (!(observation.name %in% names(obj$env$data))) stop("'observation.name' must be in data component") method <- match.arg(method) if (is.null(data.term.indicator)){ if(method != "fullGaussian"){ stop(paste0("method='",method,"' requires a 'data.term.indicator'")) } } ## if (!missing(discreteSupport) && !missing(range)) ## stop("Cannot specify both 'discreteSupport' and 'range'") obs <- as.vector(obj$env$data[[observation.name]]) ## Argument 'discrete' if(is.null(discrete)){ ndup <- sum(duplicated(setdiff(obs, discreteSupport))) if(ndup > 0){ warning("Observations do not look continuous. 
Number of duplicates = ", ndup) stop("Please specify 'discrete=TRUE' or 'discrete=FALSE'.") } discrete <- FALSE ## Default } stopifnot(is.logical(discrete)) ## Handle partially discrete distributions randomize <- NULL partialDiscrete <- !discrete && !missing(discreteSupport) if (partialDiscrete) { if (! (method %in% c("cdf", "oneStepGeneric")) ) stop("Mixed discrete/continuous distributions are currently for 'cdf' and 'oneStepGeneric' methods only") if (missing(range) && method == "oneStepGeneric") stop("Mixed discrete/continuous distributions must specify 'range' of continuous part") randomize <- obs %in% discreteSupport } ## Using wrong method for discrete data ? if (discrete){ if (! (method %in% c("oneStepGeneric", "cdf")) ){ stop(paste0("method='",method,"' is not for discrete observations.")) } } ## Default subset/permutation: if(is.null(subset)){ subset <- 1:length(obs) subset <- setdiff(subset, conditional) } ## Check if(!is.null(conditional)){ if(length(intersect(subset, conditional)) > 0){ stop("'subset' and 'conditional' have non-empty intersection") } } unconditional <- setdiff(1:length(obs), union(subset, conditional)) ## Args to construct copy of 'obj' args <- as.list(obj$env)[intersect(names(formals(MakeADFun)), ls(obj$env))] ## Use the best encountered parameter for new object if(length(obj$env$random)) args$parameters <- obj$env$parList(par = obj$env$last.par.best) else args$parameters <- obj$env$parList(obj$env$last.par.best) ## Fix all non-random components of parameter list names.random <- unique(names(obj$env$par[obj$env$random])) names.all <- names(args$parameters) fix <- setdiff(names.all, names.random) map <- lapply(args$parameters[fix], function(x)factor(x*NA)) ran.in.map <- names.random[names.random %in% names(args$map)] if(length(ran.in.map)) map <- c(map, args$map[ran.in.map]) # don't overwrite random effects mapping args$map <- map ## Overwrite map ## Find randomeffects character args$random <- names.random args$regexp <- FALSE ## Do we 
need to change, or take derivatives wrt., observations? ## (search the code to see if a method uses "observation(k,y)" or ## just "observation(k)"). obs.are.variables <- (method != "cdf") ## Move data$name to parameter$name if necessary if (obs.are.variables) { args$parameters[observation.name] <- args$data[observation.name] args$data[observation.name] <- NULL } ## Make data.term.indicator in parameter list if(!is.null(data.term.indicator)){ one <- rep(1, length(obs)) zero <- rep(0, length(obs)) if(method=="cdf"){ args$parameters[[data.term.indicator]] <- cbind(one, zero, zero) } else { args$parameters[[data.term.indicator]] <- cbind(one) } ## Set attribute to tell the order of observations ord <- rep(NA, length(obs)) ord[conditional] <- 0 ## First (out of bounds) ord[subset] <- seq_along(subset) ord[unconditional] <- length(obs) + 1 ## Never (out of bounds) if (any(is.na(ord))) { stop("Failed to determine the order of obervations") } attr(args$parameters[[data.term.indicator]], "ord") <- as.double(ord - 1) } ## Pretend these are *not observed*: if(length(unconditional)>0){ if(is.null(data.term.indicator)) stop("Failed to disable some data terms (because 'data.term.indicator' missing)") args$parameters[[data.term.indicator]][unconditional, 1] <- 0 } ## Pretend these are *observed*: if(length(conditional)>0){ if(is.null(data.term.indicator)) stop("Failed to enable some data terms (because 'data.term.indicator' missing)") args$parameters[[data.term.indicator]][conditional, 1] <- 1 } ## Make map for observations and indicator variables: makeFac <- function(x){ fac <- as.matrix(x) fac[] <- 1:length(x) fac[conditional, ] <- NA fac[unconditional, ] <- NA fac[subset, ] <- 1:(length(subset)*ncol(fac)) ## Permutation factor(fac) } map <- list() if (obs.are.variables) map[[observation.name]] <- makeFac(obs) if(!is.null(data.term.indicator)){ map[[data.term.indicator]] <- makeFac(args$parameters[[data.term.indicator]]) } args$map <- c(args$map, map) ## New object be silent 
args$silent <- TRUE ## 'fullGaussian' does *not* use any of the following objects if (method != "fullGaussian") { ## Create new object newobj <- do.call("MakeADFun", args) ## Helper function to loop through observations: nm <- names(newobj$par) obs.pointer <- which(nm == observation.name) if(method=="cdf"){ tmp <- matrix(which(nm == data.term.indicator), ncol=3) data.term.pointer <- tmp[,1] lower.cdf.pointer <- tmp[,2] upper.cdf.pointer <- tmp[,3] } else { data.term.pointer <- which(nm == data.term.indicator) lower.cdf.pointer <- NULL upper.cdf.pointer <- NULL } observation <- local({ obs.local <- newobj$par i <- 1:length(subset) function(k, y=NULL, lower.cdf=FALSE, upper.cdf=FALSE){ ## Disable all observations later than k: obs.local[data.term.pointer[k must set nthreads=1 nthreads.restore <- TMB::openmp() on.exit( TMB::openmp( nthreads.restore ), add=TRUE) TMB::openmp(1) requireNamespace("parallel") # was library(parallel) lapply <- parallel::mclapply } ## Trace one-step functions tracefun <- function(k)if(trace)print(k) ## Apply a one-step method and generate common output assuming ## the method generates at least: ## * nll ## * nlcdf.lower ## * nlcdf.upper applyMethod <- function(oneStepMethod){ ord <- seq_along(subset) if (reverse) ord <- rev(ord) pred <- do.call("rbind", lapply(ord, oneStepMethod)) pred <- as.data.frame(pred)[ord, ] pred$Fx <- 1 / ( 1 + exp(pred$nlcdf.lower - pred$nlcdf.upper) ) pred$px <- 1 / ( exp(-pred$nlcdf.lower + pred$nll) + exp(-pred$nlcdf.upper + pred$nll) ) if(discrete || partialDiscrete){ if(!is.null(seed)){ ## Restore RNG on exit: Random.seed <- .GlobalEnv$.Random.seed on.exit(.GlobalEnv$.Random.seed <- Random.seed) set.seed(seed) } U <- runif(nrow(pred)) if (partialDiscrete) { pred$randomize <- randomize[subset] U <- U * pred$randomize } } else { U <- 0 } pred$residual <- qnorm(pred$Fx - U * pred$px) pred } ## ######################### CASE: oneStepGaussian if(method == "oneStepGaussian"){ p <- newobj$par newobj$fn(p) ## Test eval 
oneStepGaussian <- function(k){ tracefun(k) index <- subset[k] f <- function(y){ newobj$fn(observation(k, y)) } g <- function(y){ newobj$gr(observation(k, y))[obs.pointer[k]] } opt <- nlminb(obs[index], f, g) H <- optimHess(opt$par, f, g) c(observation=obs[index], mean=opt$par, sd=sqrt(1/H)) } ord <- seq_along(subset) if (reverse) ord <- rev(ord) pred <- do.call("rbind", lapply(ord, oneStepGaussian)) pred <- as.data.frame(pred)[ord, ] pred$residual <- (pred$observation-pred$mean)/pred$sd } ## ######################### CASE: oneStepGaussianOffMode if(method == "oneStepGaussianOffMode"){ p <- newobj$par newobj$fn(p) ## Test eval newobj$env$random.start <- expression({last.par[random]}) oneStepGaussian <- function(k){ tracefun(k) index <- subset[k] f <- function(y){ newobj$fn(observation(k, y)) } g <- function(y){ newobj$gr(observation(k, y))[obs.pointer[k]] } c(observation=obs[index], nll = f(obs[index]), grad = g(obs[index])) } ord <- seq_along(subset) if (reverse) ord <- rev(ord) pred <- do.call("rbind", lapply(ord, oneStepGaussian)) pred <- as.data.frame(pred)[ord, ] ################### Convert value and gradient to residual ## Need Lambert W function: x = W(x) * exp( W(x) ) , x > 0 ## Vectorized in x and tested on extreme cases W(.Machine$double.xmin) ## and W(.Machine$double.xmax). 
W <- function(x){ ## Newton: f(y) = y * exp(y) - x ## f'(y) = y * exp(y) + exp(y) rel.tol <- sqrt(.Machine$double.eps) logx <- log(x) fdivg <- function(y)(y - exp(logx - y)) / (1 + y) y <- pmax(logx, 0) while( any( abs( logx - log(y) - y) > rel.tol, na.rm=TRUE) ) { y <- y - fdivg(y) } y } getResid <- function(value, grad){ Rabs <- sqrt( W( exp( 2*(value - log(sqrt(2*pi)) + log(abs(grad))) ) ) ) R <- sign(grad) * Rabs R } nll0 <- newobj$fn(observation(0)) R <- getResid( diff( c(nll0, pred$nll) ), pred$grad ) M <- pred$observation - ifelse(pred$grad != 0, R * (R / pred$grad), 0) pred$mean <- M pred$residual <- R } ## ######################### CASE: oneStepGeneric OSG_continuous <- missing(discreteSupport) || partialDiscrete if((method == "oneStepGeneric") && OSG_continuous){ p <- newobj$par newobj$fn(p) ## Test eval newobj$env$value.best <- -Inf ## <-- Never overwrite last.par.best nan2zero <- function(x)if(!is.finite(x)) 0 else x ## Set default configuration for this method (modify with '...'): formals(tmbprofile)$ytol <- 10 ## Tail tolerance (increase => more tail) formals(tmbprofile)$ystep <- .5 ## Grid spacing (decrease => more accuracy) ## Handle discrete case if(discrete){ formals(tmbprofile)$h <- 1 integrate <- function(f, lower, upper, ...){ grid <- ceiling(lower):floor(upper) list( value = sum( f(grid) ) ) } } oneStepGeneric <- function(k){ tracefun(k) ans <- try({ index <- subset[k] f <- function(y){ newobj$fn(observation(k, y)) } nll <- f(obs[index]) ## Marginal negative log-likelihood newobj$env$last.par.best <- newobj$env$last.par ## <-- used by tmbprofile if (splineApprox) { slice <- tmbprofile(newobj, k, slice=TRUE, parm.range = range,...) 
spline <- splinefun(slice[[1]], slice[[2]], ties=mean) spline.range <- range(slice[[1]]) if (partialDiscrete) { ## Remove density evaluations of discrete part slice[[1]][slice[[1]] %in% discreteSupport] <- NA } } else { spline <- Vectorize(f) spline.range <- range slice <- NULL } if(trace >= 2){ plotfun <- function(slice, spline){ plot.range <- spline.range if (!is.finite(plot.range[1])) plot.range[1] <- min(obs) if (!is.finite(plot.range[2])) plot.range[2] <- max(obs) if (!is.null(slice)) plot(slice, type="p", level=NULL) plot(spline, plot.range[1], plot.range[2], add=!is.null(slice)) abline(v=obs[index], lty="dashed") } if(trace >= 3){ if (!is.null(slice)) slice$value <- exp( -(slice$value - nll) ) plotfun(slice, function(x)exp(-(spline(x) - nll))) } else plotfun(slice, spline) } F1 <- integrate(function(x)exp(-(spline(x) - nll)), spline.range[1], obs[index])$value F2 <- integrate(function(x)exp(-(spline(x) - nll)), obs[index] + discrete, spline.range[2])$value mean <- integrate(function(x)exp(-(spline(x) - nll)) * x, spline.range[1], spline.range[2])$value / (F1 + F2) ## Correction mean, F1 and F2 if (partialDiscrete) { ## Evaluate discrete part f.discrete <- sapply(discreteSupport, f) Pdis <- exp(-f.discrete + nll) ## mean correction mean <- mean * (F1 + F2) + sum(Pdis * discreteSupport) mean <- mean / (F1 + F2 + sum(Pdis)) ## F1 and F2 correction left <- discreteSupport <= obs[index] F1 <- F1 + sum(Pdis[left]) F2 <- F2 + sum(Pdis[!left]) } ## Was: ## F1 <- integrate(Vectorize( function(x)nan2zero( exp(-(f(x) - nll)) ) ), -Inf, obs[index])$value ## F2 <- integrate(Vectorize( function(x)nan2zero( exp(-(f(x) - nll)) ) ), obs[index], Inf)$value nlcdf.lower = nll - log(F1) nlcdf.upper = nll - log(F2) c(nll=nll, nlcdf.lower=nlcdf.lower, nlcdf.upper=nlcdf.upper, mean=mean) }) if(is(ans, "try-error")) ans <- NaN ans } pred <- applyMethod(oneStepGeneric) } ## ######################### CASE: oneStepDiscrete if((method == "oneStepGeneric") && !OSG_continuous){ p <- 
newobj$par newobj$fn(p) ## Test eval obs <- as.integer(round(obs)) if(is.null(discreteSupport)){ warning("Setting 'discreteSupport' to ",min(obs),":",max(obs)) discreteSupport <- min(obs):max(obs) } oneStepDiscrete <- function(k){ tracefun(k) ans <- try({ index <- subset[k] f <- function(y){ newobj$fn(observation(k, y)) } nll <- f(obs[index]) ## Marginal negative log-likelihood F <- Vectorize(function(x)exp(-(f(x) - nll))) (discreteSupport) F1 <- sum( F[discreteSupport <= obs[index]] ) F2 <- sum( F[discreteSupport > obs[index]] ) nlcdf.lower = nll - log(F1) nlcdf.upper = nll - log(F2) c(nll=nll, nlcdf.lower=nlcdf.lower, nlcdf.upper=nlcdf.upper) }) if(is(ans, "try-error")) ans <- NaN ans } pred <- applyMethod(oneStepDiscrete) } ## ######################### CASE: fullGaussian if(method == "fullGaussian"){ ## Same object with y random: args2 <- args args2$random <- c(args2$random, observation.name) ## Change map: Fix everything except observations fix <- data.term.indicator args2$map[fix] <- lapply(args2$map[fix], function(x)factor(NA*unclass(x))) newobj2 <- do.call("MakeADFun", args2) newobj2$fn() ## Test-eval to find mode mode <- newobj2$env$last.par GMRFmarginal <- function (Q, i, ...) { ind <- 1:nrow(Q) i1 <- (ind)[i] i0 <- setdiff(ind, i1) if (length(i0) == 0) return(Q) Q0 <- as(Q[i0, i0, drop = FALSE], "symmetricMatrix") L0 <- Cholesky(Q0, ...) 
ans <- Q[i1, i1, drop = FALSE] - Q[i1, i0, drop = FALSE] %*% solve(Q0, Q[i0, i1, drop = FALSE]) ans } h <- newobj2$env$spHess(mode, random=TRUE) i <- which(names(newobj2$env$par[newobj2$env$random]) == observation.name) Sigma <- solve( as.matrix( GMRFmarginal(h, i) ) ) res <- obs[subset] - mode[i] L <- t(chol(Sigma)) pred <- data.frame(residual = as.vector(solve(L, res))) } ## ######################### CASE: cdf if(method == "cdf"){ p <- newobj$par newobj$fn(p) ## Test eval newobj$env$random.start <- expression(last.par[random]) cdf <- function(k){ tracefun(k) nll <- newobj$fn(observation(k)) lp <- newobj$env$last.par nlcdf.lower <- newobj$fn(observation(k, lower.cdf = TRUE)) newobj$env$last.par <- lp ## restore nlcdf.upper <- newobj$fn(observation(k, upper.cdf = TRUE)) newobj$env$last.par <- lp ## restore c(nll=nll, nlcdf.lower=nlcdf.lower, nlcdf.upper=nlcdf.upper) } pred <- applyMethod(cdf) } pred } ## Goodness of fit residuals based on an approximate posterior ## sample. (\emph{Beta version; may change without notice}) ## ## Denote by \eqn{(u, x)} the pair of the true un-observed random effect ## and the data. Let a model specification be given in terms of the ## estimated parameter vector \eqn{\theta} and let \eqn{u^*} be a ## sample from the conditional distribution of \eqn{u} given ## \eqn{x}. If the model specification is correct, it follows that the ## distribution of the pair \eqn{(u^*, x)} is the same as the distribution ## of \eqn{(u, x)}. Goodness-of-fit can thus be assessed by proceeding as ## if the random effect vector were observed, i.e check that \eqn{u^*} ## is consistent with prior model of the random effect and that \eqn{x} ## given \eqn{u^*} agrees with the observation model. ## ## This function can carry out the above procedure for many TMB models ## under the assumption that the true posterior is well approximated by a ## Gaussian distribution. 
## ## First a draw from the Gaussian posterior distribution \eqn{u^*} is ## obtained based on the mode and Hessian of the random effects given the ## data. ## This sample uses sparsity of the Hessian and will thus work for large systems. ## ## An automatic standardization of the sample can be carried out \emph{if ## the observation model is Gaussian} (\code{fullGaussian=TRUE}). In this ## case the prior model is obtained by disabling the data term and ## calculating mode and Hessian. A \code{data.term.indicator} must be ## given in order for this to work. Standardization is performed using ## the sparse Cholesky of the prior precision. ## By default, this step does not use a fill reduction permutation \code{perm=FALSE}. ## This is often superior wrt. to interpretation of the. ## the natural order of the parameter vector is used \code{perm=FALSE} ## which may be superior wrt. to interpretation. Otherwise ## \code{perm=TRUE} a fill-reducing permutation is used while ## standardizing. ## @references Waagepetersen, R. (2006). A Simulation-based Goodness-of-fit Test for Random Effects in Generalized Linear Mixed Models. Scandinavian journal of statistics, 33(4), 721-731. ## @param obj TMB model object from \code{MakeADFun}. ## @param observation.name Character naming the observation in the template. ## @param data.term.indicator Character naming an indicator data variable in the template. Only used if \code{standardize=TRUE}. ## @param standardize Logical; Standardize sample with the prior covariance ? Assumes all latent variables are Gaussian. ## @param as.list Output posterior sample, and the corresponding standardized residual, as a parameter list ? ## @param perm Logical; Use a fill-reducing ordering when standardizing ? ## @param fullGaussian Logical; Flag to signify that the joint distribution of random effects and data is Gaussian. ## @return List with components \code{sample} and \code{residual}. 
oneSamplePosterior <- function(obj, observation.name = NULL, data.term.indicator = NULL, standardize = TRUE, as.list = TRUE, perm = FALSE, fullGaussian = FALSE){ ## Draw Gaussian posterior sample tmp <- obj$env$MC(n=1, keep=TRUE, antithetic=FALSE) samp <- as.vector( attr(tmp, "samples") ) ## If standardize resid <- NULL if (standardize) { ## Args to construct copy of 'obj' args <- as.list(obj$env)[intersect(names(formals(MakeADFun)), ls(obj$env))] ## Use the best encountered parameter for new object args$parameters <- obj$env$parList(par = obj$env$last.par.best) ## Make data.term.indicator in parameter list obs <- obj$env$data[[observation.name]] nobs <- length(obs) zero <- rep(0, nobs) if ( ! fullGaussian ) args$parameters[[data.term.indicator]] <- zero ## Fix all non-random components of parameter list names.random <- unique(names(obj$env$par[obj$env$random])) names.all <- names(args$parameters) fix <- setdiff(names.all, names.random) map <- lapply(args$parameters[fix], function(x)factor(x*NA)) args$map <- map ## Overwrite map ## If 'fullGaussian == TRUE' turn 'obs' into a random effect if (fullGaussian) { names.random <- c(names.random, observation.name) args$parameters[[observation.name]] <- obs } ## Find randomeffects character args$random <- names.random args$regexp <- FALSE ## New object be silent args$silent <- TRUE ## Create new object newobj <- do.call("MakeADFun", args) ## Construct Hessian and Cholesky newobj$fn() ## Get Cholesky and prior mean ## FIXME: We are using the mode as mean. Consider skewness ## correction similar to 'bias.correct' in 'sdreport'. L <- newobj$env$L.created.by.newton mu <- newobj$env$last.par ## If perm == FALSE redo Cholesky with natural ordering if ( ! 
perm ) { Q <- newobj$env$spHess(mu, random=TRUE) L <- Matrix::Cholesky(Q, super=TRUE, perm=FALSE) } ## If 'fullGaussian == TRUE' add 'obs' to the sample if (fullGaussian) { tmp <- newobj$env$par * NA tmp[names(tmp) == observation.name] <- obs tmp[names(tmp) != observation.name] <- samp samp <- tmp } ## Standardize ( P * Q * P^T = L * L^T ) r <- samp - mu rp <- r[L@perm + 1] Lt <- Matrix::t( as(L, "sparseMatrix") ) resid <- as.vector( Lt %*% rp ) } if (as.list) { if (standardize) obj <- newobj par <- obj$env$last.par.best asList <- function(samp) { par[obj$env$random] <- samp samp <- obj$env$parList(par=par) nm <- unique(names(obj$env$par[obj$env$random])) samp[nm] } samp <- asList(samp) if (!is.null(resid)) resid <- asList(resid) } ans <- list() ans$sample <- samp ans$residual <- resid ans } if(FALSE) { library(TMB) runExample("MVRandomWalkValidation", exfolder="../../tmb_examples/validation") set.seed(1) system.time( qw <- TMB:::oneSamplePosterior(obj, "obs", "keep") ) qqnorm(as.vector(qw$residual$u)); abline(0,1) runExample("rickervalidation", exfolder="../../tmb_examples/validation") set.seed(1) system.time( qw <- TMB:::oneSamplePosterior(obj, "Y", "keep") ) qqnorm(as.vector(qw$residual$X)); abline(0,1) runExample("ar1xar1") set.seed(1) system.time( qw <- TMB:::oneSamplePosterior(obj, "N", "keep") ) qqnorm(as.vector(qw$residual$eta)); abline(0,1) } TMB/R/header_tool.R0000644000176200001440000001223614634646733013531 0ustar liggesusers## Header extraction (keep declaration - remove definition) declExtract <- function(x, level=2){ ## level of matching curly brackets to exclude x <- paste(x, "\n") x <- paste(x, collapse="") x <- strsplit(x,"")[[1]] y <- cumsum( (x=="{") - (x=="}") ) mark.left <- (y == level) & (x=="{") ## Begin { mark.right <- (y == level - 1) & (x=="}") ## End } x[y >= level] <- "" ## Discard x[mark.right] <- ";" x <- paste(x, collapse="") x <- strsplit(x, "\n")[[1]] ## Change e.g. 
"matrix(T1 x):Base(x);" to "matrix(T1 x);" x <- sub(")[ ]*:.*;",");",x) x } ## Template class extraction tclassExtract <- function(x){ from <- grep("^class|^struct", x) - 1 to <- grep("^};", x) from <- from[ findInterval(to, from) ] nm <- gsub("^[^ ]*[ ]*([^ ^{^:]*).*", "\\1", x[from+1]) ans <- Map("[", list(x), Map(":", from, to) ) names(ans) <- nm ans } ## Template class specialization tclassSpecialize <- function(y, type="double"){ if(length(type) > 1){ return( unlist(lapply(type, function(x)tclassSpecialize(y, x))) ) } typename <- sub("^template[ ]*.*","\\1",y[1]) y[1] <- "template <>" nm <- gsub("^[^ ]*[ ]*([^ ^{^:]*).*", "\\1", y[2]) y[2] <- sub(nm,paste0(nm,"<",type,">"),y[2]) y <- gsub(typename, type, y) y } ## template class precompilation (explicit instantiation) tclassInstantiate <- function(y, type = "double"){ nm <- gsub("(^[^ ]*[ ]*[^ ^{^:]*).*", "\\1", y[2]) paste0("template ", nm, "<", type,">;") } ## Macro extraction macroExtract <- function(x){ x <- paste(x, "\n") x <- paste(x, collapse="") x <- gsub("\\\\[ ]*\n","",x) x <- gsub("\t","",x) x <- strsplit(x, "\n")[[1]] x <- grep("^#define", x, value=TRUE) nm <- sub("#define ([^ ^(]*).*", "\\1", x) names(x) <- nm x } ## Cleanup source cleanup <- function(x){ x <- .removeComments(x) ## Remove comments x <- gsub("[ ]*$", "", x) ## Remove trailing whitespace x <- x[x != ""] ## Remove empty lines x } ######################################################## ## Example precompSource <- function( filename = "include/tmbutils/density.hpp", namespace = "density", classes = "MVNORM_t", types = c( "double ", ## "CppAD::AD ", ## "CppAD::AD > ", ## "CppAD::AD > > ", "TMBad::ad_aug"), macros = TRUE, append = FALSE ## Modify input file in place ) { ## density namespace x <- readLines( system.file(filename, package="TMB") ) tcl <- tclassExtract(x) dcl <- lapply(tcl, declExtract) spec <- lapply(dcl, tclassSpecialize, type = types) macro <- macroExtract(x) macros <- rep(macros, length.out = length(macro)) ans <- c( ## 
Begin namespace paste("namespace", namespace, "{")[!is.null(namespace)], ## macro-defines paste("#undef", names(macro))[macros], macro[macros], ## Explicit instantiation "#ifdef WITH_LIBTMB", unlist( spec[classes] ), "#endif", ## Precompiled version "#ifdef TMB_PRECOMPILE_ATOMICS", unlist( lapply(classes, function(name)tclassInstantiate(tcl[[name]], types) ) ), "#endif", ## Undefs paste("#undef", names(macro))[macros], ## End namespace "}"[!is.null(namespace)] ) names(ans) <- NULL ## Add header ans <- cleanup(ans) ans <- c("// Autogenerated - do not edit by hand", "//", "// -DWITH_LIBTMB : Extracts header declarations only.", "// -DTMB_PRECOMPILE_ATOMICS : Instantiations for precompilation.", ans) if (append) { writeLines(c(x, ans), system.file(filename, package="TMB") ) return(NULL) } ans } precompileSource <- function() { CppAD_types <- c( "double ", "CppAD::AD ", "CppAD::AD > ", "CppAD::AD > > ") TMBad_types <- c( "double ", "TMBad::ad_aug " ) ## TODO : ## ============================================== ## precompSource( ## filename = "include/tmbutils/vector.hpp", ## namespace = NULL, ## classes = c("vector", "matrix"), ## append = TRUE ) ## , ## precompSource( ## filename = "include/tmbutils/array.hpp", ## namespace = "tmbutils", ## classes = c("array"), ## macros = FALSE ) ## , ## ============================================== x <- c( ## Precompile using CppAD "#ifdef CPPAD_FRAMEWORK", precompSource( filename = "include/tmbutils/density.hpp", namespace = "density", classes = c("MVNORM_t", "GMRF_t"), types = CppAD_types ), "#endif", ## Precompile using TMBad "#ifdef TMBAD_FRAMEWORK", precompSource( filename = "include/tmbutils/density.hpp", namespace = "density", classes = c("MVNORM_t", "GMRF_t"), types = TMBad_types ), "#endif" ) x } TMB/R/tmbprofile.R0000644000176200001440000002151614634646733013410 0ustar liggesusers## Copyright (C) 2013-2015 Kasper Kristensen ## License: GPL-2 ##' Calculate 1D likelihood profiles wrt. 
single parameters or more ##' generally, wrt. arbitrary linear combinations of parameters ##' (e.g. contrasts). ##' ##' Given a linear combination \deqn{ t = \sum_{i=1}^n v_i \theta_i } of ##' the parameter vector \eqn{\theta}, this function calculates the ##' likelihood profile of \eqn{t}. By default \eqn{v} is a unit vector ##' determined from \code{name}. Alternatively the linear combination ##' may be given directly (\code{lincomb}). ##' ##' @title Adaptive likelihood profiling. ##' @param obj Object from \code{MakeADFun} that has been optimized. ##' @param name Name or index of a parameter to profile. ##' @param lincomb Optional linear combination of parameters to ##' profile. By default a unit vector corresponding to \code{name}. ##' @param h Initial adaptive stepsize on parameter axis. ##' @param ytol Adjusts the range of the likelihood values. ##' @param ystep Adjusts the resolution of the likelihood profile. ##' @param maxit Max number of iterations for adaptive algorithm. ##' @param slice Do slicing rather than profiling? ##' @param parm.range Valid parameter range. ##' @param adaptive Logical; Use adaptive step size? ##' @param trace Trace progress? (TRUE, or a numeric value of 1, ##' gives basic tracing: numeric values > 1 give more information) ##' @param ... Unused ##' @return data.frame with parameter and function values. ##' @seealso \code{\link{plot.tmbprofile}}, \code{\link{confint.tmbprofile}} ##' @examples ##' \dontrun{ ##' runExample("simple",thisR=TRUE) ##' ## Parameter names for this model: ##' ## beta beta logsdu logsd0 ##' ##' ## Profile wrt. 
sigma0: ##' prof <- tmbprofile(obj,"logsd0") ##' plot(prof) ##' confint(prof) ##' ##' ## Profile the difference between the beta parameters (name is optional): ##' prof2 <- tmbprofile(obj,name="beta1 - beta2",lincomb = c(1,-1,0,0)) ##' plot(prof2) ##' confint(prof2) ##' } tmbprofile <- function(obj, name, lincomb, h=1e-4, ytol=2, ystep=.1, maxit=ceiling(5*ytol/ystep), parm.range = c(-Inf, Inf), slice=FALSE, adaptive=TRUE, trace=TRUE,...){ ## Cleanup 'obj' when we exit from this function: restore.on.exit <- c("last.par.best", "random.start", "value.best", "last.par", "inner.control", "tracemgc") oldvars <- sapply(restore.on.exit, get, envir=obj$env, simplify=FALSE) restore.oldvars <- function(){ for(var in names(oldvars)) assign(var, oldvars[[var]], envir=obj$env) } on.exit(restore.oldvars()) ## Parameter estimate (thetahat) par <- obj$env$last.par.best if(!is.null(obj$env$random)) par <- par[-obj$env$random] ## Determine lincomb vector ('lincomb') if(missing(lincomb)){ if (missing(name)) stop("No 'name' or 'lincomb' specified") stopifnot(length(name) == 1) if(is.numeric(name)){ lincomb <- as.numeric(1:length(par)==name) name <- names(par)[name] } else if(is.character(name)){ if (sum(names(par)==name) != 1) stop("'name' is not unique") lincomb <- as.numeric(names(par)==name) } else stop("Invalid name argument") } else { if (missing(name)) name <- "parameter" } stopifnot(length(lincomb) == length(par)) ## Re-parameterize to direction plus (n-1)-dim-subspace ## theta = t*direction + C %*% s X <- Diagonal(length(lincomb)) i <- which(lincomb != 0)[1] X[i,] <- lincomb ## Linear indep. columns invX <- solve(X) direction <- invX[,i] C <- invX[,-i,drop=FALSE] ## Now t(lincomb) %*% C = 0 ! that <- sum( lincomb * par ) ## Start out with initial increment h and ytol. ## * Evaluate and store next function value x1=x0+h, y1=f(x1). 
## * Repeat as long as abs(y1-y.init)0) { if (trace>1) cat("Profile displacement:",x*direction,"\n") cat("Profile value:",ans$objective,"\n") } ans$objective } } ## Robustify f against failure f.original <- f f <- function(x){ y <- try(f.original(x), silent=TRUE) if(is(y, "try-error")) y <- NA y } start <- NULL evalAlongLine <- function(h){ start <<- rep(0, length(par)-1) x <- 0; y <- f(x) if(slice)obj$env$random.start <- expression(last.par[random]) for(it in 1:maxit){ yinit <- y[1] xcurrent <- tail(x,1) ycurrent <- tail(y,1) xnext <- xcurrent+h if(xnext + that < parm.range[1]) { if (trace>1) cat("below minimum value: break\n") break } if( parm.range[2] < xnext + that) { if (trace>1) cat("above maximum value: break\n") break } ynext <- f(xnext) x <- c(x,xnext) y <- c(y,ynext) if( is.na(ynext) ) { if (trace>1) cat("y is NA: break\n") break } if( (ydiff <- abs(ynext-yinit)) > ytol ) { if (trace>1) cat(sprintf("delta y=%f > %f: break\n", ydiff,ytol)) break } if (adaptive) { speedMax <- ystep speedMin <- if(ynext >= yinit) ystep/4 ## 'tail-part' else ystep/8 ## 'center-part' => slow down if( abs(ynext-ycurrent) > speedMax ) { h <- h / 2 if (trace>1) cat(sprintf("halve step size (to %f)\n",h)) } if( abs(ynext-ycurrent) < speedMin ) { h <- h * 2 if (trace>1) cat(sprintf("double step size (to %f)\n",h)) } } } ans <- data.frame(x=x+that, y=y) names(ans) <- c(name,"value") ans } if (trace>1) cat("profile up\n") ans1 <- evalAlongLine(h) restore.oldvars() if (trace>1) cat("profile down\n") ans2 <- evalAlongLine(-h) ans <- rbind(ans1,ans2) ord <- order(ans[[1]]) ans <- ans[ord,] class(ans) <- c("tmbprofile", class(ans)) ans } ##' Plot (negative log) likelihood profile with confidence interval added. ##' ##' @title Plot likelihood profile. ##' @param x Output from \code{\link{tmbprofile}}. ##' @param type Plot type. ##' @param level Add horizontal and vertical lines depicting this confidence level (\code{NULL} disables the lines). ##' @param ... Additional plot arguments. 
##' @return NULL ##' @method plot tmbprofile ##' @S3method plot tmbprofile plot.tmbprofile <- function(x,type="l",level=.95,...){ plot(as.data.frame(x), type=type, ...) if(!is.null(level)){ hline <- .5*qchisq(level,df=1) abline(h=hline+min(x$value), lty="dotted") abline(v=confint(x, level=level), lty="dotted") } } ##' Calculate confidence interval from a likelihood profile. ##' ##' @title Profile based confidence intervals. ##' @param object Output from \code{\link{tmbprofile}}. ##' @param parm Not used ##' @param level Confidence level. ##' @param ... Not used ##' @return Lower and upper limit as a matrix. ##' @method confint tmbprofile ##' @S3method confint tmbprofile confint.tmbprofile <- function (object, parm, level = 0.95, ...){ i <- which.min(object$value) left <- head(object, i) right <- tail(object, nrow(object)-i ) hline <- .5*qchisq(level,df=1) + object$value[i] lower <- approx(left[[2]], left[[1]], hline)$y upper <- approx(right[[2]], right[[1]], hline)$y ans <- t( c(lower=lower, upper=upper) ) rownames(ans) <- names(object)[1] ans } TMB/NEWS0000644000176200001440000005603014641202613011376 0ustar liggesusers------------------------------------------------------------------------ TMB 1.9.14 (2024-07-03) ------------------------------------------------------------------------ o Fix C++20 compiler warning ------------------------------------------------------------------------ TMB 1.9.13 (2024-06-28) ------------------------------------------------------------------------ o Fix isNullPointer #395 ------------------------------------------------------------------------ TMB 1.9.12 (2024-06-19) ------------------------------------------------------------------------ o Fix clang-19 build issue o use Matrix API for SEXP->(cholmod_factor *) coercion (#393) o Add TMB.Version() #387 o Fix protection bug in REPORT(SparseMatrix) ------------------------------------------------------------------------ TMB 1.9.11 (2024-04-02) 
------------------------------------------------------------------------ o Fix conflict between TMB and Eigen parallelization (GH #390). o Matrix ABI check no longer writes external file during installation (GH #392). o Improved diagnostic messages when data of invalid storage mode is passed to TMB (GH #391). ------------------------------------------------------------------------ TMB 1.9.10 (2023-12-12) ------------------------------------------------------------------------ o precompile(): Translation unit changes that should not be visible to the user. ------------------------------------------------------------------------ TMB 1.9.9 (2023-11-27) ------------------------------------------------------------------------ o Fix 'Wformat' warnings ------------------------------------------------------------------------ TMB 1.9.7 (2023-11-22) ------------------------------------------------------------------------ o Fixed tweedie integer overflow for extreme parameters. o Added new argument 'adaptive' to 'tmbprofile'. o Added new atomic matrix functions 'sqrtm' and 'absm'. o Avoid unnecessary warnings on 'Matrix package version inconsistency'. ------------------------------------------------------------------------ TMB 1.9.6 (2023-08-11) ------------------------------------------------------------------------ o Fix UBSAN observed for nested AD contexts. ------------------------------------------------------------------------ TMB 1.9.5 (2023-07-18) ------------------------------------------------------------------------ o R: Prepare determinant() change from Matrix version 1.6-0. o R: oneStepPredict() now allows mixed discrete/continuous distributions. o C++: New atomic 'fft'. ------------------------------------------------------------------------ TMB 1.9.4 (2023-04-18) ------------------------------------------------------------------------ o Fix internal bug causing wrong 'sparse_matrix_exponential' derivatives. 
o Prepare for upcoming Matrix_1.5-5 (Fix misuse of Matrix C-API). ------------------------------------------------------------------------ TMB 1.9.3 (2023-03-28) ------------------------------------------------------------------------ o Prepare for Matrix 1.5-4 deprecations. o checkConsistency(): New argument 'estimate' enables full simulation study and another argument 'observation.name' can be used to not retape between simulation replicates. o oneStepPredict(): Eliminate nuisance warnings from generic method. o C++ side: Added preprocessor flag 'TMB_ABORT' to better control behaviour in 'TMB_SAFEBOUNDS' mode (e.g. signal normal R error rather than abort). In addition, 'TMB_CATCH' can now be configured to catch more than just std::bad_alloc. ------------------------------------------------------------------------ TMB 1.9.2 (2023-01-23) ------------------------------------------------------------------------ o Improved interoperability with Rcpp (conflicts resolved). o Fix some broken sparse matrix coercions that used to work. o New features on C++ side: - Atomic sparse matrix exponential. - Atomic vectorized operations mainly for internal use. - Faster matrix multiply for higher order AD (see example 'hmm_filter'). - Utility for parameter object concatenation. o Fix warnings from CRAN check page. ------------------------------------------------------------------------ TMB 1.9.1 (2022-08-16) ------------------------------------------------------------------------ o Prepare for Matrix 1.4-2 deprecations. o Fix html version of sdreport docs (thanks to Kurt Hornik). o Constant input test was missing for some special functions. o Added argument 'max.order' to compile(). o oneStepPredict() CDF method was stabilized. o 2D interpolation operator was added on C++ side. 
------------------------------------------------------------------------ TMB 1.9.0 (2022-05-26) ------------------------------------------------------------------------ o Fix PROTECT bugs (thanks to Tomas Kalibera) o Behavior if TMB parallization has been changed: - Setting the number of threads of a TMB model using TMB::openmp() is now portable across platforms. - Number of TMB threads no longer interferes with number of threads used by other packages or BLAS/LAPACK. - By default, unless explicitly requested, models run with a single thread. o precompile() can now auto generate 'TMB.h' and 'TMB.cpp' that may be used by R packages to split in several compilation units. o MakeADFun() now checks that the DLL is loaded. ------------------------------------------------------------------------ TMB 1.8.1 (2022-03-23) ------------------------------------------------------------------------ o Fix some clang-14 compiler warnings. ------------------------------------------------------------------------ TMB 1.8.0 (2022-03-07) ------------------------------------------------------------------------ o Experimental support for new AD library 'TMBad' - see ?compile. o Option to discard nuisance warnings from Eigen - see ?compile. ------------------------------------------------------------------------ TMB 1.7.22 (2021-09-28) ------------------------------------------------------------------------ o Fortran character strings: use 'FCONE' macro o Add .onUnload() (GH #353) o Fix tmbutils::array ASAN issue triggered by at least clang-13 ------------------------------------------------------------------------ TMB 1.7.21 (2021-09-06) ------------------------------------------------------------------------ o Fix CRAN build issue triggered by clang 13.0.0 (omp.h) o Fix tweedie thread safety on Windows (https://github.com/glmmTMB/glmmTMB/issues/714) o Fix as.list(.) 
for class 'sdreport' (https://github.com/glmmTMB/glmmTMB/issues/692) ------------------------------------------------------------------------ TMB 1.7.20 (2021-04-08) ------------------------------------------------------------------------ o Make parList() work in more edge cases. o Make tiny_ad variables work (again) with Eigen types. o Avoid header issues in Matrix 1.3-0 due to SuiteSparse update (#340) ------------------------------------------------------------------------ TMB 1.7.19 (2021-02-05) ------------------------------------------------------------------------ o 'sdreport()' failed in a special case (GH #333) o Improved implementation of 'rtweedie()' o Fixed thread safety issues (GH #330) o Fixed 'tmbroot()' and 'tmbprofile()' for single-parameter models o Function 'compile()' now quotes more paths (GH #323) ------------------------------------------------------------------------ TMB 1.7.18 (2020-07-24) ------------------------------------------------------------------------ o Fix clang-ASAN issue caused by memory management changes ------------------------------------------------------------------------ TMB 1.7.17 (2020-07-23) ------------------------------------------------------------------------ o New function 'FreeADFun()' gives the user more control with memory deallocation. In addition memory management of external pointers have been improved - see '?FreeADFun'. o Documentation errors in '?sdreport' have been corrected. o The function 'oneStepPredict()' now calculates residuals in reverse order because it is more stable. Old behaviour can be obtained by passing 'reverse=FALSE'. ------------------------------------------------------------------------ TMB 1.7.16 (2020-01-15) ------------------------------------------------------------------------ o Fixed memory leaks in sparse hessian. o logspace_add: Added some edge case reductions. o 'oneStepPredict' no longer overwrites mapping of random effects. o Fix isfinite, isinf, isnan etc (github #297). 
o Add support for other object names to 'gdbsource'. o dbinom: Handle edge cases 0*log(0). o Add segment method to vector_indicator. o data_indicator: Add default CTOR. o splinefun: fix memleak + add vector evaluation. o Saved models from old TMB versions should now be usable in new TMB versions without giving the INTEGER(...) error. o checkConsistency bugfix: did not work for mapped random effects. ------------------------------------------------------------------------ TMB 1.7.15 (2018-11-08) ------------------------------------------------------------------------ o Allow report array of any type (similar to vector case) o Solve 'rck' issue from CRAN o Fix bug in separable simulate ------------------------------------------------------------------------ TMB 1.7.14 (2018-06-23) ------------------------------------------------------------------------ o Fix performance bug triggered by R-devel. o Fixed bug in sparsity detection algorithm. o Tweedie atomic function speedup. o Fix save/reload model object. Loaded object no longer overwrites fitted parameters when running e.g. obj$report() or obj$simulate(). o New function 'tmbroot' computes likelihood profile confidence intervals. ------------------------------------------------------------------------ TMB 1.7.13 (2018-03-22) ------------------------------------------------------------------------ o Redundant warnings from CHOLMOD have been silenced. o ADREPORT now tracks object dimensions. More infomation in '?TMB::as.list.sdreport'. o New feature to do process normalization from R. See '?TMB::normalize'. o New feature to update data from R without re-taping the computational graph. Enabled on C++ side using 'DATA_UPDATE'. o bugfix C++ namespace 'autodiff': Jacobian had wrong dimension. 
------------------------------------------------------------------------ TMB 1.7.12 (2017-12-10) ------------------------------------------------------------------------ o Fix CRAN rchk tool warnings (although false alarm) o New macro TMB_OBJECTIVE_PTR for more flexible use of DATA and PARAMETER objects. o Fix 'oneStepPredict' when no random effects. o C callable routines 'tmb_forward' and 'tmb_reverse' added. o Fix bug in empty array assignment. o Fix slow 'oneStepPredict' caused by matrix inversion in 'tmbprofile'. o TMB:::setupRstudio() o compile: Add flag for tracing forward sweeps. ------------------------------------------------------------------------ TMB 1.7.11 (2017-08-09) ------------------------------------------------------------------------ o Allow logical data objects o Fix conversion R <-> C++ of long objects - Can now pass long vector (length >= 2^31) as DATA_VECTOR. - Can now REPORT a long vector. - Can now calculate long jacobian matrices (m*n can be greater than 2^31 where n is the number of parameters and m the number of ADREPORTed variables). o Fixed a possible segmentation fault in sparsity detection algorithm. o New flag to sdreport() reduces memory usage when bias correcting a large number of variables. o New experimental function checkConsistency() to check the Laplace approximation and correctness of the user template. ------------------------------------------------------------------------ TMB 1.7.10 (2017-05-03) ------------------------------------------------------------------------ o Fix broken Solaris build caused by Eigen 3.3.3 update ------------------------------------------------------------------------ TMB 1.7.9 (2017-04-12) ------------------------------------------------------------------------ o Fix PROTECT bugs reported by Tomas Kalibera. o Now possible to use 'autodiff::hessian' in conjunction with atomic functions. o Fix 'testthat:::expect_equal(obj, obj)' crash for non-random effect models. 
o Fix compatibility issues of 'tmbutils::vector' with eigen 3.3.2. ------------------------------------------------------------------------ TMB 1.7.8 (2017-02-08) ------------------------------------------------------------------------ o Internal bug fixes. - Fix bug in GMRF constructor from grid array. - Fix printing of Eigen/AD types (broken by version 1.7.7) ------------------------------------------------------------------------ TMB 1.7.7 (2017-01-31) ------------------------------------------------------------------------ o Internal modifications: - Fix Eigen 3.3.2 compatibility issues. - Disable dynamic symbols. ------------------------------------------------------------------------ TMB 1.7.6 (2017-01-15) ------------------------------------------------------------------------ o Add Conway-Maxwell-Poisson distribution (dcompois, rcompois). o Add lfactorial. o Move MCMC to separate package. o Add simulators for exponential, beta, F, logistic, Student T, and Weibull distributions. o Fix a bug in function 'split'. ------------------------------------------------------------------------ TMB 1.7.5 (2016-11-20) ------------------------------------------------------------------------ o Atomic logspace_add and logspace_sub. o Numerically robust binomial and negative binomial density functions. o New macros: - DATA_STRING to pass strings from R. - SIMULATE to mark blocks of simulation code. o Simulation methods implemented for density classes: - MVNORM, AR1, SCALE, VECSCALE, SEPARABLE, GMRF, ARk. o R-style simulators now available from the template: - rnorm, runif, rpois, rbinom, rgamma, rnbinom and rnbinom2. ------------------------------------------------------------------------ TMB 1.7.4 (2016-09-21) ------------------------------------------------------------------------ o Add onLoad check on 'Matrix' package version. o runExample: Fixed bug when running all examples from a specified example folder. 
o sdreport: Fixed bug in the case with exactly one random effect. o sdreport: Reduced size of output. ------------------------------------------------------------------------ TMB 1.7.3 (2016-09-05) ------------------------------------------------------------------------ o New atomic functions: - pbeta - qbeta - dtweedie - besselK (derivatives wrt both arguments) - besselI (derivatives wrt both arguments) - besselJ (derivatives wrt both arguments) - besselY (derivatives wrt both arguments) o Adaptive numerical integration: - gauss_kronrod::integrate (one dimension) - gauss_kronrod::mvIntegrate (multiple dimensions) o sdreport: Fix bug that caused bias.correction to break when parameter maps were in use. ------------------------------------------------------------------------ TMB 1.7.2 (2016-08-09) ------------------------------------------------------------------------ o sdreport: Allow bias correct in chunks to reduce memory. o Experimental TMB:::install.contrib to install user contributed cpp code from github. o Disable CHOLMOD warnings from inner problem when silent=TRUE. o sdreport: Keep running when solve(hessian) fails. o sdreport: Fix bug that caused summary(sdreport(.)) to fail when nothing was ADREPORTed. ------------------------------------------------------------------------ TMB 1.7.1 (2016-05-05) ------------------------------------------------------------------------ o Lots of minor performance optimizations. o sdreport: New argument getReportCovariance. - Reduces memory usage when many variables are ADREPORTed. o bugfix: numeric(0) in random list of MakeADFun caused wrong parameter list. o New atomic: convol2d ------------------------------------------------------------------------ TMB 1.7.0 (2016-03-22) ------------------------------------------------------------------------ o CITATION update with JSS publication. o New function 'as.list.sdreport' converts from sdreport format to original parameter list structure. 
o MCMC: - Fix small bug in transforming intial values in NUTS. - Rename mcmc -> run_mcmc to avoid conflict with coda package. ------------------------------------------------------------------------ TMB 1.6.6 (2016-01-28) ------------------------------------------------------------------------ o oneStepPredict: Add one-step mean to output for methods 'oneStepGaussianOffMode' and 'oneStepGeneric'. o Array class changes: - More explicit array assignment from other class: Can now assign from matrix and un-evaluated expression template E.g. a = a.matrix() * a.matrix(); - Documentation: warning about the 2D case - Re-implement vec() operator (avoid loop) - Add method matrix() to array class: Keeps first dimension and collapses remaining dimensions. o Clean up potential naming conflicts caused by Rmath macros o Atomic functions general speedup o bugfix: precompile broke REGISTER_ATOMIC ------------------------------------------------------------------------ TMB 1.6.5 (2015-12-02) ------------------------------------------------------------------------ o Fix bug that caused sdreport to fail for models using the 'profile' argument to MakeADFun. o Robustify marginal likelihood for extreme parameters (to help MCMC). o MCMC: Improved stability of adaptive step size algorithm. o Bias correction documentation and test example. Results are now part of the summary output. ------------------------------------------------------------------------ TMB 1.6.4 (2015-11-22) ------------------------------------------------------------------------ o precompile() makeover: - Works on all platforms. - Works with multiple models in same R instance. - Frequently used classes MVNORM_t and GMRF_t added to precompilation. ------------------------------------------------------------------------ TMB 1.6.3 (2015-11-11) ------------------------------------------------------------------------ o Fix array rows() method (The method inherited from Eigen would report the length of the underlying vector). 
o Eliminate std::cout in CRAN version. o MCMC samplers (HMC, NUTS and RWM) added by Cole Monnahan. ------------------------------------------------------------------------ TMB 1.6.0 (2015-10-08) ------------------------------------------------------------------------ o Major changeset to get closer to CRAN submission - Added 'dontrun' to most examples. Reason: All useful TMB examples take too long time to run because they require compilation. - Removed TMB-internals. - Added Roxygen for remaining functions: dynlib() runSymbolicAnalysis() config() plot.parallelBenchmark() summary.sdreport() print.sdreport() print.backtrace() plot.tmbprofile() confint.tmbprofile() - Got rid of global configuration, e.g flagsDefaults(), newtonDefaults() etc. - Reworked newtonOption() and adapted all examples to new syntax. (NOTE: not entirely backwards compatible since to old version modified global settings, which we do not allow anymore). - Fixed several typos, e.g. c++ -> C++ - CITATION: Added arxiv reference. - DESCRIPTION: Changed title and added URL. - NAMESPACE: Cleanup o CppAD jacobian: prefer reverse mode over forward mode if work is equal - TMB only requires 1st order reverse (not forward) mode to be implemented for atomic functions. This can result in the error 'order not implemented' if CppAD automatically selects forward over reverse mode. - In cases where the work of forward and reverse mode are equal it is therefore more natural (from TMB perspective) to select reverse mode. o Document parallel_accumulator #122 o Lots of doxygen documentation updates ------------------------------------------------------------------------ TMB 1.5-1 (2015-10-04) ------------------------------------------------------------------------ o From now on the github version follows the R version o Added NEWS file o Emacs mode version 3.0 from Arni Magnusson o Robustify tmbprofile() #137 o Experimental oneStepPredict() now handles discrete case as part of a general 'oneStepGeneric' method. 
o tmbprofile() multiple changes - Allow specification of a valid range of the parameter. - Get more detail in the center of the distribution. (does not affect cases where likelihood has been optimized) o C++: New namespace 'autodiff' makes it easy to use AD from the user template. o C++: New namespace 'romberg' with wrappers around CppAD integration routines. o Automatic retape when loading saved ADFun objects. o Keep attribute 'check.passed' when subsetting parameter list o Fix 32 bit o match pattern: Improvement for large problems. As a result it is now be possible to handle 2^31-1 nonzeros in Cholesky factor. Old method would break down around 2^30. o MVNORM_t: Method to evaluate marginals. o oneStepPredict() now works with maps #108. o Lots of improvements and cleanups from Martin Maechler. ------------------------------------------------------------------------ TMB 1.5-0 (2015-08-10) ------------------------------------------------------------------------ o Add pnorm/qnorm one-argument vectorized versions. o bugfix: Vectorized math operations crashed in the zero-length case. o Add missing bounds check to array class #100 - Now checks each index component (not just the re-mapped univariate index). o BesselK atomic + Matern correlation function. o asSparseMatrix and asSparseVector fix: Conversion from dense to sparse should not drop zero-entries that could potentially become nonzero (i.e. parameter dependent entries). o CppAD::Variable now works for TMB models. o Script to enable 'runSymbolicAnalysis' on all Linux and OS X systems. o New experimental function 'oneStepPredict' for OSA residuals with ar1xar1 as an example. o New atomic function 'ppois'. o REPORT: Allow report vector of 'anything'. o getUserDLL: Improve guess of user's DLL code (when DLL not given to MakeADFun). o Eliminate warning with '-Wpedantic'. o sdreport new option: ignore.parm.uncertainty. o New example: HMM filter. 
o General 1D likelihood profile function + method to get profile confidence intervals + plot method. o MakeADFun: New option to move outer parameters to the inner problem (purpose: get same speed as the REML trick for linear fixed effects without loosing the ML interpretation). o MakeADFun: New argument 'silent' to disable all tracing output. ------------------------------------------------------------------------ TMB 1.4-0 (2015-04-16) ------------------------------------------------------------------------ o CppAD updated to development version. Reduced peak memory usage with up to 50% for large models. o New configuration variables. o Avoid crashes due to memory allocation errors. o Print index errors to error stream. o Allow compilation with -std=c++11. o sdreport: bias.correct optimizations. o Implement up to 7d array (like admb). o Allow lists as data object with R-inla SPDE structures as an example. ------------------------------------------------------------------------ TMB 1.0-0 (2013-09-10) ------------------------------------------------------------------------ o Initial release. TMB/src/0000755000176200001440000000000014641202563011466 5ustar liggesusersTMB/src/utils.c0000644000176200001440000000135514637523572013011 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 # ifdef _OPENMP #include # endif # include # include /* openmp controller */ SEXP omp_num_threads(SEXP x) { #ifdef _OPENMP if( !isNull(x) ){ int n = INTEGER(x)[0]; omp_set_num_threads( n ); } return ScalarInteger( omp_get_max_threads() ); #else warning("OpenMP not supported."); return ScalarInteger( 0 ); #endif } /* Avoid S4 overhead when changing x-slot: Set xslot to SEXP pointer i.e. x@x <- y */ SEXP setxslot(SEXP x, SEXP y){ setAttrib(x,install("x"),y); return x; } /* Is external pointer nil ? 
*/ SEXP isNullPointer(SEXP pointer) { return ScalarLogical(TYPEOF(pointer) == EXTPTRSXP && !R_ExternalPtrAddr(pointer)); } TMB/src/solve_subset.c0000644000176200001440000003046214634646741014370 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 /* ========================================================== Supernodal version of solvesubset for CHOLMOD supernodal sparse Cholesky structures. Description: * Given the factorization A=LL'. * Calculate the inverse S=A^-1 on the sparseness pattern of LL'. NOTE: In the dense case this is equivalent with "DPOTRI". Algorithm (Recursions): * s = indices of supernode * p = non-zero indices below supernode [ L(s,s) L(s,p) ] [ S(s,s) S(s,p) ] [ L(p,s) L(p,p) ] [ S(p,s) S(p,p) ] 1. S(s,p) = -L(s,s)^t^-1 * L(s,p) * S(p,p) 2. S(s,s) = -L(s,s)^t^-1 * L(s,p) * S(p,s) + (L(s,s)*L(s,s)^t)^-1 Rewritten: M0 = (L(s,s)*L(s,s)^t)^-1 (DPOTRI) M = -L(p,s) * L(s,s)^-1 (DTRSM) S(p,s) = S(p,p) * M (DSYMM) S(s,s) = M^t * S(p,s) + M0 (DGEMM) ====> IMPLEMENTATION: Compute dense submatrix Lss=L(s,s) If p not empty { 1. Compute M = -L(p,s) * L(s,s)^-1 : Compute dense submatrix M=L(p,s) M = - M * Lss^-1 DTRSM("R", "L",np,ns,-1.0,Lss,ns,M,np); 2. Compute S(p,s) = S(p,p) * M Compute dense submatrix Spp=S(p,p) 3. Compute S(s,s) = M^t * S(p,s) + M0 } M0 = (L(s,s)*L(s,s)^t)^-1 = DPOTRI("U",dims,Lss,dims,&info); ========================================================== */ #define USE_FC_LEN_T #include #include #include #include #ifndef FCONE # define FCONE #endif #include "Matrix.h" #include /* Copy-pasted from "Writing R Extensions". A similar spell is present in Matrix.h so might not be needed anymore ? 
*/ #ifdef __GNUC__ // this covers gcc, clang, icc # undef alloca # define alloca(x) __builtin_alloca((x)) #elif defined(HAVE_ALLOCA_H) // needed for native compilers on Solaris and AIX # include #endif extern cholmod_common c; // See init.c #if !defined(R_MATRIX_PACKAGE_VERSION) || R_MATRIX_PACKAGE_VERSION < R_Version(1, 7, 1) /* In Matrix < 1.7-1, the registered routine underlying AS_CHM_FR */ /* called cholmod_check_factor unconditionally, requiring much overhead. */ /* Hence, when TMB is compiled linking old Matrix, we redefine AS_CHM_FR. */ #undef AS_CHM_FR #define AS_CHM_FR(x) tmb_as_cholmod_factor3((CHM_FR)alloca(sizeof(cholmod_factor)), x, FALSE) CHM_FR tmb_as_cholmod_factor3(CHM_FR ans, SEXP x, Rboolean do_check) { int *type = INTEGER(GET_SLOT(x, install("type"))); SEXP tmp; memset(ans, 0, sizeof(cholmod_factor)); /* zero the struct */ ans->itype = CHOLMOD_INT; /* characteristics of the system */ ans->dtype = CHOLMOD_DOUBLE; ans->z = (void *) NULL; ans->xtype = CHOLMOD_REAL; ans->ordering = type[0]; /* unravel the type */ ans->is_ll = (type[1] ? 1 : 0); ans->is_super = (type[2] ? 1 : 0); ans->is_monotonic = (type[3] ? 
1 : 0); SEXP Matrix_permSym = install("perm"); tmp = GET_SLOT(x, Matrix_permSym); ans->minor = ans->n = LENGTH(tmp); ans->Perm = INTEGER(tmp); ans->ColCount = INTEGER(GET_SLOT(x, install("colcount"))); ans->z = ans->x = (void *) NULL; SEXP Matrix_xSym = install("x"); tmp = GET_SLOT(x, Matrix_xSym); ans->x = REAL(tmp); if (ans->is_super) { /* supernodal factorization */ ans->xsize = LENGTH(tmp); ans->maxcsize = type[4]; ans->maxesize = type[5]; ans->i = (int*)NULL; tmp = GET_SLOT(x, install("super")); ans->nsuper = LENGTH(tmp) - 1; ans->super = INTEGER(tmp); tmp = GET_SLOT(x, install("pi")); ans->pi = INTEGER(tmp); tmp = GET_SLOT(x, install("px")); ans->px = INTEGER(tmp); tmp = GET_SLOT(x, install("s")); ans->ssize = LENGTH(tmp); ans->s = INTEGER(tmp); } else { Rf_error("Unexpected"); } return ans; } #endif /* !defined(R_MATRIX_PACKAGE_VERSION) || ... */ SEXP tmb_destructive_CHM_update(SEXP L, SEXP H, SEXP mult) { CHM_FR f = AS_CHM_FR(L); CHM_SP A = AS_CHM_SP__(H); double mm[2] = {0, 0}; mm[0] = asReal(mult); // NB: Result depends if A is "dsC" or "dgC"; the latter case assumes we mean AA' !!! 
/* cholmod_factorize_p return value: TRUE: CHOLMOD_OK, CHOLMOD_NOT_POSDEF, CHOLMOD_DSMALL FALSE: CHOLMOD_NOT_INSTALLED, CHOLMOD_OUT_OF_MEMORY, CHOLMOD_TOO_LARGE, CHOLMOD_INVALID, CHOLMOD_GPU_PROBLEM */ if (!M_cholmod_factorize_p(A, mm, (int*)NULL, 0 /*fsize*/, f, &c)) /* -> ./CHOLMOD/Cholesky/cholmod_factorize.c */ error("cholmod_factorize_p failed: status %d, minor %lu of ncol %lu", (int) c.status, (unsigned long) f->minor, (unsigned long) f->n); int ok = (f->minor == f->n); // WAS: (c.status == CHOLMOD_OK); return ScalarLogical(ok); } SEXP tmb_CHMfactor_solve(SEXP L_, SEXP y_) { CHM_FR L = AS_CHM_FR(L_); int n = LENGTH(y_); CHM_DN y = N_AS_CHM_DN(REAL(y_), n, 1); SEXP x = PROTECT( NEW_NUMERIC( n ) ); CHM_DN sol = M_cholmod_solve(CHOLMOD_A, L, y, &c); memcpy(REAL(x), sol->x, n * sizeof(double)); M_cholmod_free_dense(&sol, &c); UNPROTECT(1); return x; } // Notes about the CHOLMOD super-nodal storage. // According to the documentation of CHOLMOD we have: // ================================================== // k1 = Super [s] ; /* s contains columns k1 to k2-1 of L */ // k2 = Super [s+1] ; // nscol = k2 - k1 ; /* # of columns in all of s */ // psi = Lpi [s] ; /* pointer to first row of s in Ls */ // psx = Lpx [s] ; /* pointer to first row of s in Lx */ // psend = Lpi [s+1] ; /* pointer just past last row of s in Ls */ // nsrow = psend - psi ; /* # of rows in all of s */ // * See discussion in cholmod_change_factor.c: // * (3) supernodal symbolic: A representation of the nonzero pattern of the // * supernodes for a supernodal factorization. There are L->nsuper // * supernodes. Columns L->super [k] to L->super [k+1]-1 are in the kth // * supernode. The row indices for the kth supernode are in // * L->s [L->pi [k] ... L->pi [k+1]-1]. The numerical values are not // * allocated (L->x), but when they are they will be located in // * L->x [L->px [k] ... L->px [k+1]-1], and the L->px array is defined // * in this factor type. 
/* Extract dense block x[p,q] of sparse matrix x */ CHM_DN densesubmatrix(CHM_SP x, int *p, int np, int *q, int nq, cholmod_common *c){ CHM_DN ans = M_cholmod_allocate_dense(np,nq,np,CHOLMOD_REAL,c); double *w = malloc(x->nrow*sizeof(double)); int *xi=x->i; int *xp=x->p; double *xx=x->x; double *ansx=ans->x; int col, row; for(int j=0;jsuper; int* Ls=L->s; int* Lpi=L->pi; int ncol=super[k+1]-super[k]; /* ncol of supernode */ int nrow=Lpi[k+1]-Lpi[k]; /* Number of rows in supernode */ /* q contains row-indices of *entire* supernode */ /* p contains row-indices excluding those of diagonal */ /* s contains row-indices of diagonal - setdiff(q,p) */ int* q=Ls+Lpi[k]; /* Pointer to L->s [L->pi [k]] */ int nq=nrow; /* length of q */ // int* p=q+ncol; /* Exclude triangle in diagonal */ int np=nq-ncol; /* length of p */ int* s=q; int ns=ncol; /* length of s */ /* do not sort because p is sorted */ int info; /* For lapack */ int i,j; double ONE=1.0, ZERO=0.0, MONE=-1.0; CHM_DN x = densesubmatrix(Lsparse,q,nq,q,nq,c); double *xx=x->x; double *Lss=xx, *Lps=xx+ns, *Ssp=xx+(nq*ns), *Spp=xx+(nq*ns+ns); /* Workspace to hold output from dsymm */ double *wrk=malloc(nq*ns*sizeof(double)); double *wrkps=wrk+ns; if(np>0){ F77_CALL(dtrsm)("R", "L", "N", "N", &np, &ns, &MONE, Lss, &nq, Lps, &nq FCONE FCONE FCONE FCONE); for(i=ns;ix; int *Lp=Lsparse->p; int m=Lp[s[0]]; for(j=0;j0){ */ /* flopcount[0]+=(Ns*(Ns+1))*0.5*Np; /\* dtrsm *\/ */ /* flopcount[1]+=Np*Np*Ns; /\* dsymm *\/ */ /* flopcount[2]+=2.0*(Ns*Ns*Ns)/3.0; /\* dpotri *\/ */ /* flopcount[3]+=Ns*Np*Ns; /\* dgemm *\/ */ /* } else { */ /* flopcount[2]+=2.0*(Ns*Ns*Ns)/3.0; /\* dpotri *\/ */ /* } */ /* } */ /* Clean up */ M_cholmod_free_dense(&x,c); free(wrk); } CHM_SP tmb_inv_super(CHM_FR Lfac, cholmod_common *c){ /* Convert factor to sparse without modifying factor */ CHM_FR Ltmp = M_cholmod_copy_factor(Lfac,c); CHM_SP L = M_cholmod_factor_to_sparse(Ltmp,c); M_cholmod_free_factor(&Ltmp,c); /* Loop over supernodes */ int 
nsuper=Lfac->nsuper; for(int k=nsuper-1;k>=0;k--)tmb_recursion_super(L,k,Lfac,c); /* Change to symm lower */ L->stype=-1; return L; } SEXP tmb_invQ(SEXP Lfac){ CHM_FR L=AS_CHM_FR(Lfac); CHM_SP iQ = tmb_inv_super(L, &c); return M_chm_sparse_to_SEXP(iQ, 1 /* Free */ , 0, 0, "N" /* Not unit */, R_NilValue); } void half_diag(CHM_SP A){ int ncol=A->ncol; double *Ax; int *Ai, *Ap, i; Ai=A->i; Ap=A->p; Ax=A->x; for(int j=0;jstype=0; /* Change to non-sym */ return M_chm_sparse_to_SEXP(iQ, 1 /* Free */ , -1 /* uplo="L" */ , 0, "N" /* Not unit */, R_NilValue); } /* Given sparse matrices A and B (sorted columns). Assume pattern of A is a subset of pattern of B. (This also includes cases where dimension of B larger than dim of A) Return integer vector p of same length as A@x such that " A@i == B@i[p] and A@j == B@j[p] " */ SEXP match_pattern(SEXP A_, SEXP B_){ CHM_SP A=AS_CHM_SP(A_); CHM_SP B=AS_CHM_SP(B_); int *Ai=A->i, *Bi=B->i, *Ap=A->p, *Bp=B->p; int ncol=A->ncol,i,j,k; int index; // index match SEXP ans; if(A->ncol>B->ncol)error("Must have dim(A)<=dim(B)"); PROTECT(ans=NEW_INTEGER(A->nzmax)); int *pans=INTEGER(ans); for(j=0;j=Bp[j+1]){ UNPROTECT(1); error("No match"); } } *pans=index+1; pans++; // R-index ! } } UNPROTECT(1); return ans; } /* Sparse version of 'insert zeros and modify diagonal' ( izamd ) keeping the sparsity pattern un-modified: A[:,p] <- 0; A[p,:] <- 0; diag(A)[p] <- d; A_ : Sparse matrix to modify mark_ : Logical (int) index vector of rows and columns. diag_ : Diagonal replacement (double). 
*/ SEXP tmb_sparse_izamd(SEXP A_, SEXP mark_, SEXP diag_){ CHM_SP A = AS_CHM_SP(A_); int *Ai=A->i, *Ap=A->p; double *Ax = A->x; int ncol=A->ncol; int *mark = INTEGER(mark_); double diag = REAL(diag_)[0]; int i, l=0; for(int j = 0; j < ncol; j++){ for(int k = Ap[j]; k < Ap[j+1]; k++){ i = Ai[k]; if (mark[i]) Ax[l] = 0; if (mark[j]) Ax[l] = 0; if ( (mark[i] || mark[j]) && (i == j) ) Ax[l] = diag; l++; } } return A_; } /* Half the diagonal of a matrix (note: modifies input) */ SEXP tmb_half_diag(SEXP A_){ CHM_SP A = AS_CHM_SP(A_); half_diag(A); return A_; } TMB/src/local_stubs.c0000644000176200001440000000013214634646734014156 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 #include "Matrix_stubs.c" TMB/src/init.c0000644000176200001440000000251514634646734012616 0ustar liggesusers#include #include #include #include "Matrix.h" cholmod_common c; #define CALLDEF(name, n) {#name, (DL_FUNC) &name, n} SEXP omp_num_threads(SEXP x); SEXP isNullPointer(SEXP pointer); SEXP setxslot(SEXP x, SEXP y); SEXP tmb_invQ(SEXP Lfac); SEXP tmb_invQ_tril_halfdiag(SEXP Lfac); SEXP match_pattern(SEXP A_, SEXP B_); SEXP tmb_sparse_izamd(SEXP A_, SEXP mark_, SEXP diag_); SEXP tmb_half_diag(SEXP A_); SEXP have_tmb_symbolic(void); SEXP tmb_symbolic(SEXP Qp); SEXP tmb_destructive_CHM_update(SEXP L, SEXP H, SEXP mult); SEXP tmb_CHMfactor_solve(SEXP L_, SEXP y_); static R_CallMethodDef CallEntries[] = { CALLDEF(omp_num_threads, 1), CALLDEF(isNullPointer, 1), CALLDEF(setxslot, 2), CALLDEF(tmb_invQ, 1), CALLDEF(tmb_invQ_tril_halfdiag, 1), CALLDEF(match_pattern, 2), CALLDEF(tmb_sparse_izamd, 3), CALLDEF(tmb_half_diag, 1), CALLDEF(have_tmb_symbolic, 0), CALLDEF(tmb_symbolic, 1), CALLDEF(tmb_destructive_CHM_update, 3), CALLDEF(tmb_CHMfactor_solve, 2), {NULL, NULL, 0} }; void R_init_TMB(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, (Rboolean)FALSE); M_R_cholmod_start(&c); c.error_handler = NULL; // Disable CHOLMOD 
warnings } void R_unload_TMB(DllInfo *dll) { M_cholmod_finish(&c); } TMB/src/external_metis.c0000644000176200001440000000645014634646734014700 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 #include #include #include #include "Matrix.h" #include #ifdef _USE_EXTERNAL_CHOLMOD_LIB_ /* ========================================================================== */ /* Error handler */ /* ========================================================================== */ void tmb_cholmod_error(int status, const char *file, int line, const char *message) { /* From CHOLMOD/Include/cholmod_core.h : ...status values. zero means success, negative means a fatal error, positive is a warning. */ warning(("Cholmod warning '%s' at file:%s, line %d"), message, file, line); } cholmod_factor *cholmod_analyze ( /* ---- input ---- */ cholmod_sparse *A, /* matrix to order and analyze */ /* --------------- */ cholmod_common *Common ) ; int cholmod_free_factor ( /* ---- in/out --- */ cholmod_factor **LHandle, /* factor to free, NULL on output */ /* --------------- */ cholmod_common *Common ) ; /* ========================================================================== */ /* Run the symbolic analysis and prepare workspace - only run once !!! 
*/ /* ========================================================================== */ SEXP tmb_symbolic(SEXP Qp){ cholmod_common c; M_R_cholmod_start(&c); /* TODO: More control from R */ c.nmethods=9; c.supernodal = CHOLMOD_SUPERNODAL; c.final_ll = TRUE; /* Return quickly if not positive definite */ c.quick_return_if_not_posdef=TRUE; c.error_handler=tmb_cholmod_error; int trace=1; CHM_SP Q = M_cholmod_copy(AS_CHM_SP(Qp), -1 /* symmetric lower */, 1 /*values*/, &c); CHM_FR LQ; CHM_FR LQ2; // Step 1: Run symbolic analysis with external cholmod library: if(trace)Rprintf("Entering externallib \n"); c.itype=CHOLMOD_INT; LQ = cholmod_analyze(Q, &c); /* get fill-reducing permutation */ if(trace)Rprintf("cholmod_analyze: status=%d \n",c.status); if(trace)Rprintf("Chosen ordering %d \n", c.selected); // Step 2: Grab the permutation: int *perm=LQ->Perm; // Step 3: Run symbolic analysis again, now with known permutation // using the R cholmod interface routines if(trace)Rprintf("Running symbolic analysis \n"); if(trace)Rprintf("User permutation \n"); c.nmethods=1; LQ2 = M_cholmod_analyze_p(Q, perm, NULL, 0, &c); cholmod_free_factor(&LQ,&c); // LQ Not needed anymore if(trace)Rprintf("Chosen ordering %d \n", c.selected); if(trace)Rprintf("Length of supernodal xslot %d \n", LQ2->xsize); if(trace)Rprintf("Flopcount %f \n", c.fl); double nnzL = LQ2->xsize; double nnzQ = Q->nzmax; if(trace)Rprintf("Fill-in ratio (nnz(L)/nnz(Q)) %f \n", nnzL/nnzQ); if(trace)Rprintf("Factor xtype %d \n", LQ2->xtype); // Step 4: Make sure factor has numerical values if(trace)Rprintf("Running numerical factorization \n"); M_cholmod_factorize(Q, LQ2, &c); if(trace)Rprintf("Done \n"); // Cleanup M_cholmod_free_sparse(&Q,&c); M_cholmod_finish(&c); return M_chm_factor_to_SEXP(LQ2, 1 /* Free */); } #else SEXP tmb_symbolic(SEXP Qp) { return R_NilValue; } #endif SEXP have_tmb_symbolic(void) { SEXP ans; PROTECT(ans = NEW_INTEGER(1)); #ifdef _USE_EXTERNAL_CHOLMOD_LIB_ INTEGER(ans)[0]=1; #else INTEGER(ans)[0]=0; 
#endif UNPROTECT(1); return ans; } TMB/src/Makevars0000644000176200001440000000015314641202777013170 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) $(SHLIB_OPENMP_CFLAGS) PKG_CFLAGS = $(SHLIB_OPENMP_CFLAGS) TMB/NAMESPACE0000644000176200001440000000377514634646741012146 0ustar liggesusers ###____________________ IMPORTS ___________________________________ ## Base R - packages ---------------------- importFrom("graphics", abline, legend, matplot, plot) importFrom("methods", as, is, new, cbind2, rbind2) importFrom("stats", approx, confint, integrate, median, nlminb, optim, optimHess, pchisq, qchisq, qnorm, rnorm, runif, sd, splinefun, uniroot, var, ks.test) importFrom("utils", as.relistable, relist, head, tail) ## Windows specific imports if(.Platform$OS.type == "windows"){ importFrom("utils", shortPathName) } ## TMB:::install.contrib importFrom("utils", download.file, file_test, unzip) ## TMB:::.onLoad importFrom("utils", packageVersion) ## Recommended R - packages ---------------------- importClassesFrom("Matrix")# all currently (FIXME) , corMatrix, dgCMatrix, dgTMatrix, dpoMatrix) importMethodsFrom("Matrix", coerce, cov2cor, determinant, drop, "%*%", crossprod,tcrossprod, t, diag, chol2inv, solve, colSums,rowSums) importFrom("Matrix", Cholesky, forceSymmetric, invPerm, tril, triu, Diagonal) ###____________________ EXPORTS ___________________________________ export(benchmark, checkConsistency, compile, config, dynlib, FreeADFun, gdbsource, MakeADFun, newton, newtonOption, normalize, oneStepPredict, openmp, plot.parallelBenchmark, plot.tmbprofile, precompile, print.backtrace, print.checkConsistency, print.sdreport, Rinterface, runExample, runSymbolicAnalysis, sdreport, SR, summary.checkConsistency, summary.sdreport, template, tmbprofile, tmbroot, TMB.Version) ## Methods : S3method(confint, tmbprofile) S3method(plot, tmbprofile) S3method(print, sdreport) S3method(summary, sdreport) S3method(as.list, sdreport) S3method(plot, parallelBenchmark) 
S3method(print, backtrace) S3method(print, checkConsistency) S3method(summary, checkConsistency) TMB/inst/0000755000176200001440000000000014634646734011672 5ustar liggesusersTMB/inst/include/0000755000176200001440000000000014641203001013263 5ustar liggesusersTMB/inst/include/lgamma.hpp0000644000176200001440000001466214641203001015243 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 /** \file \brief Gamma function and gamma probability densities */ /** \brief Logarithm of gamma function (following R argument convention). \ingroup special_functions */ template Type lgamma(Type x){ CppAD::vector tx(2); tx[0] = x; tx[1] = Type(0); return atomic::D_lgamma(tx)[0]; } VECTORIZE1_t(lgamma) /** \brief Logarithm of factorial function (following R argument convention). \ingroup special_functions */ template Type lfactorial(Type x){ CppAD::vector tx(2); tx[0] = x + Type(1); tx[1] = Type(0); return atomic::D_lgamma(tx)[0]; } VECTORIZE1_t(lfactorial) /* Old lgamma approximation */ template inline Type lgamma_approx(const Type &y) { /* coefficients for gamma=7, kmax=8 Lanczos method */ static const Type LogRootTwoPi_ = 0.9189385332046727418, lanczos_7_c[9] = { 0.99999999999980993227684700473478, 676.520368121885098567009190444019, -1259.13921672240287047156078755283, 771.3234287776530788486528258894, -176.61502916214059906584551354, 12.507343278686904814458936853, -0.13857109526572011689554707, 9.984369578019570859563e-6, 1.50563273514931155834e-7 }; Type x=y; int k; Type Ag; Type term1, term2; x -= Type(1.0); /* Lanczos writes z! instead of Gamma(z) */ Ag = lanczos_7_c[0]; for(k=1; k<=8; k++) { Ag += lanczos_7_c[k]/(x+k); } /* (x+0.5)*log(x+7.5) - (x+7.5) + LogRootTwoPi_ + log(Ag(x)) */ term1 = (x+Type(0.5))*log((x+Type(7.5))/Type(M_E)); term2 = LogRootTwoPi_ + log(Ag); return term1 + (term2 - Type(7.0)); } /** \brief Negative binomial probability function. 
\ingroup R_style_distribution Parameterized through size and prob parameters, following R-convention. */ template inline Type dnbinom(const Type &x, const Type &size, const Type &prob, int give_log=0) { Type n=size; Type p=prob; Type logres = lgamma(x+n)-lgamma(n)-lgamma(x+Type(1))+ n*log(p)+x*log(Type(1)-p); if (give_log) return logres; else return exp(logres); } VECTORIZE4_ttti(dnbinom) /** \brief Negative binomial probability function. \ingroup R_style_distribution Alternative parameterization through mean and variance parameters. */ template inline Type dnbinom2(const Type &x, const Type &mu, const Type &var, int give_log=0) { Type p=mu/var; Type n=mu*p/(Type(1)-p); return dnbinom(x,n,p,give_log); } VECTORIZE4_ttti(dnbinom2) /** \brief Negative binomial probability function. More robust parameterization through \f$log(\mu)\f$ and \f$log(\sigma^2-\mu)\f$ parameters. \ingroup R_style_distribution */ template inline Type dnbinom_robust(const Type &x, const Type &log_mu, const Type &log_var_minus_mu, int give_log=0) { CppAD::vector tx(4); tx[0] = x; tx[1] = log_mu; tx[2] = log_var_minus_mu; tx[3] = 0; Type ans = atomic::log_dnbinom_robust(tx)[0]; return ( give_log ? ans : exp(ans) ); } VECTORIZE4_ttti(dnbinom_robust) /** \brief Poisson probability function. 
\ingroup R_style_distribution */ template inline Type dpois(const Type &x, const Type &lambda, int give_log=0) { Type logres = -lambda + x*log(lambda) - lgamma(x+Type(1)); if (give_log) return logres; else return exp(logres); } VECTORIZE3_tti(dpois) /** \brief Density of X where X~gamma distributed \ingroup R_style_distribution */ template Type dgamma(Type y, Type shape, Type scale, int give_log=0) { Type logres=-lgamma(shape)+(shape-Type(1.0))*log(y)-y/scale-shape*log(scale); if(give_log)return logres; else return exp(logres); } VECTORIZE4_ttti(dgamma) /** \brief Density of log(X) where X~gamma distributed \ingroup R_style_distribution */ template inline Type dlgamma(Type y, Type shape, Type scale, int give_log=0) { Type logres=-lgamma(shape)-shape*log(scale)-exp(y)/scale+shape*y; if(give_log)return logres; else return exp(logres); } VECTORIZE4_ttti(dlgamma) /** \brief Zero-Inflated Poisson probability function. \ingroup R_style_distribution * \details \param zip is the probaility of having extra zeros */ template inline Type dzipois(const Type &x, const Type &lambda, const Type &zip, int give_log=0) { Type logres; if (x==Type(0)) logres=log(zip + (Type(1)-zip)*dpois(x, lambda, false)); else logres=log(Type(1)-zip) + dpois(x, lambda, true); if (give_log) return logres; else return exp(logres); } VECTORIZE4_ttti(dzipois) /** \brief Zero-Inflated negative binomial probability function. \ingroup R_style_distribution * \details Parameterized through size and prob parameters, following R-convention. No vectorized version is currently available. 
\param zip is the probaility of having extra zeros */ template inline Type dzinbinom(const Type &x, const Type &size, const Type &p, const Type & zip, int give_log=0) { Type logres; if (x==Type(0)) logres=log(zip + (Type(1)-zip)*dnbinom(x, size, p, false)); else logres=log(Type(1)-zip) + dnbinom(x, size, p, true); if (give_log) return logres; else return exp(logres); } /** \brief Zero-Inflated negative binomial probability function. \ingroup R_style_distribution * \details Alternative parameterization through mean and variance parameters (conditional on not being an extra zero). No vectorized version is currently available. \param zip is the probaility of having extra zeros */ template inline Type dzinbinom2(const Type &x, const Type &mu, const Type &var, const Type & zip, int give_log=0) { Type p=mu/var; Type n=mu*p/(Type(1)-p); return dzinbinom(x,n,p,zip,give_log); } /********************************************************************/ /* SIMULATON CODE */ /********************************************************************/ extern "C" { double Rf_rnbinom(double n, double p); } /** \brief Simulate from a negative binomial distribution */ template Type rnbinom(Type n, Type p) { return Rf_rnbinom(asDouble(n), asDouble(p)); } VECTORIZE2_tt(rnbinom) VECTORIZE2_n(rnbinom) /** \brief Simulate from a negative binomial distribution */ template Type rnbinom2(Type mu, Type var) { Type p = mu / var; Type n = mu * p / (Type(1) - p); return Rf_rnbinom(asDouble(n), asDouble(p)); } VECTORIZE2_tt(rnbinom2) VECTORIZE2_n(rnbinom2) TMB/inst/include/checkpoint_macro.hpp0000644000176200001440000002743114641203000017312 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 /* Given function f0. Define recursively higher order reverse mode derivatives: f0: R^(n) -> R^(m) ( x -> f0 (x) ) f1: R^(n+m) -> R^(n) ( (x,w1) -> f0'(x)*w1 ) f2: R^(n+m+n) -> R^(n+m) ( (x,w1,w2) -> f1'(x)*w2 ) f3: R^(n+m+n+n+m) -> R^(n+m+n) ( (x,w1,w2,w3) -> f2'(x)*w3 ) 1. 
We define a 'generalized symbol' to represent all of these. _Reverse_mode_AD_ is trivially obtained for this symbol by calling itself on a higher level. Each occurance on the tape will occupy O(n+m) memory units independent on the number of flops performed by f0. 2. _Double_versions_ of the generalized symbol are obtained using nested AD types to tape f0, then recursively tape forward and reverse mode sweeps. Finally, given (1) and (2) the macro TMB_ATOMIC_VECTOR_FUNCTION will generate the atomic symbol. */ /* general helper functions */ #ifdef CPPAD_FRAMEWORK namespace atomic{ /** \name User defined atomic functions \note The following procedure is automatically performed with the macro REGISTER_ATOMIC. \verbatim Given function f0. Define recursively higher order reverse mode derivatives: f0: R^(n) -> R^(m) ( x -> f0 (x) ) f1: R^(n+m) -> R^(n) ( (x,w1) -> f0'(x)*w1 ) f2: R^(n+m+n) -> R^(n+m) ( (x,w1,w2) -> f1'(x)*w2 ) f3: R^(n+m+n+n+m) -> R^(n+m+n) ( (x,w1,w2,w3) -> f2'(x)*w3 ) 1. We define a 'generalized symbol' to represent all of these. _Reverse_mode_AD_ is trivially obtained for this symbol by calling itself on a higher level. Each occurance on the tape will occupy O(n+m) memory units independent of the number of flops performed by f0. 2. _Double_versions_ of the generalized symbol are obtained using nested AD types to tape f0, then recursively tape forward and reverse mode sweeps. Finally, given (1) and (2) the macro TMB_ATOMIC_VECTOR_FUNCTION will generate the atomic symbol. \endverbatim @{ */ /** \brief Construct a tape of a given template _functor_ (Will be used to tape 'f0' for different nested AD types) */ template CppAD::ADFun* generate_tape(Func f, vector x_){ Rcout << "Generating tape\n"; int n=x_.size(); vector > x(n); for(int i=0;i(x_[i]); CppAD::Independent(x); vector > y=f(x); vector > y2(y.size()); for(int i=0;i* padf=new CppAD::ADFun(x,y2); return padf; } /** \brief Lift tape of fn up one level by taping forward and reverse sweeps. 
Note: x_ needs only have length equal to the input domain dimension of f0. Zeros are filled in for all range directions. */ template CppAD::ADFun* forrev(CppAD::ADFun >* padf, vector x_){ size_t n=padf->Domain(); size_t m=padf->Range(); vector > x(n+m); vector > y(n); for(int i=0;i(x_[i]); for(int i=x_.size();i(0); vector > tmp1(n); vector > tmp2(m); CppAD::Independent(x); for(size_t i=0;iForward(0,tmp1); y = padf->Reverse(1,tmp2); CppAD::ADFun* padf2=new CppAD::ADFun(x,y); delete padf; return padf2; } /** \brief Recursively apply forrev until the lowest Base level (double) */ template CppAD::ADFun* multi_forrev(CppAD::ADFun* padf, vector x_){ return multi_forrev(forrev(padf, x_), x_); } template <> CppAD::ADFun* multi_forrev(CppAD::ADFun* padf, vector x_) CSKIP({ return padf; }) /** \brief Tape symbol up to any order */ template CppAD::ADFun* tape_symbol(Func f, vector x){ typedef typename Func::ScalarType::value_type Base; CppAD::ADFun* f0=generate_tape(f,x); CppAD::ADFun* fn=multi_forrev(f0,x); return fn; } #ifdef _OPENMP #define NTHREADS config.nthreads #define THREAD omp_get_thread_num() #else #define NTHREADS 1 #define THREAD 0 #endif /** \brief General class to construct 'double versions' of the generalized symbol. 
*/ template class UserFunctor> struct forrev_derivatives{ bool initialized; int n,m; forrev_derivatives(){ initialized=false; } /* ADFun pointers used by the double versions indexed as vpf[thread][level] */ CppAD::vector* > > vpf; void cpyADfunPointer(CppAD::ADFun* padf, int i){ padf->optimize(); vpf[0][i] = padf; /* Copy object for other threads */ for(int thread=1;thread(); vpf[thread][i]->operator=(*padf); } } void do_init(vector x){ UserFunctor f; n=x.size(); m=f(x).size(); UserFunctor > f0; UserFunctor > > f1; UserFunctor > > > f2; UserFunctor > > > > f3; vpf.resize(NTHREADS); for(int thread=0;thread x){ if(!initialized){ do_init(x); initialized=true; } } int get_output_dim(int input_dim){ int output_dim=-1; // Fibonacci type recursion for each 'column' if (input_dim == n) output_dim = m; else if (input_dim == n+m) output_dim = n; else if (input_dim == n+m+n) output_dim = n+m; else if (input_dim == n+m+n+n+m) output_dim = n+m+n; else Rf_error("get_output_dim failed"); return output_dim; } // Calculate level from input dimension int get_level(int input_dim){ int level=-1; if (input_dim == n) level = 0; else if (input_dim == n+m) level = 1; else if (input_dim == n+m+n) level = 2; else if (input_dim == n+m+n+n+m) level = 3; else Rf_error("get_level failed"); return level; } // Evaluate CppAD::vector operator()(CppAD::vector tx){ int level = get_level(tx.size()); return vpf[THREAD][level]->Forward(0,tx); } }; /* end class forrev_derivatives */ #undef NTHREADS #undef THREAD /** \brief Wrap user function into a functor, generate double versions, and construct atomic function in a namespace */ #define REGISTER_ATOMIC(USERFUNCTION) \ namespace USERFUNCTION##NAMESPACE{ \ template \ struct UserFunctor{ \ typedef Type ScalarType; \ vector operator()(vector x){ \ return USERFUNCTION(x); \ } \ }; \ atomic::forrev_derivatives double_version; \ TMB_ATOMIC_VECTOR_FUNCTION( \ generalized_symbol \ , \ double_version.get_output_dim(tx.size()) \ , \ ty = double_version(tx); \ , \ 
CppAD::vector concat(tx.size() + py.size()); \ for(size_t i=0; i < tx.size(); i++) concat[i] = tx[i]; \ for(size_t i=0; i < py.size(); i++) concat[tx.size()+i] = py[i]; \ px = generalized_symbol(concat); \ ) \ template \ vector generalized_symbol(vector x){ \ CppAD::vector xx(x.size()); \ for(int i=0;i yy=generalized_symbol(xx); \ vector y(yy.size()); \ for(int i=0;i USERFUNCTION(vector x){ \ USERFUNCTION##NAMESPACE::double_version.init(x); \ return USERFUNCTION##NAMESPACE::generalized_symbol(x); \ } \ vector > USERFUNCTION(vector > x){ \ return USERFUNCTION##NAMESPACE::generalized_symbol(x); \ } \ vector > > USERFUNCTION(vector > > x){ \ return USERFUNCTION##NAMESPACE::generalized_symbol(x); \ } \ vector > > > USERFUNCTION(vector > > > x){ \ return USERFUNCTION##NAMESPACE::generalized_symbol(x); \ } /** @} */ } /* end namespace atomic */ #endif // CPPAD_FRAMEWORK #ifdef TMBAD_FRAMEWORK namespace atomic { /** \brief User interface to checkpointing using TMBad \details This is the recommended way of constucting atomic functions without knowing the reverse mode derivatives. Usage: Within `objective_function` (or any other function) one can do ``` AtomicLocal F(Functor()); ``` - Each call to `F(x)` only generates one new operatation on the ad stack. - Thread safe because local. - `F` is owned by the operation stack and is automatically freed when no longer needed. */ template struct AtomicLocal { typedef TMBad::StdWrap > StdWrapFunctor; Functor F; TMBad::ADFun<> Tape; AtomicLocal(const Functor &F) : F(F) {} template vector operator()(const vector &x) { if ( (size_t) x.size() != Tape.Domain() ) { Tape = TMBad::ADFun<>( StdWrapFunctor(F), x).atomic(); } std::vector x_(x.data(), x.data() + x.size()); std::vector y_ = Tape(x_); vector y(y_); return y; } vector operator()(const vector &x) { return F(x); } }; /** \brief For backwards compatibility with CppAD \details Allocate an atomic function for each thread. Main purpuse is to support the `REGISTER_ATOMIC` macro. 
\tparam Functor assumed to have a default CTOR \warning Static atomic functions are never freed. */ template struct AtomicGlobal { #ifdef _OPENMP #define NTHREADS config.nthreads #define THREAD omp_get_thread_num() #else #define NTHREADS 1 #define THREAD 0 #endif std::vector< AtomicLocal >* p_; AtomicGlobal() { static std::vector< AtomicLocal >* p = new std::vector< AtomicLocal > (NTHREADS, Functor() ); p_ = p; } template vector operator()(const vector &x) { return ((*p_)[THREAD])(x); } #undef NTHREADS #undef THREAD }; #define REGISTER_ATOMIC(USERFUNCTION) \ namespace USERFUNCTION##NAMESPACE { \ template \ struct UserFunctor { \ typedef Type ScalarType; \ vector operator()(const vector &x) { \ return USERFUNCTION(x); \ } \ }; \ } \ vector USERFUNCTION(const vector &x) { \ typedef USERFUNCTION##NAMESPACE::UserFunctor Functor; \ return atomic::AtomicGlobal()(x); \ } \ vector USERFUNCTION(const vector &x) { \ typedef USERFUNCTION##NAMESPACE::UserFunctor Functor; \ return atomic::AtomicGlobal()(x); \ } } // End namespace atomic #endif // TMBAD_FRAMEWORK TMB/inst/include/convenience.hpp0000644000176200001440000001310514641203000016267 0ustar liggesusers// Copyright (C) 2013-2015 Kasper Kristensen // License: GPL-2 /** \file \brief Templates to get convenient R-like syntax. */ /** \brief Similar to R's split function: split(x,fac) devides x into groups defined by fac . * \details Returns a "vector of vectors". */ template vector > split(vector x, vector fac) { if (x.size() != fac.size()) Rf_error("x and fac must have equal length."); int nlevels = 0; for (int i = 0; i < fac.size(); i++) if (fac[i] >= nlevels) nlevels = fac[i] + 1; vector > ans(nlevels); vector lngt(nlevels); lngt.setZero(); for (int i = 0; i < fac.size(); i++) lngt[fac[i]]++; for (int i = 0; i < nlevels; i++) ans[i].resize(lngt[i]); lngt.setZero(); for (int i = 0; i < fac.size(); i++) { ans[fac[i]][lngt[fac[i]]] = x[i]; lngt[fac[i]]++; } return ans; } /** Sum of vector, matrix or array */ template