glmnet/0000755000175000017500000000000014140304132011662 5ustar nileshnileshglmnet/MD50000644000175000017500000004222214140304132012174 0ustar nileshnilesh7eadd2af54e333025e1f11159dd379c5 *DESCRIPTION b3a2e443b697130de588b50a907b6d5a *NAMESPACE 8b706e28eea2b04105d04c5d28dc74cf *NEWS.md b7e403cbac1975dfabbd5b1e6ec68cfc *R/Cindex.R 747cac2d02f1ed0910e1f37a62969d65 *R/RcppExports.R 4bd3eb09b5b5e4d5dd4a6d885c734b0d *R/assess.coxnet.R 7172891b9097b44ef3d8951153eee111 *R/assess.glmnet.R 6d3a93c6972ccbfee9aa0c264cb08ad3 *R/auc.R 8cd96999dce298c1d81ce51ea2f6d2a3 *R/auc.mat.R 8b2f65e1c21c123294a132887081c627 *R/bigGlm.R d8808544bfbafb423ce60e359ade4b64 *R/blend.relaxed.R 04a38d922c4c78910aee28eb063fd474 *R/buildPredmat.array.R 70162e7910431ac9e89fcb5526469b84 *R/buildPredmat.coxnetlist.R 019453d01483ecda68866eaa73e1df3c *R/buildPredmat.default.R 885a478709d197d973cdc621cd3e5225 *R/check.dots.R 612fef55db8dfaf923f5b986d8dad11e *R/check.exclude.R 05d1834f8f3dc30e0428eb6d1dcb1ba1 *R/coef.cv.glmnet.R da1e68b0c4ae43a6ffcaa2ea3d7c287a *R/coef.cv.relaxed.R f288cf974491d93b56e5c4d6d204d658 *R/coef.glmnet.R a42ad691868ee94b3eae743299af81bc *R/coef.relaxed.R ab05dba77ad585b2ac3a465c26fc2b00 *R/coefnorm.R ee4cc296f6a922b28ba89fe64fa5ce56 *R/confusion.glmnet.R b7db022f09a79b60e233bc933cc961ce *R/coxgrad.R b2ee26c95947789e26860e88068ac7b1 *R/coxnet.R 836fa1cdc1092e7a00f781fff99c89a7 *R/coxnet.deviance.R 1f8e6fe29cea55e0c6ddf9edd4561458 *R/coxpath.R 9c81bd7c22c3ab10099606ad4aaabdf6 *R/cv.coxnet.R b3593fdbb51ba45fa9581d246850de4f *R/cv.elnet.R b2f4b2ab04489c3378cc4532551720ba *R/cv.fishnet.R bf30eb6653ad0f5c05e5e0c655718899 *R/cv.glmnet.R 987e0f369d7652826d113042b0d1a8da *R/cv.glmnet.raw.R 1500e23337c4de6276f9745adfbcefcf *R/cv.glmnetfit.R 86747e59ed9762208d848cf84d815095 *R/cv.lognet.R 5f5234bbe3dc2fbc77015609ad7094d7 *R/cv.mrelnet.R ece778c00ca6b58e088f6a700d23e15b *R/cv.multnet.R 54bb7948621814784ce66b4efa3b97d9 *R/cv.relaxed.raw.R c739878e7fc617dab75e8cd65cf9b356 *R/cvcompute.R 
2b98d1871792cda71bd9323e9ee2a0b0 *R/cvstats.R 90e2fb9130f89ddaef65e43202dc4261 *R/cvtype.R 9a25662a16138900203c201bb4bd057b *R/data.R 959eaac0e190795f3ee1ced251d60a90 *R/deviance.glmnet.R 73741eabd56bb9e084c8cc859538698a *R/elnet.R 0c6c0b1dfa5c98b55f2e9e097df3747d *R/error.bars.R 9cf716424fdcf017725b7941fed3727c *R/family.glmnet.R a4660a7abf8a28e207417120e5b10957 *R/fishnet.R ebce52c8995ee16c058c2aa93bdcf582 *R/fix.lam.R c65547cba8e3d5ad16b28a84881e7fca *R/getOptcv.glmnet.R b0325f5e584fbe646aedb73d02f6b86d *R/getOptcv.relaxed.R dd14d7acb21ae8f1bb87a54ea763950b *R/getcoef.R c1a8bceef71c915555f8bffc7298137e *R/getcoef.multinomial.R c10ada4699c1cd7c0aa598ef73098b8c *R/glmnet-package.R f0d0d7f6a09f94dcef51e0b633014ebd *R/glmnet.R e0da0e93d1a87cc5d5342050a15e6354 *R/glmnet.control.R 741a587449cd2f3613c2e52f25bc0280 *R/glmnet.measures.R 97cb083d181bd866006b3796dc3179c4 *R/glmnetFlex.R a6224dfa92ac162a19e455b90ec64108 *R/glmnet_softmax.R 5bb185a77e6a3cdae7401bd4ffcaf8f7 *R/jerr.R 65abc981c18160e0a229da5225aeb812 *R/jerr.coxnet.R 5acd474cede565ec9bff39674d9cdac0 *R/jerr.elnet.R 0ee642ef97444c8b0c05b43face38094 *R/jerr.fishnet.R cb426164b68fd511d50afddd54af550f *R/jerr.lognet.R 955ddc9c91f5cb11e10bce2feb17b4aa *R/jerr.mrelnet.R 2448cfc1a8d2d38e7e3289b2af7b5600 *R/lambda.interp.R de7722522b536bc2b8d48e8f1892f76f *R/lognet.R dca4e022c62f90ac14272ddf8d4a1214 *R/makeX.R de3fcbceced4117b989ef8083113da6c *R/mrelnet.R 17d20a46471df38f1d1c6bb7cb16421a *R/na.mean.R 874f190461e3637bb4fe16c7da03ea6c *R/nonzeroCoef.R 47c7b014ce112b5eae37781f6a8c2ee4 *R/onAttach.R c63389262e77051b913d9bb3d9b5e5a1 *R/pb.R a71f87425fd5e31f66f607daee2c69e9 *R/plot.cv.glmnet.R ba6f5e462e5b0fb2f1240321600534b5 *R/plot.cv.relaxed.R 58a31af3fb88f68e7d8dde0b10c893ae *R/plot.glmnet.R ea50b48783cf8f7f19578990a2fcd656 *R/plot.mrelnet.R 463f4e07c1ce08ba03746f9bc7fcb327 *R/plot.multnet.R 18015a23b5efc79de433ac2b1fa5746a *R/plot.relaxed.R 636feb1ac600bb09dccdfabfecc52c1d *R/plotCoef.R fa0986bb58c46a8481b78b877a6e7636 
*R/predict.coxnet.R 56a20beb9e09cba4456a611d6e385ca4 *R/predict.cv.glmnet.R 46ef747bea2ff68cef5ef2ffbb5bb466 *R/predict.cv.relaxed.R 459ca7b37d845f842e8b5681a2e3a5aa *R/predict.elnet.R 0f470237229a0f3b973d29ef3683d73b *R/predict.fishnet.R 1702dafa0f89817b8c77d231ad9224c3 *R/predict.glmnet.R 3272244ba2c54519d973d5a4cbc2df8a *R/predict.lognet.R 1bc0c68ce624df57b07bdabfd1e84597 *R/predict.mrelnet.R 02fc6050ced4badab91cc882e06f3bb5 *R/predict.multnet.R ed58f90aa0b7d7a5ead21c0838d5b330 *R/predict.relaxed.R 70cd43e9955641d2041c50030491a921 *R/print.bigGlm.R db093ef96748c43424e48993e2c23b0c *R/print.confusion.table.R 33d0f53e0210a76bcc6d6372b7a7577b *R/print.cv.glmnet.R f57e0561181a529df64e8b15133a0f2b *R/print.cv.relaxed.R 85eea47cfd8403a66297ea317bad0be3 *R/print.glmnet.R a5e64ca8661f9b618a0e59872956c921 *R/relax.glmnet.R 0f7e686cd02800174b7dc2165163d843 *R/response.coxnet.R b012181c241a4f6699d73f63bec2bd1d *R/rmult.R 533da5bd46d7fed918af5a04b1539d6c *R/roc.glmnet.R 732890f5e88bddcd5b331928863c8d27 *R/stratifySurv.R e44a2d10d5135f6931a13542d8996d56 *R/survfit.coxnet.R e2f22c34e46e1ec2964d1968e7116342 *R/survfit.cv.glmnet.R 0d040824da1ffcd1edb57b252f6b1f6e *R/zeromat.R 8920f300fb3d337d7048ef8af005d41e *README.md 58fd9f932b3d8dfb3cf73564f0bf8f6c *build/vignette.rds 0814c5a080c9ba7874362051b6cbd89f *data/BinomialExample.rda 7a404bc6a255ac95a6cc06cf4fe6ddbf *data/CVXResults.RData 9497636bb0edb7f76c792f33c47ba681 *data/CoxExample.rda a72ec47b364e0eb3a85fc2d402c7ac16 *data/MultiGaussianExample.rda 892fa8ad5b2ff24f9c989ebcec41bfb8 *data/MultinomialExample.rda cac6b8e217715098a5ee94e4066c1a4b *data/PoissonExample.rda 35754bd4878bc234696cc2d9b880d90e *data/QuickStartExample.rda 54067bd791ff172cff97ff9d26b6de12 *data/SparseExample.rda edcd84fb25d51203916ae3a39b1fc655 *inst/CITATION a93a7d8728d7660982e08bba82f720dc *inst/doc/Coxnet.R 0bb3a929eff4b5695d75d5c08ae8360c *inst/doc/Coxnet.Rmd d46408e0e84366bef28249a00c0dff3e *inst/doc/Coxnet.pdf 9ae5eaa92f330dbf58b156e1b5871dda 
*inst/doc/glmnet.R 79ccc7b5e10f841e74ed9612b0e10cfd *inst/doc/glmnet.Rmd d4013be5184e805bb2bb2c6279a07fb2 *inst/doc/glmnet.pdf 85579e41e7c36b006e98a47bb0ca7f6f *inst/doc/glmnetFamily.R cab72afa072399ca5ae1906987ef89f4 *inst/doc/glmnetFamily.Rmd 1251d72b1407436b2926ccdef5756c30 *inst/doc/glmnetFamily.pdf f3b0466d566df6079e63fe164a809f82 *inst/doc/relax.R e1dc532562ac69525a0b9c3c095be562 *inst/doc/relax.Rmd bf517a19d482af577f8738f07e1d90d1 *inst/doc/relax.pdf 64c471277309951b4697ffdd1ee15fb8 *inst/mortran/README.Rmd a475108243c0db9c33089a0275c7eaba *inst/mortran/glmnet5dpclean.m d8516f118db49ea4fbde5d5b2f6e67a6 *inst/mortran/wls.m 52e95162e7dc0e62ec329602b05cd001 *inst/testscripts/save_results.R 6e03129049a39674c7ea15de8339511d *man/BinomialExample.Rd 5a4389a6279bf8f6db41b6c7c5862bf9 *man/Cindex.Rd 9f14f46f2d349831ddf8fd8958bf69ef *man/CoxExample.Rd e472e3fbbaa3c1286891a59a561123ad *man/MultiGaussianExample.Rd 2023e16a435a534ce184ddbb01a04bb7 *man/MultinomialExample.Rd 287fe75c09580f4475f97e6a4653a2ca *man/PoissonExample.Rd 81a89d362e387093991343cbbe045fce *man/QuickStartExample.Rd d9cb26ef685c853112acfa17afee001f *man/SparseExample.Rd db803cacf2e2783af427a53ae29000de *man/assess.glmnet.Rd 404ddf86d813ed00f2ddfaf0cc88bf5f *man/beta_CVX.Rd 413f29637810f0c84718ca2f5bb00ab1 *man/bigGlm.Rd 9b08d40ce7e666b059b2679f2c878d9f *man/cox.fit.Rd e6388e65131a54875a8f44c9e536e17e *man/cox.path.Rd 02d8b57b5fe0bb24c5a4577fdbeb8649 *man/cox_obj_function.Rd 46f6b21620d2ef64e75df227bb2a662f *man/coxgrad.Rd 14afe7c0215649ac50246ef46b7e0b93 *man/coxnet.deviance.Rd a24de34ec09c91fd7ac2e2a002a74bef *man/cv.glmnet.Rd 18d78ba828a20778f74374075a832d81 *man/dev_function.Rd 9ade2a133f3c6e4049a766a561a05145 *man/deviance.glmnet.Rd d465886739abcf9ba23dd6928504e073 *man/elnet.fit.Rd 2604c132d7a0c60ba0bb2842687b9aaf *man/fid.Rd 0cb75faa99bbe1ccf8b63d7618f1d6a6 *man/figures/logo.png 3c5ba6e37b05e27f1f70179ebc5176c4 *man/get_cox_lambda_max.Rd 282c6f7db8fc51844dc28629a8b1e261 *man/get_eta.Rd 
1e9b7d5537b29ef0a4c097e26e0fb2a7 *man/get_start.Rd ec561c83a75b43232a6a10335e911ac3 *man/glmnet-internal.Rd d6e356e0500e6f9f2678cb03f2ec828d *man/glmnet-package.Rd 3011fcd9b2eed1ab588f6f17c8b01125 *man/glmnet.Rd 5cc46f5cf2cc5389a89709d6ae129d0c *man/glmnet.control.Rd 6d468edc0d0317c2098d27c64e17955e *man/glmnet.fit.Rd fed187cbcdf90f65defb1e5de08b3539 *man/glmnet.measures.Rd 93b09ae713424336b982280c0e54d44f *man/glmnet.path.Rd a58741a0f8c88395ad8a12d3afc84cae *man/makeX.Rd 450ff2e96f274536ed5c1c7ae2b15478 *man/mycoxph.Rd 751f3230c6220d30f9e428ff985dc9c9 *man/mycoxpred.Rd 8e5d2c806cb13cc2b06207f68a31eba9 *man/na.replace.Rd 8ac2ff867e5e3c5fed650e9ba7fae90f *man/obj_function.Rd 64f275140d08ca77bde537001a99564d *man/pen_function.Rd 0a65347ee5ffbb8ac5344a3d9a0c4cc5 *man/plot.cv.glmnet.Rd 1f418aa4c5e1911ca6cc000e4f22bf5f *man/plot.glmnet.Rd 9fd729f8acf90d79120a3c58295a7a85 *man/predict.cv.glmnet.Rd 5c1735040debd1431eb6e0b194e7938f *man/predict.glmnet.Rd c94db387bc9bcc16d607294a3a3f50b6 *man/predict.glmnetfit.Rd 8e4be7a110c2b05437a16f7282d01de0 *man/print.cv.glmnet.Rd 3f6f703e63011a228505e5db088aa645 *man/print.glmnet.Rd e4c08beafea3fe0967fe62702d6df665 *man/response.coxnet.Rd 3adbd56c4685f732227a9845b470aeb8 *man/rmult.Rd a62678b3f19900ec74d299d7eebafbd7 *man/stratifySurv.Rd 25bf7abec92edfb4312fa50c0a1ebc66 *man/survfit.coxnet.Rd f2447246ed1bc0682526b694b2ff1daa *man/survfit.cv.glmnet.Rd 24b1289dbc6e8d8bcc0a90fe57286599 *man/use.cox.path.Rd ef3f52f020e0348152d26890bb7e717e *man/weighted_mean_sd.Rd cb52089613fee96e2294cef45c5547a7 *src/Makevars 5c88c2ea0b2e3d878ec61e8b708017da *src/RcppExports.cpp 2a9e9cd55b3b69583bbffd939254ef56 *src/elnet_exp.cpp df1527bce45036b9b3b532e9c376f645 *src/glmnet5dpclean.f 16dd5b3c69f01b454efa2b5c0e93c8f1 *src/glmnet_init.c 5b5a8fd460dfd1f4dcc723e9dd38ef89 *src/glmnetpp/CMakeLists.txt 0c0da0bb2f9c93bb0dde331c882137f0 *src/glmnetpp/benchmark/CMakeLists.txt 16b6687e073a6a96073e320674849393 *src/glmnetpp/benchmark/analyze/analyze.py 
613ba2acabceb43e1ca9d63b332c6ee1 *src/glmnetpp/benchmark/analyze/analyze_gaussian_benchmark.py e89670200d433a995669fafc7dfeedaa *src/glmnetpp/benchmark/analyze/path_names.py 355e499b86b71ec9bc766aa01c8be62f *src/glmnetpp/benchmark/gaussian_benchmark.cpp 65888b5bcbf60fbdf85c5fcabe748aeb *src/glmnetpp/benchmark/gaussian_cov_benchmark.cpp 376be440da613c233bfa0825de90162a *src/glmnetpp/benchmark/gaussian_naive_benchmark.cpp d645aa7e2e24a951cbdf0d1f63d50934 *src/glmnetpp/clean-build.sh da3966e7790d32d9bccf88ecd49254ed *src/glmnetpp/cmake/glmnetppConfig.cmake.in 252860ac0f1d373daea4480fd249a48c *src/glmnetpp/docs/data/gaussian_benchmark.csv 47bf1f5b81e1c73cd2b72b9e2856be0a *src/glmnetpp/docs/figs/gaussian_benchmark_fig.png 26b5c2f03dec6ac7a3c48ade77d31f40 *src/glmnetpp/include/glmnetpp 053b4b6434235b59f4da7810d23b1a3a *src/glmnetpp/include/glmnetpp_bits/chkvars.hpp 28c59dabe37aa4766a5cb4508c21b75e *src/glmnetpp/include/glmnetpp_bits/elnet_driver.hpp 6e4b77e5ee6bca73740cc5d08d0f0d49 *src/glmnetpp/include/glmnetpp_bits/elnet_driver/decl.hpp 1a477121e4deab7cc1ef44003aa20500 *src/glmnetpp/include/glmnetpp_bits/elnet_driver/gaussian.hpp 537032cec1f37c5fb630f13f0e9f0f5d *src/glmnetpp/include/glmnetpp_bits/elnet_path.hpp 70f0171d7fc9d601244621dabe963526 *src/glmnetpp/include/glmnetpp_bits/elnet_path/base.hpp 129cbbf8f1cf0b29b5e3e3add3116a71 *src/glmnetpp/include/glmnetpp_bits/elnet_path/decl.hpp daa46950731c163578d3a7606a4d54a3 *src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_base.hpp 62a27e3688e24042f91253817b4bf168 *src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_cov.hpp 2ec5b7b9afe6cce62735f32086ca3ed5 *src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_naive.hpp a511a2de9995f7c0ce6adf00fa8a0df7 *src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_cov.hpp 3397d1748f428ab8c7b67334c8564d4c *src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_naive.hpp 36e9c1b2a7a9daf91f36af0292c7f1ca *src/glmnetpp/include/glmnetpp_bits/elnet_point.hpp 
e916488ebb83d49d5c4b0e362fb16c4e *src/glmnetpp/include/glmnetpp_bits/elnet_point/decl.hpp ce2d3dbee24ea02e99ddbefacc1f2802 *src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_base.hpp 1a9df5e1f19bc1542b3319c5e36afd2d *src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_cov.hpp 24693afe70b7233b9c15d46f5718fae2 *src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_naive.hpp ba90c10546b08218f4d54e10c2c09e4d *src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/decl.hpp 204439f59278b235ed0dfca6ee90b197 *src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_base.hpp afe77d2c56ef42272fbb008d72cbe281 *src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_cov.hpp 358fc6a394477a364ed645fd0aa166b1 *src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_naive.hpp b33ca808e3a0f674b3f7411af531cf9a *src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_cov.hpp aa5f3ddcec51394ec844c6237dcee055 *src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_naive.hpp b4713c8bb0ca27925feecaee4ae527ea *src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_cov.hpp 7f8640e135084920f01d96e151fdc145 *src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_naive.hpp 618d9d3027e19071fe91348ce0d0632d *src/glmnetpp/include/glmnetpp_bits/elnet_point/traits.hpp 80f2321179fb625aad768fe4e3e344fe *src/glmnetpp/include/glmnetpp_bits/internal.hpp d5043536d5e4184b5a8d42663b30e5c0 *src/glmnetpp/include/glmnetpp_bits/standardize.hpp 6802d92d1c686d7fbd01fe57cabbf248 *src/glmnetpp/include/glmnetpp_bits/util/exceptions.hpp b0d6a60eac5f815a44ecbf7eb8b63254 *src/glmnetpp/include/glmnetpp_bits/util/functional.hpp b56c2cbfc34256d4b758bf64bab98bd1 *src/glmnetpp/include/glmnetpp_bits/util/iterator/counting_iterator.hpp dd231d06581099fb1e6dfb250b6c4772 *src/glmnetpp/include/glmnetpp_bits/util/iterator/one_to_zero_iterator.hpp 61093ed030690cf1e09f08f878cc390f *src/glmnetpp/include/glmnetpp_bits/util/mapped_sparse_matrix_wrapper.hpp 
81dfb8df3d43cbba6e2af02957f50766 *src/glmnetpp/include/glmnetpp_bits/util/type_traits.hpp e078a1f29e968f97ef94e8309a25d7f7 *src/glmnetpp/include/glmnetpp_bits/util/types.hpp c46c9454ac623c1a0093622ca67fb6bf *src/glmnetpp/include/glmnetpp_bits/wls.hpp 105f93463b2e119e194b9549d6e6b19f *src/glmnetpp/src/CMakeLists.txt eb8da4153ff93b0da1f1f2be2afcfde2 *src/glmnetpp/src/internal.cpp df1527bce45036b9b3b532e9c376f645 *src/glmnetpp/src/legacy/glmnet5dpclean.f 3efb2d8f84cd9cc951c2a3d174d278ae *src/glmnetpp/src/legacy/legacy.h 3965fc0eb8205870ea724035d96d8945 *src/glmnetpp/src/legacy/pb.c f2fefcc9ad61b762fa701181df5ab618 *src/glmnetpp/src/legacy/wls.f aee709298d82dc0c27b564198590cb9b *src/glmnetpp/test/CMakeLists.txt a8dc88d6ba9538aeacfe11c185dd0761 *src/glmnetpp/test/chkvars_unittest.cpp 83d7255bb7df7659d262d523c7e77814 *src/glmnetpp/test/elnet_driver/gaussian_unittest.cpp d1f0eebba27dd9af09c0133cb1dc7f28 *src/glmnetpp/test/elnet_path/gaussian_base.hpp 611f64f4918b2ba5b5eb1842b569f738 *src/glmnetpp/test/elnet_path/gaussian_cov_unittest.cpp 5675c3d2f428e8be3e8febbc5d9d7c03 *src/glmnetpp/test/elnet_path/gaussian_naive_unittest.cpp c6e77174e77929a592a9736a79b4f088 *src/glmnetpp/test/elnet_path/sp_gaussian_cov_unittest.cpp 7c08cbf4ce6937b9ebe315a4d078e497 *src/glmnetpp/test/elnet_path/sp_gaussian_naive_unittest.cpp 0bcac997aa29e6bd269696ff25184906 *src/glmnetpp/test/internal_unittest.cpp e6d3e4ee69b13c5afb79d2cd77febff7 *src/glmnetpp/test/standardize_unittest.cpp 57de6cd90c4021fa44061a9056fdbd0c *src/glmnetpp/test/testutil/base_fixture.hpp 42c65c25cf493fffd15ef659259e57b3 *src/glmnetpp/test/testutil/data_util.hpp 4c5d7ffa51d9bcd1c282d0c0f57e6b67 *src/glmnetpp/test/testutil/mock_pb.hpp bc44b821dd043a074c87de68f566aff6 *src/glmnetpp/test/testutil/thread.hpp d4bc75d7efc21538324b8ae0aa3b9b98 *src/glmnetpp/test/testutil/translation/elnet.hpp 46c288c0e9357ec67f87f9d7c216d665 *src/glmnetpp/test/testutil/translation/elnet1.hpp 51a4b9def66fea0ee0d05f8dba99aa5e 
*src/glmnetpp/test/testutil/translation/elnet2.hpp 9a5fa8df7fefbeb7b0bfcccca9c8be1b *src/glmnetpp/test/testutil/translation/spelnet.hpp d4c85a461c1dac1a803b5bbab67b0572 *src/glmnetpp/test/testutil/translation/spelnet1.hpp 0e97e4d8c88a3a2a99c5b054b8ee6749 *src/glmnetpp/test/testutil/translation/spelnet2.hpp f0b96c710460008d60766c727ce0a780 *src/glmnetpp/test/translation/elnet1_unittest.cpp 115166128d6274ae862e3141211be6cb *src/glmnetpp/test/translation/elnet2_unittest.cpp 3dc399f8723d24f7c5b289e1605f54e9 *src/glmnetpp/test/translation/elnet_base_fixture.hpp c1bbcaa00fd89e9723c8397d34074d86 *src/glmnetpp/test/translation/elnet_unittest.cpp 18cde68e4f1b8e1aa2fa8c21587ffbb5 *src/glmnetpp/test/translation/spelnet1_unittest.cpp 4ed727f31ea7b0f70c0d9af0e5fd9112 *src/glmnetpp/test/translation/spelnet2_unittest.cpp 69464158746fec96d41325f18f6a9381 *src/glmnetpp/test/translation/spelnet_unittest.cpp b047cb04f0ce8cdd6646c63b15aaa28d *src/glmnetpp/test/util/type_traits_unittest.cpp bfde7f283c152c6a6a7504ea1430f7e6 *src/glmnetpp/test/wls_unittest.cpp 6fa673f8890259269162f206fc0ca325 *src/internal_params.h 3965fc0eb8205870ea724035d96d8945 *src/pb.c 42d9e05de8b2fed45944fd8299dc77c5 *src/pb_exp.cpp f2fefcc9ad61b762fa701181df5ab618 *src/wls.f 0f69ee4f50e9cd394ce8e6c6bba89a8e *src/wls_exp.cpp 0bb3a929eff4b5695d75d5c08ae8360c *vignettes/Coxnet.Rmd 789121c8d4e2c1681d05f19d7a69a054 *vignettes/assets/coxnet.RDS 3969a7d30dda77ddac9a8dcea40f8f58 *vignettes/assets/glmnet_refs.bib 6b485f932628ec1755201811c3e05f49 *vignettes/assets/vignette_binomial.png 3f2e5f9cf200b832dd68492f66f63f9e *vignettes/assets/vignette_gaussian.png 79ccc7b5e10f841e74ed9612b0e10cfd *vignettes/glmnet.Rmd cab72afa072399ca5ae1906987ef89f4 *vignettes/glmnetFamily.Rmd e1dc532562ac69525a0b9c3c095be562 *vignettes/relax.Rmd glmnet/NEWS.md0000644000175000017500000002144314140042027012766 0ustar nileshnilesh# glmnet 4.1-3 Some of the Fortran in glmnet has been replaced by C++, written by the newest member of our team, James 
Yang. * the `wls` routines (dense and sparse), that are the engines under the `glmnet.path` function when we use programmable families, are now written in C++, and lead to speedups of around 8x. * the family of elnet routines (sparse/dense, covariance/naive) for `glmnet(...,family="gaussian")` are all in C++, and lead to speedups around 4x. # glmnet 4.1-2 A new feature added, as well as some minor fixes to documentation. * The exclude argument has come to life. Users can now pass a function that can take arguments x, y and weights, or a subset of these, for filtering variables. Details in documentation and vignette. * Prediction with single `newx` observation failed before. This is fixed. * Labeling of predictions from `cv.glmnet` improved. * Fixed a bug in mortran/fortran that caused program to loop ad infinitum # glmnet 4.1-1 Fixed some bugs in the coxpath function to do with sparse X. * when some penalty factors are zero, and X is sparse, we should not call GLM to get the start * apply does not work as intended with sparse X, so we now use matrix multiplies instead in computing lambda_max * added documentation for `cv.glmnet` to explain implications of supplying `lambda` # glmnet 4.1 Expanded scope for the Cox model. * We now allow (start, stop) data in addition to the original right-censored all start at zero option. * Allow for strata as in `survival::coxph` * Allow for sparse X matrix with Cox models (was not available before) * Provide method for `survival::survfit` Vignettes are revised and reorganized. Additional index information stored on `cv.glmnet` objects, and included when printed. # glmnet 4.0-2 * Biggest change. Cindex and auc calculations now use the `concordance` function from package `survival` * Minor changes. Allow coefficient warm starts for glmnet.fit. The print method for glmnet now really prints %Dev rather than the fraction. # glmnet 4.0 Major revision with added functionality. 
Any GLM family can be used now with `glmnet`, not just the built-in families. By passing a "family" object as the family argument (rather than a character string), one gets access to all families supported by `glm`. This development was programmed by our newest member of the `glmnet` team, Kenneth Tay. # glmnet 3.0-3 Bug fixes * `Intercept=FALSE` with "Gaussian" is fixed. The `dev.ratio` comes out correctly now. The mortran code was changed directly in 4 places. look for "standard". Thanks to Kenneth Tay. # glmnet 3.0-2 Bug fixes * `confusion.glmnet` was sometimes not returning a list because of apply collapsing structure * `cv.mrelnet` and `cv.multnet` dropping dimensions inappropriately * Fix to `storePB` to avoid segfault. Thanks [Tomas Kalibera](https://github.com/kalibera)! * Changed the help for `assess.glmnet` and cousins to be more helpful! * Changed some logic in `lambda.interp` to avoid edge cases (thanks David Keplinger) # glmnet 3.0-1 Minor fix to correct Depends in the DESCRIPTION to R (>= 3.6.0) # glmnet 3.0 This is a major revision with much added functionality, listed roughly in order of importance. An additional vignette called `relax` is supplied to describe the usage. * `relax` argument added to `glmnet`. This causes the models in the path to be refit without regularization. The resulting object inherits from class `glmnet`, and has an additional component, itself a glmnet object, which is the relaxed fit. * `relax` argument to `cv.glmnet`. This allows selection from a mixture of the relaxed fit and the regular fit. The mixture is governed by an argument `gamma` with a default of 5 values between 0 and 1. * `predict`, `coef` and `plot` methods for `relaxed` and `cv.relaxed` objects. * `print` method for `relaxed` object, and new `print` methods for `cv.glmnet` and `cv.relaxed` objects. * A progress bar is provided via an additional argument `trace.it=TRUE` to `glmnet` and `cv.glmnet`. This can also be set for the session via `glmnet.control`. 
* Three new functions `assess.glmnet`, `roc.glmnet` and `confusion.glmnet` for displaying the performance of models. * `makeX` for building the `x` matrix for input to `glmnet`. Main functionality is *one-hot-encoding* of factor variables, treatment of `NA` and creating sparse inputs. * `bigGlm` for fitting the GLMs of `glmnet` unpenalized. In addition to these new features, some of the code in `glmnet` has been tidied up, especially related to CV. # glmnet 2.0-20 * Fixed a bug in internal function `coxnet.deviance` to do with input `pred`, as well as saturated `loglike` (missing) and weights * added a `coxgrad` function for computing the gradient # glmnet 2.0-19 * Fixed a bug in coxnet to do with ties between death set and risk set # glmnet 2.0-18 * Added an option alignment to `cv.glmnet`, for cases when wierd things happen # glmnet 2.0-17 * Further fixes to mortran to get clean fortran; current mortran src is in `inst/mortran` # glmnet 2.0-16 * Additional fixes to mortran; current mortran src is in `inst/mortran` * Mortran uses double precision, and variables are initialized to avoid `-Wall` warnings * cleaned up repeat code in CV by creating a utility function # glmnet 2.0-15 * Fixed up the mortran so that generic fortran compiler can run without any configure # glmnet 2.0-13 * Cleaned up some bugs to do with exact prediction * `newoffset` created problems all over - fixed these # glmnet 2.0-11 * Added protection with `exact=TRUE` calls to `coef` and `predict`. See help file for more details # glmnet 2.0-10 * Two iterations to fix to fix native fortran registration. # glmnet 2.0-8 * included native registration of fortran # glmnet 2.0-7 * constant `y` blows up `elnet`; error trap included * fixed `lambda.interp` which was returning `NaN` under degenerate circumstances. # glmnet 2.0-6 * added some code to extract time and status gracefully from a `Surv` object # glmnet 2.0-3 * changed the usage of `predict` and `coef` with `exact=TRUE`. 
The user is strongly encouraged to supply the original `x` and `y` values, as well as any other data such as weights that were used in the original fit. # glmnet 2.0-1 * Major upgrade to CV; let each model use its own lambdas, then predict at original set. * fixed some minor bugs # glmnet 1.9-9 * fixed subsetting bug in `lognet` when some weights are zero and `x` is sparse # glmnet 1.9-8 * fixed bug in multivariate response model (uninitialized variable), leading to valgrind issues * fixed issue with multinomial response matrix and zeros * Added a link to a glmnet vignette # glmnet 1.9-6 * fixed bug in `predict.glmnet`, `predict.multnet` and `predict.coxnet`, when `s=` argument is used with a vector of values. It was not doing the matrix multiply correctly * changed documentation of glmnet to explain logistic response matrix # glmnet 1.9-5 * added parallel capabilities, and fixed some minor bugs # glmnet 1.9-3 * added `intercept` option # glmnet 1.9-1 * added upper and lower bounds for coefficients * added `glmnet.control` for setting systems parameters * fixed serious bug in `coxnet` # glmnet 1.8-5 * added `exact=TRUE` option for prediction and coef functions # glmnet 1.8 * Major new release * added `mgaussian` family for multivariate response * added `grouped` option for multinomial family # glmnet 1.7-4 * nasty bug fixed in fortran - removed reference to dble * check class of `newx` and make `dgCmatrix` if sparse # glmnet 1.7-1 * `lognet` added a classnames component to the object * `predict.lognet(type="class")` now returns a character vector/matrix # glmnet 1.6 * `predict.glmnet` : fixed bug with `type="nonzero"` * `glmnet`: Now x can inherit from `sparseMatrix` rather than the very specific `dgCMatrix`, and this will trigger sparse mode for glmnet # glmnet 1.5 * `glmnet.Rd` (`lambda.min`) : changed value to 0.01 if `nobs < nvars`, (`lambda`) added warnings to avoid single value, (`lambda.min`): renamed it `lambda.min.ratio` * `glmnet` (`lambda.min`) : changed 
value to 0.01 if `nobs < nvars` (`HessianExact`) : changed the sense (it was wrong), (`lambda.min`): renamed it `lambda.min.ratio`. This allows it to be called `lambda.min` in a call though * `predict.cv.glmnet` (new function) : makes predictions directly from the saved `glmnet` object on the cv object * `coef.cv.glmnet` (new function) : as above * `predict.cv.glmnet.Rd` : help functions for the above * `cv.glmnet` : insert `drop(y)` to avoid 1 column matrices; now include a `glmnet.fit` object for later predictions * `nonzeroCoef` : added a special case for a single variable in `x`; it was dying on this * `deviance.glmnet` : included * `deviance.glmnet.Rd` : included # glmnet 1.4 * Note that this starts from version `glmnet_1.4`. glmnet/DESCRIPTION0000644000175000017500000000406314140304132013373 0ustar nileshnileshPackage: glmnet Type: Package Title: Lasso and Elastic-Net Regularized Generalized Linear Models Version: 4.1-3 Date: 2021-11-01 Authors@R: c(person("Jerome", "Friedman", role=c("aut")), person("Trevor", "Hastie", role=c("aut", "cre"), email = "hastie@stanford.edu"), person("Rob", "Tibshirani", role=c("aut")), person("Balasubramanian", "Narasimhan", role=c("aut")), person("Kenneth","Tay",role=c("aut")), person("Noah", "Simon", role=c("aut")), person("Junyang", "Qian", role=c("ctb")), person("James", "Yang", role=c("aut"))) Depends: R (>= 3.6.0), Matrix (>= 1.0-6) Imports: methods, utils, foreach, shape, survival, Rcpp Suggests: knitr, lars, testthat, xfun, rmarkdown SystemRequirements: C++14 Description: Extremely efficient procedures for fitting the entire lasso or elastic-net regularization path for linear regression, logistic and multinomial regression models, Poisson regression, Cox model, multiple-response Gaussian, and the grouped multinomial regression. There are two new and important additions. The family argument can be a GLM family object, which opens the door to any programmed family. 
This comes with a modest computational cost, so when the built-in families suffice, they should be used instead. The other novelty is the relax option, which refits each of the active sets in the path unpenalized. The algorithm uses cyclical coordinate descent in a path-wise fashion, as described in the papers listed in the URL below. License: GPL-2 VignetteBuilder: knitr Encoding: UTF-8 URL: https://glmnet.stanford.edu, https://dx.doi.org/10.18637/jss.v033.i01, https://dx.doi.org/10.18637/jss.v039.i05 RoxygenNote: 7.1.2 LinkingTo: RcppEigen, Rcpp NeedsCompilation: yes Packaged: 2021-11-02 17:16:44 UTC; hastie Author: Jerome Friedman [aut], Trevor Hastie [aut, cre], Rob Tibshirani [aut], Balasubramanian Narasimhan [aut], Kenneth Tay [aut], Noah Simon [aut], Junyang Qian [ctb], James Yang [aut] Maintainer: Trevor Hastie Repository: CRAN Date/Publication: 2021-11-02 18:50:02 UTC glmnet/README.md0000644000175000017500000000651313752553007013165 0ustar nileshnilesh # Lasso and Elastic-Net Regularized Generalized Linear Models [![CRAN\_Status\_Badge](https://www.r-pkg.org/badges/version/glmnet)](https://cran.r-project.org/package=glmnet)[![](https://cranlogs.r-pkg.org/badges/glmnet)](https://CRAN.R-project.org/package=glmnet) We provide extremely efficient procedures for fitting the entire lasso or elastic-net regularization path for linear regression (gaussian), multi-task gaussian, logistic and multinomial regression models (grouped or not), Poisson regression and the Cox model. The algorithm uses cyclical coordinate descent in a path-wise fashion. Details may be found in Friedman, Hastie, and Tibshirani ([2010](#ref-glmnet)), Simon et al. ([2011](#ref-coxnet)), Tibshirani et al. ([2012](#ref-strongrules)), Simon, Friedman, and Hastie ([2013](#ref-block)). Version 3.0 is a major release with several new features, including: - Relaxed fitting to allow models in the path to be refit without regularization. 
CV will select from these, or from specified mixtures of the relaxed fit and the regular fit; - Progress bar to monitor computation; - Assessment functions for displaying performance of models on test data. These include all the measures available via `cv.glmnet`, as well as confusion matrices and ROC plots for classification models; - print methods for CV output; - Functions for building the `x` input matrix for `glmnet` that allow for *one-hot-encoding* of factor variables, appropriate treatment of missing values, and an option to create a sparse matrix if appropriate. - A function for fitting unpenalized a single version of any of the GLMs of `glmnet`. Version 4.0 is a major release that allows for any GLM family, besides the built-in families. ## References
Friedman, Jerome, Trevor Hastie, and Rob Tibshirani. 2010. “Regularization Paths for Generalized Linear Models via Coordinate Descent.” *Journal of Statistical Software, Articles* 33 (1): 1–22. .
Simon, Noah, Jerome Friedman, and Trevor Hastie. 2013. “A Blockwise Descent Algorithm for Group-Penalized Multiresponse and Multinomial Regression.”
Simon, Noah, Jerome Friedman, Trevor Hastie, and Rob Tibshirani. 2011. “Regularization Paths for Cox’s Proportional Hazards Model via Coordinate Descent.” *Journal of Statistical Software, Articles* 39 (5): 1–13. .
Tibshirani, Robert, Jacob Bien, Jerome Friedman, Trevor Hastie, Noah Simon, Jonathan Taylor, and Ryan J. Tibshirani. 2012. “Strong Rules for Discarding Predictors in Lasso-Type Problems.” *Journal of the Royal Statistical Society: Series B (Statistical Methodology)* 74 (2): 245–66. .
glmnet/man/0000755000175000017500000000000014046050560012445 5ustar nileshnileshglmnet/man/coxgrad.Rd0000644000175000017500000000356014013330131014354 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxgrad.R \name{coxgrad} \alias{coxgrad} \title{Compute gradient for Cox model} \usage{ coxgrad(eta, y, w, std.weights = TRUE, diag.hessian = FALSE) } \arguments{ \item{eta}{Fit vector (usually from glmnet at a particular lambda).} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{w}{Observation weights (default is all equal to 1).} \item{std.weights}{If TRUE (default), observation weights are standardized to sum to 1.} \item{diag.hessian}{If \code{TRUE}, compute the diagonal of the Hessian of the log partial likelihood as well. Default is \code{FALSE}.} } \value{ A single gradient vector the same length as \code{eta}. If \code{diag.hessian=TRUE}, the diagonal of the Hessian is included as an attribute "diag_hessian". } \description{ Compute the gradient of the log partial likelihood at a particular fit for Cox model. } \details{ Compute a gradient vector at the fitted vector for the log partial likelihood. This is like a residual vector, and useful for manual screening of predictors for \code{glmnet} in applications where \code{p} is very large (as in GWAS). Uses the Breslow approach to ties. This function is essentially a wrapper: it checks whether the response provided is right-censored or (start, stop] survival data, and calls the appropriate internal routine. 
} \examples{ set.seed(1) eta <- rnorm(10) time <- runif(10, min = 1, max = 10) d <- ifelse(rnorm(10) > 0, 1, 0) y <- survival::Surv(time, d) coxgrad(eta, y) # return diagonal of Hessian as well coxgrad(eta, y, diag.hessian = TRUE) # example with (start, stop] data y2 <- survival::Surv(time, time + runif(10), d) coxgrad(eta, y2) # example with strata y2 <- stratifySurv(y, rep(1:2, length.out = 10)) coxgrad(eta, y2) } \seealso{ \code{coxnet.deviance} } \keyword{Cox} \keyword{model} glmnet/man/pen_function.Rd0000644000175000017500000000127213752553007015434 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{pen_function} \alias{pen_function} \title{Elastic net penalty value} \usage{ pen_function(coefficients, alpha = 1, vp = 1) } \arguments{ \item{coefficients}{The model's coefficients (excluding intercept).} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{vp}{Penalty factors for each of the coefficients.} } \description{ Returns the elastic net penalty value without the \code{lambda} factor. } \details{ The penalty is defined as \deqn{(1-\alpha)/2 \sum vp_j \beta_j^2 + \alpha \sum vp_j |\beta|.} Note the omission of the multiplicative \code{lambda} factor. } glmnet/man/plot.cv.glmnet.Rd0000644000175000017500000000402013556622666015623 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.cv.glmnet.R, R/plot.cv.relaxed.R \name{plot.cv.glmnet} \alias{plot.cv.glmnet} \alias{plot.cv.relaxed} \title{plot the cross-validation curve produced by cv.glmnet} \usage{ \method{plot}{cv.glmnet}(x, sign.lambda = 1, ...) \method{plot}{cv.relaxed}(x, se.bands = TRUE, ...) 
} \arguments{ \item{x}{fitted \code{"cv.glmnet"} object} \item{sign.lambda}{Either plot against \code{log(lambda)} (default) or its negative if \code{sign.lambda=-1}.} \item{\dots}{Other graphical parameters to plot} \item{se.bands}{Should shading be produced to show standard-error bands; default is \code{TRUE}} } \description{ Plots the cross-validation curve, and upper and lower standard deviation curves, as a function of the \code{lambda} values used. If the object has class \code{"cv.relaxed"} a different plot is produced, showing both \code{lambda} and \code{gamma} } \details{ A plot is produced, and nothing is returned. } \examples{ set.seed(1010) n = 1000 p = 100 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) beta = rnorm(nzc) fx = (x[, seq(nzc)] \%*\% beta) eps = rnorm(n) * 5 y = drop(fx + eps) px = exp(fx) px = px/(1 + px) ly = rbinom(n = length(px), prob = px, size = 1) cvob1 = cv.glmnet(x, y) plot(cvob1) title("Gaussian Family", line = 2.5) cvob1r = cv.glmnet(x, y, relax = TRUE) plot(cvob1r) frame() set.seed(1011) par(mfrow = c(2, 2), mar = c(4.5, 4.5, 4, 1)) cvob2 = cv.glmnet(x, ly, family = "binomial") plot(cvob2) title("Binomial Family", line = 2.5) ## set.seed(1011) ## cvob3 = cv.glmnet(x, ly, family = "binomial", type = "class") ## plot(cvob3) ## title("Binomial Family", line = 2.5) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent} } \seealso{ \code{glmnet} and \code{cv.glmnet}. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/mycoxpred.Rd0000644000175000017500000000116713775432176014772 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survfit.coxnet.R \name{mycoxpred} \alias{mycoxpred} \title{Helper function to amend ... 
for new data in survfit.coxnet} \usage{ mycoxpred(object, s, ...) } \arguments{ \item{object}{A class \code{coxnet} object.} \item{s}{The response for the fitted model.} \item{...}{The same ... that was passed to survfit.coxnet.} } \description{ This function amends the function arguments passed to survfit.coxnet via ... if new data was passed to survfit.coxnet. It's a separate function as we have to deal with function options like newstrata and newoffset. } glmnet/man/glmnet.measures.Rd0000644000175000017500000000153313775432176016066 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet.measures.R \name{glmnet.measures} \alias{glmnet.measures} \title{Display the names of the measures used in CV for different "glmnet" families} \usage{ glmnet.measures( family = c("all", "gaussian", "binomial", "poisson", "multinomial", "cox", "mgaussian", "GLM") ) } \arguments{ \item{family}{If a "glmnet" family is supplied, a list of the names of measures available for that family are produced. Default is "all", in which case the names of measures for all families are produced.} } \description{ Produces a list of names of measures } \details{ Try it and see. A very simple function to provide information } \seealso{ \code{cv.glmnet} and \code{assess.glmnet}. } \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} glmnet/man/get_start.Rd0000644000175000017500000000434113775432176014751 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{get_start} \alias{get_start} \title{Get null deviance, starting mu and lambda max} \usage{ get_start( x, y, weights, family, intercept, is.offset, offset, exclude, vp, alpha ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. 
It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed to be standardized.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function. (See \code{\link[stats:family]{family}} for details on family functions.)} \item{intercept}{Does the model we are fitting have an intercept term or not?} \item{is.offset}{Is the model being fit with an offset or not?} \item{offset}{Offset for the model. If \code{is.offset=FALSE}, this should be a zero vector of the same length as \code{y}.} \item{exclude}{Indices of variables to be excluded from the model.} \item{vp}{Separate penalty factors can be applied to each coefficient.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} } \description{ Return the null deviance, starting mu and lambda max values for initialization. For internal use only. } \details{ This function is called by \code{glmnet.path} for null deviance, starting mu and lambda max values. It is also called by \code{glmnet.fit} when used without warmstart, but they only use the null deviance and starting mu values. When \code{x} is not sparse, it is expected to already by centered and scaled. When \code{x} is sparse, the function will get its attributes \code{xm} and \code{xs} for its centering and scaling factors. Note that whether \code{x} is centered & scaled or not, the values of \code{mu} and \code{nulldev} don't change. However, the value of \code{lambda_max} does change, and we need \code{xm} and \code{xs} to get the correct value. 
} glmnet/man/use.cox.path.Rd0000644000175000017500000000127613775432176015300 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{use.cox.path} \alias{use.cox.path} \title{Check if glmnet should call cox.path} \usage{ use.cox.path(x, y) } \arguments{ \item{x}{Design matrix.} \item{y}{Response variable.} } \value{ TRUE if cox.path() should be called, FALSE otherwise. } \description{ Helper function to check if glmnet() should call cox.path(). } \details{ For \code{family="cox"}, we only call the original coxnet() function if (i) x is not sparse, (ii) y is right-censored data, and (iii) we are not fitting a stratified Cox model. This function also throws an error if y has a "strata" attribute but is not of type "stratifySurv". } glmnet/man/bigGlm.Rd0000644000175000017500000000343113560704061014140 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bigGlm.R \name{bigGlm} \alias{bigGlm} \title{fit a glm with all the options in \code{glmnet}} \usage{ bigGlm(x, ..., path = FALSE) } \arguments{ \item{x}{input matrix} \item{...}{Most other arguments to glmnet that make sense} \item{path}{Since \code{glmnet} does not do stepsize optimization, the Newton algorithm can get stuck and not converge, especially with unpenalized fits. With \code{path=TRUE}, the fit computed with pathwise lasso regularization. The current implementation does this twice: the first time to get the lambda sequence, and the second time with a zero attached to the end). Default is \code{path=FALSE}.} } \value{ It returns an object of class "bigGlm" that inherits from class "glmnet". That means it can be predicted from, coefficients extracted via \code{coef}. It has its own print method. } \description{ Fit a generalized linear model as in \code{glmnet} but unpenalized. This allows all the features of \code{glmnet} such as sparse x, bounds on coefficients, offsets, and so on. 
} \details{ This is essentially the same as fitting a "glmnet" model with a single value \code{lambda=0}, but it avoids some edge cases. CAVEAT: If the user tries a problem with N smaller than or close to p for some models, it is likely to fail (and maybe not gracefully!) If so, use the \code{path=TRUE} argument. } \examples{ # Gaussian x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = bigGlm(x, y) print(fit1) fit2=bigGlm(x,y>0,family="binomial") print(fit2) fit2p=bigGlm(x,y>0,family="binomial",path=TRUE) print(fit2p) } \seealso{ \code{print}, \code{predict}, and \code{coef} methods. } \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/print.cv.glmnet.Rd0000644000175000017500000000315513775432176016007 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.cv.glmnet.R \name{print.cv.glmnet} \alias{print.cv.glmnet} \alias{print.cv.relaxed} \title{print a cross-validated glmnet object} \usage{ \method{print}{cv.glmnet}(x, digits = max(3, getOption("digits") - 3), ...) } \arguments{ \item{x}{fitted 'cv.glmnet' object} \item{digits}{significant digits in printout} \item{\dots}{additional print arguments} } \description{ Print a summary of the results of cross-validation for a glmnet model. } \details{ A summary of the cross-validated fit is produced, slightly different for a 'cv.relaxed' object than for a 'cv.glmnet' object. Note that a 'cv.relaxed' object inherits from class 'cv.glmnet', so by directly invoking \code{print.cv.glmnet(object)} will print the summary as if \code{relax=TRUE} had not been used. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = cv.glmnet(x, y) print(fit1) fit1r = cv.glmnet(x, y, relax = TRUE) print(fit1r) ## print.cv.glmnet(fit1r) ## CHECK WITH TREVOR } \references{ Friedman, J., Hastie, T. and Tibshirani, R. 
(2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent}\cr \url{https://arxiv.org/abs/1707.08692}\cr Hastie, T., Tibshirani, Robert, Tibshirani, Ryan (2019) \emph{Extended Comparisons of Best Subset Selection, Forward Stepwise Selection, and the Lasso} } \seealso{ \code{glmnet}, \code{predict} and \code{coef} methods. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/cox_obj_function.Rd0000644000175000017500000000146513775432176016311 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{cox_obj_function} \alias{cox_obj_function} \title{Elastic net objective function value for Cox regression model} \usage{ cox_obj_function(y, pred, weights, lambda, alpha, coefficients, vp) } \arguments{ \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{pred}{Model's predictions for \code{y}.} \item{weights}{Observation weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{coefficients}{The model's coefficients.} \item{vp}{Penalty factors for each of the coefficients.} } \description{ Returns the elastic net objective function value for Cox regression model. } glmnet/man/response.coxnet.Rd0000644000175000017500000000133213775432176016107 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/response.coxnet.R \name{response.coxnet} \alias{response.coxnet} \title{Make response for coxnet} \usage{ response.coxnet(y) } \arguments{ \item{y}{Response variable. Either a class "Surv" object or a two-column matrix with columns named 'time' and 'status'.} } \value{ A class "Surv" object. 
} \description{ Internal function to make the response y passed to glmnet suitable for coxnet (i.e. glmnet with family = "cox"). Sanity checks are performed here too. } \details{ If y is a class "Surv" object, this function returns y with no changes. If y is a two-column matrix with columns named 'time' and 'status', it is converted into a "Surv" object. } glmnet/man/makeX.Rd0000644000175000017500000000614313553366407014021 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makeX.R \name{makeX} \alias{makeX} \title{convert a data frame to a data matrix with one-hot encoding} \usage{ makeX(train, test = NULL, na.impute = FALSE, sparse = FALSE, ...) } \arguments{ \item{train}{Required argument. A dataframe consisting of vectors, matrices and factors} \item{test}{Optional argument. A dataframe matching 'train' for use as testing data} \item{na.impute}{Logical, default \code{FALSE}. If \code{TRUE}, missing values for any column in the resultant 'x' matrix are replaced by the means of the nonmissing values derived from 'train'} \item{sparse}{Logical, default \code{FALSE}. If \code{TRUE} then the returned matrice(s) are converted to matrices of class "CsparseMatrix". Useful if some factors have a large number of levels, resulting in very big matrices, mostly zero} \item{...}{additional arguments, currently unused} } \value{ If only 'train' was provided, the function returns a matrix 'x'. If missing values were imputed, this matrix has an attribute containing its column means (before imputation). If 'test' was provided as well, a list with two components is returned: 'x' and 'xtest'. } \description{ Converts a data frame to a data matrix suitable for input to \code{glmnet}. Factors are converted to dummy matrices via "one-hot" encoding. Options deal with missing values and sparsity. } \details{ The main function is to convert factors to dummy matrices via "one-hot" encoding. 
Having the 'train' and 'test' data present is useful if some factor levels are missing in either. Since a factor with k levels leads to a submatrix with 1/k entries zero, with large k the \code{sparse=TRUE} option can be helpful; a large matrix will be returned, but stored in sparse matrix format. Finally, the function can deal with missing data. The current version has the option to replace missing observations with the mean from the training data. For dummy submatrices, these are the mean proportions at each level. } \examples{ set.seed(101) ### Single data frame X = matrix(rnorm(20), 10, 2) X3 = sample(letters[1:3], 10, replace = TRUE) X4 = sample(LETTERS[1:3], 10, replace = TRUE) df = data.frame(X, X3, X4) makeX(df) makeX(df, sparse = TRUE) ### Single data freame with missing values Xn = X Xn[3, 1] = NA Xn[5, 2] = NA X3n = X3 X3n[6] = NA X4n = X4 X4n[9] = NA dfn = data.frame(Xn, X3n, X4n) makeX(dfn) makeX(dfn, sparse = TRUE) makeX(dfn, na.impute = TRUE) makeX(dfn, na.impute = TRUE, sparse = TRUE) ### Test data as well X = matrix(rnorm(10), 5, 2) X3 = sample(letters[1:3], 5, replace = TRUE) X4 = sample(LETTERS[1:3], 5, replace = TRUE) dft = data.frame(X, X3, X4) makeX(df, dft) makeX(df, dft, sparse = TRUE) ### Missing data in test as well Xn = X Xn[3, 1] = NA Xn[5, 2] = NA X3n = X3 X3n[1] = NA X4n = X4 X4n[2] = NA dftn = data.frame(Xn, X3n, X4n) makeX(dfn, dftn) makeX(dfn, dftn, sparse = TRUE) makeX(dfn, dftn, na.impute = TRUE) makeX(dfn, dftn, sparse = TRUE, na.impute = TRUE) } \seealso{ \code{glmnet} } \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} glmnet/man/survfit.cv.glmnet.Rd0000644000175000017500000000331313775432176016351 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survfit.cv.glmnet.R \name{survfit.cv.glmnet} \alias{survfit.cv.glmnet} \title{Compute a survival curve from a cv.glmnet object} \usage{ 
\method{survfit}{cv.glmnet}(formula, s = c("lambda.1se", "lambda.min"), ...) } \arguments{ \item{formula}{A class \code{cv.glmnet} object. The object should have been fit with \code{family = "cox"}.} \item{s}{Value(s) of the penalty parameter lambda at which predictions are required. Default is the value s="lambda.1se" stored on the CV object. Alternatively s="lambda.min" can be used. If s is numeric, it is taken as the value(s) of lambda to be used.} \item{...}{Other arguments to be passed to \code{survfit.coxnet}.} } \value{ If \code{s} is a single value, an object of class "survfitcox" and "survfit" containing one or more survival curves. Otherwise, a list of such objects, one element for each value in \code{s}. Methods defined for survfit objects are print, summary and plot. } \description{ Computes the predicted survivor function for a Cox proportional hazards model with elastic net penalty from a cross-validated glmnet model. } \details{ This function makes it easier to use the results of cross-validation to compute a survival curve. } \examples{ set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) x <- matrix(xvec, nrow = nobs) beta <- rnorm(nvars / 3) fx <- x[, seq(nvars / 3)] \%*\% beta / 3 ty <- rexp(nobs, exp(fx)) tcens <- rbinom(n = nobs, prob = 0.3, size = 1) y <- survival::Surv(ty, tcens) cvfit <- cv.glmnet(x, y, family = "cox") # default: s = "lambda.1se" survival::survfit(cvfit, x = x, y = y) # s = "lambda.min" survival::survfit(cvfit, s = "lambda.min", x = x, y = y) } glmnet/man/predict.glmnetfit.Rd0000644000175000017500000000474313775432176016405 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{predict.glmnetfit} \alias{predict.glmnetfit} \title{Get predictions from a \code{glmnetfit} fit object} \usage{ \method{predict}{glmnetfit}( object, newx, s = NULL, type = c("link", "response", "coefficients", "nonzero"), exact = FALSE, newoffset, ... 
) } \arguments{ \item{object}{Fitted "glmnetfit" object.} \item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix. This argument is not used for \code{type = c("coefficients","nonzero")}.} \item{s}{Value(s) of the penalty parameter lambda at which predictions are required. Default is the entire sequence used to create the model.} \item{type}{Type of prediction required. Type "link" gives the linear predictors (eta scale); Type "response" gives the fitted values (mu scale). Type "coefficients" computes the coefficients at the requested values for s. Type "nonzero" returns a list of the indices of the nonzero coefficients for each value of s.} \item{exact}{This argument is relevant only when predictions are made at values of \code{s} (lambda) \emph{different} from those used in the fitting of the original model. If \code{exact=FALSE} (default), then the predict function uses linear interpolation to make predictions for values of \code{s} (lambda) that do not coincide with those used in the fitting algorithm. While this is often a good approximation, it can sometimes be a bit coarse. With \code{exact=TRUE}, these different values of \code{s} are merged (and sorted) with \code{object$lambda}, and the model is refit before predictions are made. In this case, it is required to supply the original data x= and y= as additional named arguments to predict() or coef(). The workhorse \code{predict.glmnet()} needs to update the model, and so needs the data used to create it. The same is true of weights, offset, penalty.factor, lower.limits, upper.limits if these were used in the original call. 
Failure to do so will result in an error.} \item{newoffset}{If an offset is used in the fit, then one must be supplied for making predictions (except for type="coefficients" or type="nonzero").} \item{...}{This is the mechanism for passing arguments like \code{x=} when \code{exact=TRUE}; see \code{exact} argument.} } \value{ The object returned depends on type. } \description{ Gives fitted values, linear predictors, coefficients and number of non-zero coefficients from a fitted \code{glmnetfit} object. } glmnet/man/coxnet.deviance.Rd0000644000175000017500000000464014013330131016002 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxnet.deviance.R \name{coxnet.deviance} \alias{coxnet.deviance} \title{Compute deviance for Cox model} \usage{ coxnet.deviance( pred = NULL, y, x = NULL, offset = NULL, weights = NULL, std.weights = TRUE, beta = NULL ) } \arguments{ \item{pred}{Fit vector or matrix (usually from glmnet at a particular lambda or a sequence of lambdas).} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{x}{Optional \code{x} matrix, to be supplied if \code{pred = NULL}.} \item{offset}{Optional offset vector.} \item{weights}{Observation weights (default is all equal to 1).} \item{std.weights}{If TRUE (default), observation weights are standardized to sum to 1.} \item{beta}{Optional coefficient vector/matrix, to be supplied if \code{pred = NULL}.} } \value{ A vector of deviances, one for each column of predictions. } \description{ Compute the deviance (-2 log partial likelihood) for Cox model. } \details{ Computes the deviance for a single set of predictions, or for a matrix of predictions. The user can either supply the predictions directly through the \code{pred} option, or by supplying the \code{x} matrix and \code{beta} coefficients. Uses the Breslow approach to ties. The function first checks if \code{pred} is passed: if so, it is used as the predictions. 
If \code{pred} is not passed but \code{x} and \code{beta} are passed, then these values are used to compute the predictions. If neither \code{x} nor \code{beta} are passed, then the predictions are all taken to be 0. \code{coxnet.deviance()} is a wrapper: it calls the appropriate internal routine based on whether the response is right-censored data or (start, stop] survival data. } \examples{ set.seed(1) eta <- rnorm(10) time <- runif(10, min = 1, max = 10) d <- ifelse(rnorm(10) > 0, 1, 0) y <- survival::Surv(time, d) coxnet.deviance(pred = eta, y = y) # if pred not provided, it is set to zero vector coxnet.deviance(y = y) # example with x and beta x <- matrix(rnorm(10 * 3), nrow = 10) beta <- matrix(1:3, ncol = 1) coxnet.deviance(y = y, x = x, beta = beta) # example with (start, stop] data y2 <- survival::Surv(time, time + runif(10), d) coxnet.deviance(pred = eta, y = y2) # example with strata y2 <- stratifySurv(y, rep(1:2, length.out = 10)) coxnet.deviance(pred = eta, y = y2) } \seealso{ \code{coxgrad} } \keyword{Cox} \keyword{model} glmnet/man/plot.glmnet.Rd0000644000175000017500000000427013775432176015221 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.glmnet.R, R/plot.mrelnet.R, % R/plot.multnet.R, R/plot.relaxed.R \name{plot.glmnet} \alias{plot.glmnet} \alias{plot.multnet} \alias{plot.mrelnet} \alias{plot.relaxed} \title{plot coefficients from a "glmnet" object} \usage{ \method{plot}{glmnet}(x, xvar = c("norm", "lambda", "dev"), label = FALSE, ...) \method{plot}{mrelnet}( x, xvar = c("norm", "lambda", "dev"), label = FALSE, type.coef = c("coef", "2norm"), ... ) \method{plot}{multnet}( x, xvar = c("norm", "lambda", "dev"), label = FALSE, type.coef = c("coef", "2norm"), ... ) \method{plot}{relaxed}(x, xvar = c("lambda", "dev"), label = FALSE, gamma = 1, ...) } \arguments{ \item{x}{fitted \code{"glmnet"} model} \item{xvar}{What is on the X-axis. 
\code{"norm"} plots against the L1-norm of the coefficients, \code{"lambda"} against the log-lambda sequence, and \code{"dev"} against the percent deviance explained.} \item{label}{If \code{TRUE}, label the curves with variable sequence numbers.} \item{\dots}{Other graphical parameters to plot} \item{type.coef}{If \code{type.coef="2norm"} then a single curve per variable, else if \code{type.coef="coef"}, a coefficient plot per response} \item{gamma}{Value of the mixing parameter for a "relaxed" fit} } \description{ Produces a coefficient profile plot of the coefficient paths for a fitted \code{"glmnet"} object. } \details{ A coefficient profile plot is produced. If \code{x} is a multinomial model, a coefficient plot is produced for each class. } \examples{ x=matrix(rnorm(100*20),100,20) y=rnorm(100) g2=sample(1:2,100,replace=TRUE) g4=sample(1:4,100,replace=TRUE) fit1=glmnet(x,y) plot(fit1) plot(fit1,xvar="lambda",label=TRUE) fit3=glmnet(x,g4,family="multinomial") plot(fit3,pch=19) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent} } \seealso{ \code{glmnet}, and \code{print}, \code{predict} and \code{coef} methods. 
} \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/glmnet.fit.Rd0000644000175000017500000001505513775432176015030 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{glmnet.fit} \alias{glmnet.fit} \title{Fit a GLM with elastic net regularization for a single value of lambda} \usage{ glmnet.fit( x, y, weights, lambda, alpha = 1, offset = rep(0, nobs), family = gaussian(), intercept = TRUE, thresh = 1e-10, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = c(), lower.limits = -Inf, upper.limits = Inf, warm = NULL, from.glmnet.path = FALSE, save.fit = FALSE, trace.it = 0 ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed that any standardization needed has already been done.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights. \code{glmnet.fit} does NOT standardize these weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{offset}{A vector of length \code{nobs} that is included in the linear predictor. Useful for the "poisson" family (e.g. log of exposure time), or for refining a model by starting at a current fit. Default is NULL. 
If supplied, then values must also be supplied to the \code{predict} function.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function. Default is \code{gaussian()}. (See \code{\link[stats:family]{family}} for details on family functions.)} \item{intercept}{Should intercept be fitted (default=TRUE) or set to zero (FALSE)?} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}. (If a warm start object is provided, the number of passes the warm start object performed is included.)} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to \code{nvars}.} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}.} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}.} \item{warm}{Either a \code{glmnetfit} object or a list (with names \code{beta} and \code{a0} containing coefficients and intercept respectively) which can be used as a warm start. Default is \code{NULL}, indicating no warm start. 
For internal use only.} \item{from.glmnet.path}{Was \code{glmnet.fit()} called from \code{glmnet.path()}? Default is FALSE.This has implications for computation of the penalty factors.} \item{save.fit}{Return the warm start object? Default is FALSE.} \item{trace.it}{Controls how much information is printed to screen. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted. Default is \code{trace.it=0} (no information printed). (\code{trace.it=1} not used for compatibility with \code{glmnet.path}.)} } \value{ An object with class "glmnetfit" and "glmnet". The list returned contains more keys than that of a "glmnet" object. \item{a0}{Intercept value.} \item{beta}{A \code{nvars x 1} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{Lambda value used.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the intercept model.} \item{npasses}{Total passes over the data.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} \item{warm_fit}{If \code{save.fit=TRUE}, output of FORTRAN routine, used for warm starts. 
For internal use only.} \item{family}{Family used for the model.} \item{converged}{A logical variable: was the algorithm judged to have converged?} \item{boundary}{A logical variable: is the fitted value on the boundary of the attainable values?} \item{obj_function}{Objective function value at the solution.} } \description{ Fit a generalized linear model via penalized maximum likelihood for a single value of lambda. Can deal with any GLM family. } \details{ WARNING: Users should not call \code{glmnet.fit} directly. Higher-level functions in this package call \code{glmnet.fit} as a subroutine. If a warm start object is provided, some of the other arguments in the function may be overriden. \code{glmnet.fit} solves the elastic net problem for a single, user-specified value of lambda. \code{glmnet.fit} works for any GLM family. It solves the problem using iteratively reweighted least squares (IRLS). For each IRLS iteration, \code{glmnet.fit} makes a quadratic (Newton) approximation of the log-likelihood, then calls \code{elnet.fit} to minimize the resulting approximation. In terms of standardization: \code{glmnet.fit} does not standardize \code{x} and \code{weights}. \code{penalty.factor} is standardized so that they sum up to \code{nvars}. } glmnet/man/print.glmnet.Rd0000644000175000017500000000263613775432176015403 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.glmnet.R \name{print.glmnet} \alias{print.glmnet} \alias{print.relaxed} \alias{print.bigGlm} \title{print a glmnet object} \usage{ \method{print}{glmnet}(x, digits = max(3, getOption("digits") - 3), ...) } \arguments{ \item{x}{fitted glmnet object} \item{digits}{significant digits in printout} \item{\dots}{additional print arguments} } \value{ The matrix above is silently returned } \description{ Print a summary of the glmnet path at each step along the path. 
} \details{ The call that produced the object \code{x} is printed, followed by a three-column matrix with columns \code{Df}, \verb{\%Dev} and \code{Lambda}. The \code{Df} column is the number of nonzero coefficients (Df is a reasonable name only for lasso fits). \verb{\%Dev} is the percent deviance explained (relative to the null deviance). In the case of a 'relaxed' fit, an additional column is inserted, \verb{\%Dev R} which gives the percent deviance explained by the relaxed model. For a "bigGlm" model, a simpler summary is printed. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = glmnet(x, y) print(fit1) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008). Regularization Paths for Generalized Linear Models via Coordinate Descent } \seealso{ \code{glmnet}, \code{predict} and \code{coef} methods. } \keyword{models} \keyword{regression} glmnet/man/BinomialExample.Rd0000644000175000017500000000076014046314073016007 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{BinomialExample} \alias{BinomialExample} \title{Synthetic dataset with binary response} \format{ List containing the following elements: \describe{ \item{x}{100 by 30 matrix of numeric values.} \item{y}{Numeric vector of length 100 containing 44 zeros and 56 ones.} } } \usage{ data(BinomialExample) } \description{ Randomly generated data for binomial regression example. 
} \keyword{data} glmnet/man/QuickStartExample.Rd0000644000175000017500000000073014046314073016344 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{QuickStartExample} \alias{QuickStartExample} \title{Synthetic dataset with Gaussian response} \format{ List containing the following elements: \describe{ \item{x}{100 by 20 matrix of numeric values.} \item{y}{Numeric vector of length 100.} } } \usage{ data(QuickStartExample) } \description{ Randomly generated data for Gaussian regression example. } \keyword{data} glmnet/man/SparseExample.Rd0000644000175000017500000000107614046314073015513 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{SparseExample} \alias{SparseExample} \title{Synthetic dataset with sparse design matrix} \format{ List containing the following elements: \describe{ \item{x}{100 by 20 matrix of numeric values. x is in sparse matrix format, having class "dgCMatrix".} \item{y}{Numeric vector of length 100.} } } \usage{ data(SparseExample) } \description{ Randomly generated data for Gaussian regression example with the design matrix x being in sparse matrix format. } \keyword{data} glmnet/man/fid.Rd0000644000175000017500000000160013775432176013512 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxgrad.R \name{fid} \alias{fid} \title{Helper function for Cox deviance and gradient} \usage{ fid(x, index) } \arguments{ \item{x}{Sorted vector of death times.} \item{index}{Vector of indices for the death times.} } \value{ A list with two arguments. \item{index_first}{A vector of indices for the first observation at each death time as they appear in the sorted list.} \item{index_ties}{If there are no ties at all, this is NULL. If not, this is a list with length equal to the number of unique times with ties. 
For each time with ties, index_ties gives the indices of the observations with a death at that time.} } \description{ Helps to find ties in death times of data. } \examples{ # Example with no ties glmnet:::fid(c(1, 4, 5, 6), 1:5) # Example with ties glmnet:::fid(c(1, 1, 1, 2, 3, 3, 4, 4, 4), 1:9) } glmnet/man/obj_function.Rd0000644000175000017500000000157113752553007015426 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{obj_function} \alias{obj_function} \title{Elastic net objective function value} \usage{ obj_function(y, mu, weights, family, lambda, alpha, coefficients, vp) } \arguments{ \item{y}{Quantitative response variable.} \item{mu}{Model's predictions for \code{y}.} \item{weights}{Observation weights.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{coefficients}{The model's coefficients (excluding intercept).} \item{vp}{Penalty factors for each of the coefficients.} } \description{ Returns the elastic net objective function value. 
} glmnet/man/glmnet.control.Rd0000644000175000017500000000472613775432176015731 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet.control.R \name{glmnet.control} \alias{glmnet.control} \title{internal glmnet parameters} \usage{ glmnet.control( fdev = 1e-05, devmax = 0.999, eps = 1e-06, big = 9.9e+35, mnlam = 5, pmin = 1e-09, exmx = 250, prec = 1e-10, mxit = 100, itrace = 0, epsnr = 1e-06, mxitnr = 25, factory = FALSE ) } \arguments{ \item{fdev}{minimum fractional change in deviance for stopping path; factory default = 1.0e-5} \item{devmax}{maximum fraction of explained deviance for stopping path; factory default = 0.999} \item{eps}{minimum value of lambda.min.ratio (see glmnet); factory default= 1.0e-6} \item{big}{large floating point number; factory default = 9.9e35. Inf in definition of upper.limit is set to big} \item{mnlam}{minimum number of path points (lambda values) allowed; factory default = 5} \item{pmin}{minimum probability for any class. factory default = 1.0e-9. Note that this implies a pmax of 1-pmin.} \item{exmx}{maximum allowed exponent. factory default = 250.0} \item{prec}{convergence threshold for multi response bounds adjustment solution. factory default = 1.0e-10} \item{mxit}{maximum iterations for multiresponse bounds adjustment solution. factory default = 100} \item{itrace}{If 1 then progress bar is displayed when running \code{glmnet} and \code{cv.glmnet}. factory default = 0} \item{epsnr}{convergence threshold for \code{glmnet.fit}. factory default = 1.0e-6} \item{mxitnr}{maximum iterations for the IRLS loop in \code{glmnet.fit}. 
factory default = 25} \item{factory}{If \code{TRUE}, reset all the parameters to the factory default; default is \code{FALSE}} } \value{ A list with named elements as in the argument list } \description{ View and/or change the factory default parameters in glmnet } \details{ If called with no arguments, \code{glmnet.control()} returns a list with the current settings of these parameters. Any arguments included in the call sets those parameters to the new values, and then silently returns. The values set are persistent for the duration of the R session. } \examples{ glmnet.control(fdev = 0) #continue along path even though not much changes glmnet.control() # view current settings glmnet.control(factory = TRUE) # reset all the parameters to their default } \seealso{ \code{glmnet} } \author{ Jerome Friedman, Kenneth Tay, Trevor Hastie\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/deviance.glmnet.Rd0000644000175000017500000000305513553366407016016 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deviance.glmnet.R \name{deviance.glmnet} \alias{deviance.glmnet} \title{Extract the deviance from a glmnet object} \usage{ \method{deviance}{glmnet}(object, ...) } \arguments{ \item{object}{fitted glmnet object} \item{\dots}{additional print arguments} } \value{ (1-dev.ratio)*nulldev } \description{ Compute the deviance sequence from the glmnet object } \details{ A glmnet object has components \code{dev.ratio} and \code{nulldev}. The former is the fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). 
Null deviance is defined to be 2*(loglike_sat -loglike(Null)); The NULL model refers to the intercept model, except for the Cox, where it is the 0 model. Hence dev.ratio=1-deviance/nulldev, and this \code{deviance} method returns (1-dev.ratio)*nulldev. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = glmnet(x, y) deviance(fit1) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent} } \seealso{ \code{glmnet}, \code{predict}, \code{print}, and \code{coef} methods. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/get_cox_lambda_max.Rd0000644000175000017500000000332613775432176016554 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{get_cox_lambda_max} \alias{get_cox_lambda_max} \title{Get lambda max for Cox regression model} \usage{ get_cox_lambda_max( x, y, alpha, weights = rep(1, nrow(x)), offset = rep(0, nrow(x)), exclude = c(), vp = rep(1, ncol(x)) ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed to be standardized.} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}.} \item{weights}{Observation weights.} \item{offset}{Offset for the model. 
Default is a zero vector of length \code{nrow(y)}.} \item{exclude}{Indices of variables to be excluded from the model.} \item{vp}{Separate penalty factors can be applied to each coefficient.} } \description{ Return the lambda max value for Cox regression model, used for computing initial lambda values. For internal use only. } \details{ This function is called by \code{cox.path} for the value of lambda max. When \code{x} is not sparse, it is expected to already by centered and scaled. When \code{x} is sparse, the function will get its attributes \code{xm} and \code{xs} for its centering and scaling factors. The value of \code{lambda_max} changes depending on whether \code{x} is centered and scaled or not, so we need \code{xm} and \code{xs} to get the correct value. } glmnet/man/glmnet.path.Rd0000644000175000017500000001476713775432176015213 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{glmnet.path} \alias{glmnet.path} \title{Fit a GLM with elastic net regularization for a path of lambda values} \usage{ glmnet.path( x, y, weights = NULL, lambda = NULL, nlambda = 100, lambda.min.ratio = ifelse(nobs < nvars, 0.01, 1e-04), alpha = 1, offset = NULL, family = gaussian(), standardize = TRUE, intercept = TRUE, thresh = 1e-10, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = integer(0), lower.limits = -Inf, upper.limits = Inf, trace.it = 0 ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. Can be a sparse matrix.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights. Default is 1 for each observation.} \item{lambda}{A user supplied lambda sequence. Typical usage is to have the program compute its own lambda sequence based on \code{nlambda} and \code{lambda.min.ratio}. 
Supplying a value of lambda overrides this.} \item{nlambda}{The number of lambda values, default is 100.} \item{lambda.min.ratio}{Smallest value for lambda as a fraction of lambda.max, the (data derived) entry value (i.e. the smallest value for which all coefficients are zero). The default depends on the sample size \code{nobs} relative to the number of variables \code{nvars}. If \code{nobs >= nvars}, the default is 0.0001, close to zero. If \code{nobs < nvars}, the default is 0.01. A very small value of \code{lambda.min.ratio} will lead to a saturated fit in the \code{nobs < nvars} case. This is undefined for some families of models, and the function will exit gracefully when the percentage deviance explained is almost 1.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{offset}{A vector of length \code{nobs} that is included in the linear predictor. Useful for the "poisson" family (e.g. log of exposure time), or for refining a model by starting at a current fit. Default is NULL. If supplied, then values must also be supplied to the \code{predict} function.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function. Default is \code{gaussian()}. (See \code{\link[stats:family]{family}} for details on family functions.)} \item{standardize}{Logical flag for x variable standardization, prior to fitting the model sequence. The coefficients are always returned on the original scale. Default is \code{standardize=TRUE}. If variables are in the same units already, you might not wish to standardize.} \item{intercept}{Should intercept be fitted (default=TRUE) or set to zero (FALSE)?} \item{thresh}{Convergence threshold for coordinate descent. 
Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}.} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to \code{nvars}.} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}.} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}.} \item{trace.it}{Controls how much information is printed to screen. Default is \code{trace.it=0} (no information printed). If \code{trace.it=1}, a progress bar is displayed. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted.} } \value{ An object with class "glmnetfit" and "glmnet". \item{a0}{Intercept sequence of length \code{length(lambda)}.} \item{beta}{A \code{nvars x length(lambda)} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients for each value of lambda.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{The actual sequence of lambda values used. When alpha=0, the largest lambda reported does not quite give the zero coefficients reported (lambda=inf would in principle). 
Instead, the largest lambda for alpha=0.001 is used, and the sequence of lambda values is derived from this.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the intercept model.} \item{npasses}{Total passes over the data summed over all lambda values.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{family}{Family used for the model.} \item{nobs}{Number of observations.} } \description{ Fit a generalized linear model via penalized maximum likelihood for a path of lambda values. Can deal with any GLM family. } \details{ \code{glmnet.path} solves the elastic net problem for a path of lambda values. It generalizes \code{glmnet::glmnet} in that it works for any GLM family. Sometimes the sequence is truncated before \code{nlambda} values of lambda have been used. This happens when \code{glmnet.path} detects that the decrease in deviance is marginal (i.e. we are near a saturated fit). 
} \examples{ set.seed(1) x <- matrix(rnorm(100 * 20), nrow = 100) y <- ifelse(rnorm(100) > 0, 1, 0) # binomial with probit link fit1 <- glmnet:::glmnet.path(x, y, family = binomial(link = "probit")) } glmnet/man/beta_CVX.Rd0000644000175000017500000000107714046050560014374 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet-package.R \name{beta_CVX} \alias{beta_CVX} \alias{x} \alias{y} \title{Simulated data for the glmnet vignette} \format{ Data objects used to demonstrate features in the glmnet vignette } \description{ Simple simulated data, used to demonstrate the features of glmnet } \details{ These datasets are artificial, and are used to test out some of the features of glmnet. } \examples{ data(QuickStartExample) x <- QuickStartExample$x; y <- QuickStartExample$y glmnet(x, y) } \keyword{datasets} glmnet/man/elnet.fit.Rd0000644000175000017500000001272113775432176014646 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{elnet.fit} \alias{elnet.fit} \title{Solve weighted least squares (WLS) problem for a single lambda value} \usage{ elnet.fit( x, y, weights, lambda, alpha = 1, intercept = TRUE, thresh = 1e-07, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = c(), lower.limits = -Inf, upper.limits = Inf, warm = NULL, from.glmnet.fit = FALSE, save.fit = FALSE ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respsectively. If it is not a sparse matrix, it is assumed that any standardization needed has already been done.} \item{y}{Quantitative response variable.} \item{weights}{Observation weights. 
\code{elnet.fit} does NOT standardize these weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0 \le \alpha \le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{intercept}{Should intercept be fitted (default=TRUE) or set to zero (FALSE)?} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-7}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}. (If a warm start object is provided, the number of passes the warm start object performed is included.)} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to \code{nvars}.} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}.} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}.} \item{warm}{Either a \code{glmnetfit} object or a list (with names \code{beta} and \code{a0} containing coefficients and intercept respectively) which can be used as a warm start. 
Default is \code{NULL}, indicating no warm start. For internal use only.} \item{from.glmnet.fit}{Was \code{elnet.fit()} called from \code{glmnet.fit()}? Default is FALSE.This has implications for computation of the penalty factors.} \item{save.fit}{Return the warm start object? Default is FALSE.} } \value{ An object with class "glmnetfit" and "glmnet". The list returned has the same keys as that of a \code{glmnet} object, except that it might have an additional \code{warm_fit} key. \item{a0}{Intercept value.} \item{beta}{A \code{nvars x 1} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{Lambda value used.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the intercept model.} \item{npasses}{Total passes over the data.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{Always FALSE, since offsets do not appear in the WLS problem. Included for compability with glmnet output.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} \item{warm_fit}{If \code{save.fit=TRUE}, output of FORTRAN routine, used for warm starts. For internal use only.} } \description{ Solves the weighted least squares (WLS) problem for a single lambda value. Internal function that users should not call directly. } \details{ WARNING: Users should not call \code{elnet.fit} directly. Higher-level functions in this package call \code{elnet.fit} as a subroutine. 
If a warm start object is provided, some of the other arguments in the function may be overriden. \code{elnet.fit} is essentially a wrapper around a FORTRAN subroutine which minimizes \deqn{1/2 \sum w_i (y_i - X_i^T \beta)^2 + \sum \lambda \gamma_j [(1-\alpha)/2 \beta^2+\alpha|\beta|],} over \eqn{\beta}, where \eqn{\gamma_j} is the relative penalty factor on the jth variable. If \code{intercept = TRUE}, then the term in the first sum is \eqn{w_i (y_i - \beta_0 - X_i^T \beta)^2}, and we are minimizing over both \eqn{\beta_0} and \eqn{\beta}. None of the inputs are standardized except for \code{penalty.factor}, which is standardized so that they sum up to \code{nvars}. } glmnet/man/na.replace.Rd0000644000175000017500000000304013553366407014755 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/makeX.R \name{na.replace} \alias{na.replace} \title{Replace the missing entries in a matrix columnwise with the entries in a supplied vector} \usage{ na.replace(x, m = rowSums(x, na.rm = TRUE)) } \arguments{ \item{x}{A matrix with potentially missing values, and also potentially in sparse matrix format (i.e. inherits from "sparseMatrix")} \item{m}{Optional argument. A vector of values used to replace the missing entries, columnwise. If missing, the column means of 'x' are used} } \value{ A version of 'x' is returned with the missing values replaced. } \description{ Missing entries in any given column of the matrix are replaced by the column means or the values in a supplied vector. } \details{ This is a simple imputation scheme. This function is called by \code{makeX} if the \code{na.impute=TRUE} option is used, but of course can be used on its own. If 'x' is sparse, the result is sparse, and the replacements are done so as to maintain sparsity. 
} \examples{ set.seed(101) ### Single data frame X = matrix(rnorm(20), 10, 2) X[3, 1] = NA X[5, 2] = NA X3 = sample(letters[1:3], 10, replace = TRUE) X3[6] = NA X4 = sample(LETTERS[1:3], 10, replace = TRUE) X4[9] = NA dfn = data.frame(X, X3, X4) x = makeX(dfn) m = rowSums(x, na.rm = TRUE) na.replace(x, m) x = makeX(dfn, sparse = TRUE) na.replace(x, m) } \seealso{ \code{makeX} and \code{glmnet} } \author{ Trevor Hastie\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} glmnet/man/glmnet.Rd0000644000175000017500000005033714046050560014232 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet.R, R/relax.glmnet.R \name{glmnet} \alias{glmnet} \alias{relax.glmnet} \title{fit a GLM with lasso or elasticnet regularization} \usage{ glmnet( x, y, family = c("gaussian", "binomial", "poisson", "multinomial", "cox", "mgaussian"), weights = NULL, offset = NULL, alpha = 1, nlambda = 100, lambda.min.ratio = ifelse(nobs < nvars, 0.01, 1e-04), lambda = NULL, standardize = TRUE, intercept = TRUE, thresh = 1e-07, dfmax = nvars + 1, pmax = min(dfmax * 2 + 20, nvars), exclude = NULL, penalty.factor = rep(1, nvars), lower.limits = -Inf, upper.limits = Inf, maxit = 1e+05, type.gaussian = ifelse(nvars < 500, "covariance", "naive"), type.logistic = c("Newton", "modified.Newton"), standardize.response = FALSE, type.multinomial = c("ungrouped", "grouped"), relax = FALSE, trace.it = 0, ... ) relax.glmnet(fit, x, ..., maxp = n - 3, path = FALSE, check.args = TRUE) } \arguments{ \item{x}{input matrix, of dimension nobs x nvars; each row is an observation vector. Can be in sparse matrix format (inherit from class \code{"sparseMatrix"} as in package \code{Matrix})} \item{y}{response variable. Quantitative for \code{family="gaussian"}, or \code{family="poisson"} (non-negative counts). 
For \code{family="binomial"} should be either a factor with two levels, or a two-column matrix of counts or proportions (the second column is treated as the target class; for a factor, the last level in alphabetical order is the target class). For \code{family="multinomial"}, can be a \code{nc>=2} level factor, or a matrix with \code{nc} columns of counts or proportions. For either \code{"binomial"} or \code{"multinomial"}, if \code{y} is presented as a vector, it will be coerced into a factor. For \code{family="cox"}, preferably a \code{Surv} object from the survival package: see Details section for more information. For \code{family="mgaussian"}, \code{y} is a matrix of quantitative responses.} \item{family}{Either a character string representing one of the built-in families, or else a \code{glm()} family object. For more information, see Details section below or the documentation for response type (above).} \item{weights}{observation weights. Can be total counts if responses are proportion matrices. Default is 1 for each observation} \item{offset}{A vector of length \code{nobs} that is included in the linear predictor (a \code{nobs x nc} matrix for the \code{"multinomial"} family). Useful for the \code{"poisson"} family (e.g. log of exposure time), or for refining a model by starting at a current fit. Default is \code{NULL}. If supplied, then values must also be supplied to the \code{predict} function.} \item{alpha}{The elasticnet mixing parameter, with \eqn{0\le\alpha\le 1}. The penalty is defined as \deqn{(1-\alpha)/2||\beta||_2^2+\alpha||\beta||_1.} \code{alpha=1} is the lasso penalty, and \code{alpha=0} the ridge penalty.} \item{nlambda}{The number of \code{lambda} values - default is 100.} \item{lambda.min.ratio}{Smallest value for \code{lambda}, as a fraction of \code{lambda.max}, the (data derived) entry value (i.e. the smallest value for which all coefficients are zero). 
The default depends on the sample size \code{nobs} relative to the number of variables \code{nvars}. If \code{nobs > nvars}, the default is \code{0.0001}, close to zero. If \code{nobs < nvars}, the default is \code{0.01}. A very small value of \code{lambda.min.ratio} will lead to a saturated fit in the \code{nobs < nvars} case. This is undefined for \code{"binomial"} and \code{"multinomial"} models, and \code{glmnet} will exit gracefully when the percentage deviance explained is almost 1.} \item{lambda}{A user supplied \code{lambda} sequence. Typical usage is to have the program compute its own \code{lambda} sequence based on \code{nlambda} and \code{lambda.min.ratio}. Supplying a value of \code{lambda} overrides this. WARNING: use with care. Avoid supplying a single value for \code{lambda} (for predictions after CV use \code{predict()} instead). Supply instead a decreasing sequence of \code{lambda} values. \code{glmnet} relies on its warms starts for speed, and its often faster to fit a whole path than compute a single fit.} \item{standardize}{Logical flag for x variable standardization, prior to fitting the model sequence. The coefficients are always returned on the original scale. Default is \code{standardize=TRUE}. If variables are in the same units already, you might not wish to standardize. See details below for y standardization with \code{family="gaussian"}.} \item{intercept}{Should intercept(s) be fitted (default=TRUE) or set to zero (FALSE)} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than \code{thresh} times the null deviance. Defaults value is \code{1E-7}.} \item{dfmax}{Limit the maximum number of variables in the model. 
Useful for very large \code{nvars}, if a partial path is desired.} \item{pmax}{Limit the maximum number of variables ever to be nonzero} \item{exclude}{Indices of variables to be excluded from the model. Default is none. Equivalent to an infinite penalty factor for the variables excluded (next item). Users can supply instead an \code{exclude} function that generates the list of indices. This function is most generally defined as \code{function(x, y, weights, ...)}, and is called inside \code{glmnet} to generate the indices for excluded variables. The \code{...} argument is required, the others are optional. This is useful for filtering wide data, and works correctly with \code{cv.glmnet}. See the vignette 'Introduction' for examples.} \item{penalty.factor}{Separate penalty factors can be applied to each coefficient. This is a number that multiplies \code{lambda} to allow differential shrinkage. Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in \code{exclude}). Note: the penalty factors are internally rescaled to sum to nvars, and the lambda sequence will reflect this change.} \item{lower.limits}{Vector of lower limits for each coefficient; default \code{-Inf}. Each of these must be non-positive. Can be presented as a single value (which will then be replicated), else a vector of length \code{nvars}} \item{upper.limits}{Vector of upper limits for each coefficient; default \code{Inf}. See \code{lower.limits}} \item{maxit}{Maximum number of passes over the data for all lambda values; default is 10^5.} \item{type.gaussian}{Two algorithm types are supported for (only) \code{family="gaussian"}. The default when \code{nvar<500} is \code{type.gaussian="covariance"}, and saves all inner-products ever computed. 
This can be much faster than \code{type.gaussian="naive"}, which loops through \code{nobs} every time an inner-product is computed. The latter can be far more efficient for \code{nvar >> nobs} situations, or when \code{nvar > 500}.} \item{type.logistic}{If \code{"Newton"} then the exact hessian is used (default), while \code{"modified.Newton"} uses an upper-bound on the hessian, and can be faster.} \item{standardize.response}{This is for the \code{family="mgaussian"} family, and allows the user to standardize the response variables} \item{type.multinomial}{If \code{"grouped"} then a grouped lasso penalty is used on the multinomial coefficients for a variable. This ensures they are all in or out together. The default is \code{"ungrouped"}} \item{relax}{If \code{TRUE} then for each \emph{active set} in the path of solutions, the model is refit without any regularization. See \code{details} for more information. This argument is new, and users may experience convergence issues with small datasets, especially with non-gaussian families. Limiting the value of 'maxp' can alleviate these issues in some cases.} \item{trace.it}{If \code{trace.it=1}, then a progress bar is displayed; useful for big models that take a long time to fit.} \item{...}{Additional arguments used in \code{relax.glmnet}. These include some of the original arguments to 'glmnet', and each must be named if used.} \item{fit}{For \code{relax.glmnet} a fitted 'glmnet' object} \item{maxp}{a limit on how many relaxed coefficients are allowed. Default is 'n-3', where 'n' is the sample size. This may not be sufficient for non-gaussian families, in which case users should supply a smaller value. This argument can be supplied directly to 'glmnet'.} \item{path}{Since \code{glmnet} does not do stepsize optimization, the Newton algorithm can get stuck and not converge, especially with relaxed fits.
With \code{path=TRUE}, each relaxed fit on the appropriate subset of variables is computed pathwise using the original sequence of lambda values (with a zero attached to the end). Not needed for Gaussian models, and should not be used unless needed, since it will lead to longer compute times. Default is \code{path=FALSE}.} \item{check.args}{Should \code{relax.glmnet} make sure that all the data dependent arguments used in creating 'fit' have been resupplied. Default is 'TRUE'.} } \value{ An object with S3 class \code{"glmnet","*" }, where \code{"*"} is \code{"elnet"}, \code{"lognet"}, \code{"multnet"}, \code{"fishnet"} (poisson), \code{"coxnet"} or \code{"mrelnet"} for the various types of models. If the model was created with \code{relax=TRUE} then this class has a prefix class of \code{"relaxed"}. \item{call}{the call that produced this object} \item{a0}{Intercept sequence of length \code{length(lambda)}} \item{beta}{For \code{"elnet"}, \code{"lognet"}, \code{"fishnet"} and \code{"coxnet"} models, a \code{nvars x length(lambda)} matrix of coefficients, stored in sparse column format (\code{"CsparseMatrix"}). For \code{"multnet"} and \code{"mgaussian"}, a list of \code{nc} such matrices, one for each class.} \item{lambda}{The actual sequence of \code{lambda} values used. When \code{alpha=0}, the largest lambda reported does not quite give the zero coefficients reported (\code{lambda=inf} would in principle). Instead, the largest \code{lambda} for \code{alpha=0.001} is used, and the sequence of \code{lambda} values is derived from this.} \item{dev.ratio}{The fraction of (null) deviance explained (for \code{"elnet"}, this is the R-square). The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation).
Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)); The NULL model refers to the intercept model, except for the Cox, where it is the 0 model.} \item{df}{The number of nonzero coefficients for each value of \code{lambda}. For \code{"multnet"}, this is the number of variables with a nonzero coefficient for \emph{any} class.} \item{dfmat}{For \code{"multnet"} and \code{"mrelnet"} only. A matrix consisting of the number of nonzero coefficients per class} \item{dim}{dimension of coefficient matrix (ices)} \item{nobs}{number of observations} \item{npasses}{total passes over the data summed over all lambda values} \item{offset}{a logical variable indicating whether an offset was included in the model} \item{jerr}{error flag, for warnings and errors (largely for internal debugging).} \item{relaxed}{If \code{relax=TRUE}, this additional item is another glmnet object with different values for \code{beta} and \code{dev.ratio}} } \description{ Fit a generalized linear model via penalized maximum likelihood. The regularization path is computed for the lasso or elasticnet penalty at a grid of values for the regularization parameter lambda. Can deal with all shapes of data, including very large sparse data matrices. Fits linear, logistic and multinomial, poisson, and Cox regression models. } \details{ The sequence of models implied by \code{lambda} is fit by coordinate descent. For \code{family="gaussian"} this is the lasso sequence if \code{alpha=1}, else it is the elasticnet sequence. 
The objective function for \code{"gaussian"} is \deqn{1/2 RSS/nobs + \lambda*penalty,} and for the other models it is \deqn{-loglik/nobs + \lambda*penalty.} Note also that for \code{"gaussian"}, \code{glmnet} standardizes y to have unit variance (using 1/n rather than 1/(n-1) formula) before computing its lambda sequence (and then unstandardizes the resulting coefficients); if you wish to reproduce/compare results with other software, best to supply a standardized y. The coefficients for any predictor variables with zero variance are set to zero for all values of lambda. \subsection{Details on \code{family} option}{ From version 4.0 onwards, glmnet supports both the original built-in families, as well as \emph{any} family object as used by \code{stats::glm()}. This opens the door to a wide variety of additional models. For example \code{family=binomial(link=cloglog)} or \code{family=negative.binomial(theta=1.5)} (from the MASS library). Note that the code runs faster for the built-in families. The built-in families are specified via a character string. For all families, the object produced is a lasso or elasticnet regularization path for fitting the generalized linear regression paths, by maximizing the appropriate penalized log-likelihood (partial likelihood for the "cox" model). Sometimes the sequence is truncated before \code{nlambda} values of \code{lambda} have been used, because of instabilities in the inverse link functions near a saturated fit. \code{glmnet(...,family="binomial")} fits a traditional logistic regression model for the log-odds. \code{glmnet(...,family="multinomial")} fits a symmetric multinomial model, where each class is represented by a linear model (on the log-scale). The penalties take care of redundancies.
A two-class \code{"multinomial"} model will produce the same fit as the corresponding \code{"binomial"} model, except the pair of coefficient matrices will be equal in magnitude and opposite in sign, and half the \code{"binomial"} values. Two useful additional families are the \code{family="mgaussian"} family and the \code{type.multinomial="grouped"} option for multinomial fitting. The former allows a multi-response gaussian model to be fit, using a "group-lasso" penalty on the coefficients for each variable. Tying the responses together like this is called "multi-task" learning in some domains. The grouped multinomial allows the same penalty for the \code{family="multinomial"} model, which is also multi-response. For both of these the penalty on the coefficient vector for variable j is \deqn{(1-\alpha)/2||\beta_j||_2^2+\alpha||\beta_j||_2.} When \code{alpha=1} this is a group-lasso penalty, and otherwise it mixes with quadratic just like elasticnet. A small detail in the Cox model: if death times are tied with censored times, we assume the censored times occurred just \emph{before} the death times in computing the Breslow approximation; if users prefer the usual convention of \emph{after}, they can add a small number to all censoring times to achieve this effect. } \subsection{Details on response for \code{family="cox"}}{ For Cox models, the response should preferably be a \code{Surv} object, created by the \code{Surv()} function in the \pkg{survival} package. For right-censored data, this object should have type "right", and for (start, stop] data, it should have type "counting". To fit stratified Cox models, strata should be added to the response via the \code{stratifySurv()} function before passing the response to \code{glmnet()}. (For backward compatibility, right-censored data can also be passed as a two-column matrix with columns named 'time' and 'status'. The latter is a binary variable, with '1' indicating death, and '0' indicating right censored.)
} \subsection{Details on \code{relax} option}{ If \code{relax=TRUE} a duplicate sequence of models is produced, where each active set in the elastic-net path is refit without regularization. The result of this is a matching \code{"glmnet"} object which is stored on the original object in a component named \code{"relaxed"}, and is part of the glmnet output. Generally users will not call \code{relax.glmnet} directly, unless the original 'glmnet' object took a long time to fit. But if they do, they must supply the fit, and all the original arguments used to create that fit. They can limit the length of the relaxed path via 'maxp'. } } \examples{ # Gaussian x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) fit1 = glmnet(x, y) print(fit1) coef(fit1, s = 0.01) # extract coefficients at a single value of lambda predict(fit1, newx = x[1:10, ], s = c(0.01, 0.005)) # make predictions # Relaxed fit1r = glmnet(x, y, relax = TRUE) # can be used with any model # multivariate gaussian y = matrix(rnorm(100 * 3), 100, 3) fit1m = glmnet(x, y, family = "mgaussian") plot(fit1m, type.coef = "2norm") # binomial g2 = sample(c(0,1), 100, replace = TRUE) fit2 = glmnet(x, g2, family = "binomial") fit2n = glmnet(x, g2, family = binomial(link=cloglog)) fit2r = glmnet(x,g2, family = "binomial", relax=TRUE) fit2rp = glmnet(x,g2, family = "binomial", relax=TRUE, path=TRUE) # multinomial g4 = sample(1:4, 100, replace = TRUE) fit3 = glmnet(x, g4, family = "multinomial") fit3a = glmnet(x, g4, family = "multinomial", type.multinomial = "grouped") # poisson N = 500 p = 20 nzc = 5 x = matrix(rnorm(N * p), N, p) beta = rnorm(nzc) f = x[, seq(nzc)] \%*\% beta mu = exp(f) y = rpois(N, mu) fit = glmnet(x, y, family = "poisson") plot(fit) pfit = predict(fit, x, s = 0.001, type = "response") plot(pfit, y) # Cox set.seed(10101) N = 1000 p = 30 nzc = p/3 x = matrix(rnorm(N * p), N, p) beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta/3 hx = exp(fx) ty = rexp(N, hx) tcens = rbinom(n = N, prob = 0.3, size = 1) 
# censoring indicator y = cbind(time = ty, status = 1 - tcens) # y=Surv(ty,1-tcens) with library(survival) fit = glmnet(x, y, family = "cox") plot(fit) # Cox example with (start, stop] data set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) xvec[sample.int(nobs * nvars, size = 0.4 * nobs * nvars)] <- 0 x <- matrix(xvec, nrow = nobs) start_time <- runif(100, min = 0, max = 5) stop_time <- start_time + runif(100, min = 0.1, max = 3) status <- rbinom(n = nobs, prob = 0.3, size = 1) jsurv_ss <- survival::Surv(start_time, stop_time, status) fit <- glmnet(x, jsurv_ss, family = "cox") # Cox example with strata jsurv_ss2 <- stratifySurv(jsurv_ss, rep(1:2, each = 50)) fit <- glmnet(x, jsurv_ss2, family = "cox") # Sparse n = 10000 p = 200 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) iz = sample(1:(n * p), size = n * p * 0.85, replace = FALSE) x[iz] = 0 sx = Matrix(x, sparse = TRUE) inherits(sx, "sparseMatrix") #confirm that it is sparse beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta eps = rnorm(n) y = fx + eps px = exp(fx) px = px/(1 + px) ly = rbinom(n = length(px), prob = px, size = 1) system.time(fit1 <- glmnet(sx, y)) system.time(fit2n <- glmnet(x, y)) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent (2010), Journal of Statistical Software, Vol. 33(1), 1-22}, \url{https://web.stanford.edu/~hastie/Papers/glmnet.pdf}.\cr Simon, N., Friedman, J., Hastie, T. and Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5), 1-13}, \url{https://www.jstatsoft.org/v39/i05/}.\cr Tibshirani, Robert, Bien, J., Friedman, J., Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2012) \emph{Strong Rules for Discarding Predictors in Lasso-type Problems, JRSSB, Vol. 
74(2), 245-266}, \url{https://statweb.stanford.edu/~tibs/ftp/strong.pdf}.\cr Hastie, T., Tibshirani, Robert and Tibshirani, Ryan. \emph{Extended Comparisons of Best Subset Selection, Forward Stepwise Selection, and the Lasso (2017), Stanford Statistics Technical Report}, \url{https://arxiv.org/abs/1707.08692}.\cr Glmnet webpage with four vignettes, \url{https://glmnet.stanford.edu}. } \seealso{ \code{print}, \code{predict}, \code{coef} and \code{plot} methods, and the \code{cv.glmnet} function. } \author{ Jerome Friedman, Trevor Hastie, Balasubramanian Narasimhan, Noah Simon, Kenneth Tay and Rob Tibshirani\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/glmnet-internal.Rd0000644000175000017500000000353313775432176016057 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet-package.R \name{glmnet-internal} \alias{glmnet-internal} \alias{auc} \alias{assess.coxnet} \alias{auc.mat} \alias{cvtype} \alias{cvstats} \alias{cvcompute} \alias{getcoef} \alias{getcoef.multinomial} \alias{fix.lam} \alias{error.bars} \alias{getmin} \alias{elnet} \alias{mrelnet} \alias{lognet} \alias{fishnet} \alias{coefnorm} \alias{coxnet} \alias{cv.lognet} \alias{cv.elnet} \alias{cv.multnet} \alias{cv.mrelnet} \alias{cv.coxnet} \alias{cv.fishnet} \alias{cv.glmnet.raw} \alias{cv.relaxed.raw} \alias{blend.relaxed} \alias{checkgamma.relax} \alias{buildPredmat} \alias{buildPredmat.mrelnetlist} \alias{buildPredmat.multnetlist} \alias{buildPredmat.lognetlist} \alias{buildPredmat.array} \alias{buildPredmat.coxnetlist} \alias{buildPredmat.default} \alias{lambda.interp} \alias{nonzeroCoef} \alias{glmnet_softmax} \alias{getOptcv.glmnet} \alias{getOptcv.relaxed} \alias{jerr} \alias{jerr.elnet} \alias{jerr.lognet} \alias{jerr.fishnet} \alias{jerr.coxnet} \alias{jerr.mrelnet} \alias{plotCoef} \alias{zeromat} \alias{na.mean} \alias{check_dots} \alias{na_sparse_fix} \alias{prepareX} \title{Internal 
glmnet functions} \description{ These are not intended for use by users. \code{lambda.interp} does linear interpolation of the lambdas to obtain a prediction at a new point s. \code{glmnet_softmax} does the classification for multinomial models. \code{nonzeroCoef} determines in an efficient manner which variables are nonzero in each fit. \code{jerr} prints out error messages from the fortran. \code{plotCoef} is called by the \code{plot} method for \code{glmnet} objects. \code{check_dots} is used in \code{coef} and \code{predict} with argument \code{exact=TRUE}, to make sure user supplies original data used to fit the \code{"glmnet"} object. } \author{ Trevor Hastie } \keyword{internal} glmnet/man/rmult.Rd0000644000175000017500000000117613554367224014117 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rmult.R \name{rmult} \alias{rmult} \title{Generate multinomial samples from a probability matrix} \usage{ rmult(p) } \arguments{ \item{p}{matrix of probabilities, with number of columns the number of classes} } \value{ a vector of class memberships } \description{ Generate multinomial samples } \details{ Simple function that calls the \code{rmultinom} function. It generates a class label for each row of its input matrix of class probabilities. 
} \author{ Trevor Hastie \cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } glmnet/man/Cindex.Rd0000644000175000017500000000276113752553007014163 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Cindex.R \name{Cindex} \alias{Cindex} \title{compute C index for a Cox model} \usage{ Cindex(pred, y, weights = rep(1, nrow(y))) } \arguments{ \item{pred}{Predictions from a \code{"coxnet"} object} \item{y}{a survival response object - a matrix with two columns "time" and "status"; see documentation for "glmnet"} \item{weights}{optional observation weights} } \description{ Computes Harrel's C index for predictions from a \code{"coxnet"} object. } \details{ Computes the concordance index, taking into account censoring. } \examples{ set.seed(10101) N = 1000 p = 30 nzc = p/3 x = matrix(rnorm(N * p), N, p) beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta/3 hx = exp(fx) ty = rexp(N, hx) tcens = rbinom(n = N, prob = 0.3, size = 1) # censoring indicator y = cbind(time = ty, status = 1 - tcens) # y=Surv(ty,1-tcens) with library(survival) fit = glmnet(x, y, family = "cox") pred = predict(fit, newx = x) apply(pred, 2, Cindex, y=y) cv.glmnet(x, y, family = "cox", type.measure = "C") } \references{ Harrel Jr, F. E. and Lee, K. L. and Mark, D. B. (1996) \emph{Tutorial in biostatistics: multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing error}, Statistics in Medicine, 15, pages 361--387. 
} \seealso{ \code{cv.glmnet} } \author{ Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{Cox} \keyword{cross-validation} \keyword{models} glmnet/man/glmnet-package.Rd0000644000175000017500000000501513752553007015623 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet-package.R \docType{package} \name{glmnet-package} \alias{glmnet-package} \title{Elastic net model paths for some generalized linear models} \description{ This package fits lasso and elastic-net model paths for regression, logistic and multinomial regression using coordinate descent. The algorithm is extremely fast, and exploits sparsity in the input x matrix where it exists. A variety of predictions can be made from the fitted models. } \details{ \tabular{ll}{ Package: \tab glmnet\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2008-05-14\cr License: \tab What license is it under?\cr } Very simple to use. Accepts \code{x,y} data for regression models, and produces the regularization path over a grid of values for the tuning parameter \code{lambda}. Only 5 functions: \code{glmnet}\cr \code{predict.glmnet}\cr \code{plot.glmnet}\cr \code{print.glmnet}\cr \code{coef.glmnet} } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) g2 = sample(1:2, 100, replace = TRUE) g4 = sample(1:4, 100, replace = TRUE) fit1 = glmnet(x, y) predict(fit1, newx = x[1:5, ], s = c(0.01, 0.005)) predict(fit1, type = "coef") plot(fit1, xvar = "lambda") fit2 = glmnet(x, g2, family = "binomial") predict(fit2, type = "response", newx = x[2:5, ]) predict(fit2, type = "nonzero") fit3 = glmnet(x, g4, family = "multinomial") predict(fit3, newx = x[1:3, ], type = "response", s = 0.01) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent}, \url{https://web.stanford.edu/~hastie/Papers/glmnet.pdf}\cr \emph{Journal of Statistical Software, Vol. 
33(1), 1-22 Feb 2010}\cr \url{https://www.jstatsoft.org/v33/i01/}\cr Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5) 1-13}\cr \url{https://www.jstatsoft.org/v39/i05/}\cr Tibshirani, Robert., Bien, J., Friedman, J.,Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2012) \emph{Strong Rules for Discarding Predictors in Lasso-type Problems, JRSSB, vol 74},\cr \url{https://statweb.stanford.edu/~tibs/ftp/strong.pdf}\cr \emph{Glmnet webpage with four vignettes} \url{https://glmnet.stanford.edu} } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{package} \keyword{regression} glmnet/man/cox.fit.Rd0000644000175000017500000001215313775432176014327 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{cox.fit} \alias{cox.fit} \title{Fit a Cox regression model with elastic net regularization for a single value of lambda} \usage{ cox.fit( x, y, weights, lambda, alpha = 1, offset = rep(0, nobs), thresh = 1e-10, maxit = 1e+05, penalty.factor = rep(1, nvars), exclude = c(), lower.limits = -Inf, upper.limits = Inf, warm = NULL, from.cox.path = FALSE, save.fit = FALSE, trace.it = 0 ) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respectively. If it is not a sparse matrix, it is assumed that any standardization needed has already been done.} \item{y}{Survival response variable, must be a Surv or stratifySurv object.} \item{weights}{Observation weights.
\code{cox.fit} does NOT standardize these weights.} \item{lambda}{A single value for the \code{lambda} hyperparameter.} \item{alpha}{See glmnet help file} \item{offset}{See glmnet help file} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{maxit}{Maximum number of passes over the data; default is \code{10^5}. (If a warm start object is provided, the number of passes the warm start object performed is included.)} \item{penalty.factor}{See glmnet help file} \item{exclude}{See glmnet help file} \item{lower.limits}{See glmnet help file} \item{upper.limits}{See glmnet help file} \item{warm}{Either a \code{glmnetfit} object or a list (with name \code{beta} containing coefficients) which can be used as a warm start. Default is \code{NULL}, indicating no warm start. For internal use only.} \item{from.cox.path}{Was \code{cox.fit()} called from \code{cox.path()}? Default is FALSE. This has implications for computation of the penalty factors.} \item{save.fit}{Return the warm start object? Default is FALSE.} \item{trace.it}{Controls how much information is printed to screen. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted. Default is \code{trace.it=0} (no information printed). (\code{trace.it=1} not used for compatibility with \code{glmnet.path}.)} } \value{ An object with class "coxnet", "glmnetfit" and "glmnet". The list returned contains more keys than that of a "glmnet" object.
\item{a0}{Intercept value, \code{NULL} for "cox" family.} \item{beta}{A \code{nvars x 1} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{Lambda value used.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the 0 model.} \item{npasses}{Total passes over the data.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} \item{warm_fit}{If \code{save.fit=TRUE}, output of FORTRAN routine, used for warm starts. For internal use only.} \item{family}{Family used for the model, always "cox".} \item{converged}{A logical variable: was the algorithm judged to have converged?} \item{boundary}{A logical variable: is the fitted value on the boundary of the attainable values?} \item{obj_function}{Objective function value at the solution.} } \description{ Fit a Cox regression model via penalized maximum likelihood for a single value of lambda. Can deal with (start, stop] data and strata, as well as sparse design matrices. } \details{ WARNING: Users should not call \code{cox.fit} directly. Higher-level functions in this package call \code{cox.fit} as a subroutine. If a warm start object is provided, some of the other arguments in the function may be overridden. \code{cox.fit} solves the elastic net problem for a single, user-specified value of lambda.
\code{cox.fit} works for Cox regression models, including (start, stop] data and strata. It solves the problem using iteratively reweighted least squares (IRLS). For each IRLS iteration, \code{cox.fit} makes a quadratic (Newton) approximation of the log-likelihood, then calls \code{elnet.fit} to minimize the resulting approximation. In terms of standardization: \code{cox.fit} does not standardize \code{x} and \code{weights}. \code{penalty.factor} is standardized so that it sums to \code{nvars}. } glmnet/man/weighted_mean_sd.Rd0000644000175000017500000000120114013330131016203 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{weighted_mean_sd} \alias{weighted_mean_sd} \title{Helper function to compute weighted mean and standard deviation} \usage{ weighted_mean_sd(x, weights = rep(1, nrow(x))) } \arguments{ \item{x}{Observation matrix.} \item{weights}{Optional weight vector.} } \value{ A list with components. \item{mean}{vector of weighted means of columns of x} \item{sd}{vector of weighted standard deviations of columns of x} } \description{ Helper function to compute weighted mean and standard deviation. Deals gracefully whether x is sparse matrix or not. } glmnet/man/get_eta.Rd0000644000175000017500000000151513752553007014355 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{get_eta} \alias{get_eta} \title{Helper function to get etas (linear predictions)} \usage{ get_eta(x, beta, a0) } \arguments{ \item{x}{Input matrix, of dimension \code{nobs x nvars}; each row is an observation vector. If it is a sparse matrix, it is assumed to be unstandardized. It should have attributes \code{xm} and \code{xs}, where \code{xm(j)} and \code{xs(j)} are the centering and scaling factors for variable j respectively.
If it is not a sparse matrix, it is assumed to be standardized.} \item{beta}{Feature coefficients.} \item{a0}{Intercept.} } \description{ Given x, coefficients and intercept, return linear predictions. Wrapper that works with both regular and sparse x. Only works for single set of coefficients and intercept. } glmnet/man/PoissonExample.Rd0000644000175000017500000000075714046314073015715 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{PoissonExample} \alias{PoissonExample} \title{Synthetic dataset with count response} \format{ List containing the following elements: \describe{ \item{x}{500 by 20 matrix of numeric values.} \item{y}{Numeric vector of length 500 consisting of non-negative integers.} } } \usage{ data(PoissonExample) } \description{ Randomly generated data for Poisson regression example. } \keyword{data} glmnet/man/predict.cv.glmnet.Rd0000644000175000017500000000557313775432176016313 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.cv.glmnet.R, R/predict.cv.relaxed.R \name{predict.cv.glmnet} \alias{predict.cv.glmnet} \alias{coef.cv.glmnet} \alias{coef.cv.relaxed} \alias{predict.cv.relaxed} \title{make predictions from a "cv.glmnet" object.} \usage{ \method{predict}{cv.glmnet}(object, newx, s = c("lambda.1se", "lambda.min"), ...) \method{predict}{cv.relaxed}( object, newx, s = c("lambda.1se", "lambda.min"), gamma = c("gamma.1se", "gamma.min"), ... ) } \arguments{ \item{object}{Fitted \code{"cv.glmnet"} or \code{"cv.relaxed"} object.} \item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix; can be sparse as in \code{Matrix} package. See documentation for \code{predict.glmnet}.} \item{s}{Value(s) of the penalty parameter \code{lambda} at which predictions are required. Default is the value \code{s="lambda.1se"} stored on the CV \code{object}. 
Alternatively \code{s="lambda.min"} can be used. If \code{s} is numeric, it is taken as the value(s) of \code{lambda} to be used. (For historical reasons we use the symbol 's' rather than 'lambda' to reference this parameter)} \item{\dots}{Not used. Other arguments to predict.} \item{gamma}{Value (single) of 'gamma' at which predictions are to be made} } \value{ The object returned depends on the \dots{} argument which is passed on to the \code{predict} method for \code{glmnet} objects. } \description{ This function makes predictions from a cross-validated glmnet model, using the stored \code{"glmnet.fit"} object, and the optimal value chosen for \code{lambda} (and \code{gamma} for a 'relaxed' fit. } \details{ This function makes it easier to use the results of cross-validation to make a prediction. } \examples{ x = matrix(rnorm(100 * 20), 100, 20) y = rnorm(100) cv.fit = cv.glmnet(x, y) predict(cv.fit, newx = x[1:5, ]) coef(cv.fit) coef(cv.fit, s = "lambda.min") predict(cv.fit, newx = x[1:5, ], s = c(0.001, 0.002)) cv.fitr = cv.glmnet(x, y, relax = TRUE) predict(cv.fit, newx = x[1:5, ]) coef(cv.fit) coef(cv.fit, s = "lambda.min", gamma = "gamma.min") predict(cv.fit, newx = x[1:5, ], s = c(0.001, 0.002), gamma = "gamma.min") } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent, Journal of Statistical Software, Vol. 33, Issue 1, Feb 2010}\cr \url{https://www.jstatsoft.org/v33/i01/} \url{https://arxiv.org/abs/1707.08692}\cr Hastie, T., Tibshirani, Robert, Tibshirani, Ryan (2019) \emph{Extended Comparisons of Best Subset Selection, Forward Stepwise Selection, and the Lasso} } \seealso{ \code{glmnet}, and \code{print}, and \code{coef} methods, and \code{cv.glmnet}. 
} \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/CoxExample.Rd0000644000175000017500000000121414046314073015001 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{CoxExample} \alias{CoxExample} \title{Synthetic dataset with right-censored survival response} \format{ List containing the following elements: \describe{ \item{x}{1,000 by 30 matrix of numeric values.} \item{y}{1,000 by 2 matrix with column names "time" and "status". The first column consists of positive numbers representing time to event, while the second column represents the status indicator (0=right-censored, 1=observed).} } } \usage{ data(CoxExample) } \description{ Randomly generated data for Cox regression example. } \keyword{data} glmnet/man/mycoxph.Rd0000644000175000017500000000122013775432176014435 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survfit.coxnet.R \name{mycoxph} \alias{mycoxph} \title{Helper function to fit coxph model for survfit.coxnet} \usage{ mycoxph(object, s, ...) } \arguments{ \item{object}{A class \code{coxnet} object.} \item{s}{The value of the penalty parameter lambda at which the survival curve is required.} \item{...}{The same ... that was passed to survfit.coxnet.} } \description{ This function constructs the coxph call needed to run the "hack" of coxph with 0 iterations. It's a separate function as we have to deal with function options like strata, offset and observation weights. 
} glmnet/man/survfit.coxnet.Rd0000644000175000017500000000460113775432176015755 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survfit.coxnet.R \name{survfit.coxnet} \alias{survfit.coxnet} \title{Compute a survival curve from a coxnet object} \usage{ \method{survfit}{coxnet}(formula, s = NULL, ...) } \arguments{ \item{formula}{A class \code{coxnet} object.} \item{s}{Value(s) of the penalty parameter lambda at which the survival curve is required. Default is the entire sequence used to create the model. However, it is recommended that \code{survfit.coxnet} is called for a single penalty parameter.} \item{...}{This is the mechanism for passing additional arguments like (i) x= and y= for the x and y used to fit the model, (ii) weights= and offset= when the model was fit with these options, (iii) arguments for new data (newx, newoffset, newstrata), and (iv) arguments to be passed to survfit.coxph().} } \value{ If \code{s} is a single value, an object of class "survfitcox" and "survfit" containing one or more survival curves. Otherwise, a list of such objects, one element for each value in \code{s}. Methods defined for survfit objects are print, summary and plot. } \description{ Computes the predicted survivor function for a Cox proportional hazards model with elastic net penalty. } \details{ To be consistent with other functions in \code{glmnet}, if \code{s} is not specified, survival curves are returned for the entire lambda sequence. This is not recommended usage: it is best to call \code{survfit.coxnet} with a single value of the penalty parameter for the \code{s} option. 
} \examples{ set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) xvec[sample.int(nobs * nvars, size = 0.4 * nobs * nvars)] <- 0 x <- matrix(xvec, nrow = nobs) beta <- rnorm(nvars / 3) fx <- x[, seq(nvars / 3)] \%*\% beta / 3 ty <- rexp(nobs, exp(fx)) tcens <- rbinom(n = nobs, prob = 0.3, size = 1) y <- survival::Surv(ty, tcens) fit1 <- glmnet(x, y, family = "cox") # survfit object for Cox model where lambda = 0.1 sf1 <- survival::survfit(fit1, s = 0.1, x = x, y = y) plot(sf1) # example with new data sf2 <- survival::survfit(fit1, s = 0.1, x = x, y = y, newx = x[1:3, ]) plot(sf2) # example with strata y2 <- stratifySurv(y, rep(1:2, length.out = nobs)) fit2 <- glmnet(x, y2, family = "cox") sf3 <- survival::survfit(fit2, s = 0.1, x = x, y = y2) sf4 <- survival::survfit(fit2, s = 0.1, x = x, y = y2, newx = x[1:3, ], newstrata = c(1, 1, 1)) } glmnet/man/dev_function.Rd0000644000175000017500000000106013752553007015423 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnetFlex.R \name{dev_function} \alias{dev_function} \title{Elastic net deviance value} \usage{ dev_function(y, mu, weights, family) } \arguments{ \item{y}{Quantitative response variable.} \item{mu}{Model's predictions for \code{y}.} \item{weights}{Observation weights.} \item{family}{A description of the error distribution and link function to be used in the model. This is the result of a call to a family function.} } \description{ Returns the elastic net deviance value. 
} glmnet/man/cox.path.Rd0000644000175000017500000001071013775432176014476 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coxpath.R \name{cox.path} \alias{cox.path} \title{Fit a Cox regression model with elastic net regularization for a path of lambda values} \usage{ cox.path( x, y, weights = NULL, offset = NULL, alpha = 1, nlambda = 100, lambda.min.ratio = ifelse(nobs < nvars, 0.01, 1e-04), lambda = NULL, standardize = TRUE, thresh = 1e-10, exclude = NULL, penalty.factor = rep(1, nvars), lower.limits = -Inf, upper.limits = Inf, maxit = 1e+05, trace.it = 0, ... ) } \arguments{ \item{x}{See glmnet help file} \item{y}{Survival response variable, must be a \code{Surv} or \code{stratifySurv} object.} \item{weights}{See glmnet help file} \item{offset}{See glmnet help file} \item{alpha}{See glmnet help file} \item{nlambda}{See glmnet help file} \item{lambda.min.ratio}{See glmnet help file} \item{lambda}{See glmnet help file} \item{standardize}{See glmnet help file} \item{thresh}{Convergence threshold for coordinate descent. Each inner coordinate-descent loop continues until the maximum change in the objective after any coefficient update is less than thresh times the null deviance. Default value is \code{1e-10}.} \item{exclude}{See glmnet help file} \item{penalty.factor}{See glmnet help file} \item{lower.limits}{See glmnet help file} \item{upper.limits}{See glmnet help file} \item{maxit}{See glmnet help file} \item{trace.it}{Controls how much information is printed to screen. Default is \code{trace.it=0} (no information printed). If \code{trace.it=1}, a progress bar is displayed. If \code{trace.it=2}, some information about the fitting procedure is printed to the console as the model is being fitted.} \item{...}{Other arguments passed from glmnet (not used right now).} } \value{ An object of class "coxnet" and "glmnet". 
\item{a0}{Intercept value, \code{NULL} for "cox" family.} \item{beta}{A \code{nvars x length(lambda)} matrix of coefficients, stored in sparse matrix format.} \item{df}{The number of nonzero coefficients for each value of lambda.} \item{dim}{Dimension of coefficient matrix.} \item{lambda}{The actual sequence of lambda values used. When alpha=0, the largest lambda reported does not quite give the zero coefficients reported (lambda=inf would in principle). Instead, the largest lambda for alpha=0.001 is used, and the sequence of lambda values is derived from this.} \item{dev.ratio}{The fraction of (null) deviance explained. The deviance calculations incorporate weights if present in the model. The deviance is defined to be 2*(loglike_sat - loglike), where loglike_sat is the log-likelihood for the saturated model (a model with a free parameter per observation). Hence dev.ratio=1-dev/nulldev.} \item{nulldev}{Null deviance (per observation). This is defined to be 2*(loglike_sat -loglike(Null)). The null model refers to the 0 model.} \item{npasses}{Total passes over the data summed over all lambda values.} \item{jerr}{Error flag, for warnings and errors (largely for internal debugging).} \item{offset}{A logical variable indicating whether an offset was included in the model.} \item{call}{The call that produced this object.} \item{nobs}{Number of observations.} } \description{ Fit a Cox regression model via penalized maximum likelihood for a path of lambda values. Can deal with (start, stop] data and strata, as well as sparse design matrices. } \details{ Sometimes the sequence is truncated before \code{nlambda} values of lambda have been used. This happens when \code{cox.path} detects that the decrease in deviance is marginal (i.e. we are near a saturated fit). 
} \examples{ set.seed(2) nobs <- 100; nvars <- 15 xvec <- rnorm(nobs * nvars) xvec[sample.int(nobs * nvars, size = 0.4 * nobs * nvars)] <- 0 x <- matrix(xvec, nrow = nobs) beta <- rnorm(nvars / 3) fx <- x[, seq(nvars / 3)] \%*\% beta / 3 ty <- rexp(nobs, exp(fx)) tcens <- rbinom(n = nobs, prob = 0.3, size = 1) jsurv <- survival::Surv(ty, tcens) fit1 <- glmnet:::cox.path(x, jsurv) # works with sparse x matrix x_sparse <- Matrix::Matrix(x, sparse = TRUE) fit2 <- glmnet:::cox.path(x_sparse, jsurv) # example with (start, stop] data set.seed(2) start_time <- runif(100, min = 0, max = 5) stop_time <- start_time + runif(100, min = 0.1, max = 3) status <- rbinom(n = nobs, prob = 0.3, size = 1) jsurv_ss <- survival::Surv(start_time, stop_time, status) fit3 <- glmnet:::cox.path(x, jsurv_ss) # example with strata jsurv_ss2 <- stratifySurv(jsurv_ss, rep(1:2, each = 50)) fit4 <- glmnet:::cox.path(x, jsurv_ss2) } glmnet/man/assess.glmnet.Rd0000644000175000017500000001050414046050560015522 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/assess.glmnet.R, R/confusion.glmnet.R, % R/roc.glmnet.R \name{assess.glmnet} \alias{assess.glmnet} \alias{confusion.glmnet} \alias{roc.glmnet} \title{assess performance of a 'glmnet' object using test data.} \usage{ assess.glmnet( object, newx = NULL, newy, weights = NULL, family = c("gaussian", "binomial", "poisson", "multinomial", "cox", "mgaussian"), ... ) confusion.glmnet( object, newx = NULL, newy, family = c("binomial", "multinomial"), ... ) roc.glmnet(object, newx = NULL, newy, ...) } \arguments{ \item{object}{Fitted \code{"glmnet"} or \code{"cv.glmnet"}, \code{"relaxed"} or \code{"cv.relaxed"} object, OR a matrix of predictions (for \code{roc.glmnet} or \code{assess.glmnet}). For \code{roc.glmnet} the model must be a 'binomial', and for \code{confusion.glmnet} must be either 'binomial' or 'multinomial'} \item{newx}{If predictions are to made, these are the 'x' values. 
Required for \code{confusion.glmnet}} \item{newy}{required argument for all functions; the new response values} \item{weights}{For observation weights for the test observations} \item{family}{The family of the model, in case predictions are passed in as 'object'} \item{...}{additional arguments to \code{predict.glmnet} when "object" is a "glmnet" fit, and predictions must be made to produce the statistics.} } \value{ \code{assess.glmnet} produces a list of vectors of measures. \code{roc.glmnet} a list of 'roc' two-column matrices, and \code{confusion.glmnet} a list of tables. If a single prediction is provided, or predictions are made from a CV object, the latter two drop the list status and produce a single matrix or table. } \description{ Given a test set, produce summary performance measures for the glmnet model(s) } \details{ \code{assess.glmnet} produces all the different performance measures provided by \code{cv.glmnet} for each of the families. A single vector, or a matrix of predictions can be provided, or fitted model objects or CV objects. In the case when the predictions are still to be made, the \code{...} arguments allow, for example, 'offsets' and other prediction parameters such as values for 'gamma' for 'relaxed' fits. \code{roc.glmnet} produces for a single vector a two column matrix with columns TPR and FPR (true positive rate and false positive rate). This object can be plotted to produce an ROC curve. If more than one predictions are called for, then a list of such matrices is produced. \code{confusion.glmnet} produces a confusion matrix tabulating the classification results. Again, a single table or a list, with a print method. 
} \examples{ data(QuickStartExample) x <- QuickStartExample$x; y <- QuickStartExample$y set.seed(11) train = sample(seq(length(y)),70,replace=FALSE) fit1 = glmnet(x[train,], y[train]) assess.glmnet(fit1, newx = x[-train,], newy = y[-train]) preds = predict(fit1, newx = x[-train, ], s = c(1, 0.25)) assess.glmnet(preds, newy = y[-train], family = "gaussian") fit1c = cv.glmnet(x, y, keep = TRUE) fit1a = assess.glmnet(fit1c$fit.preval, newy=y,family="gaussian") plot(fit1c$lambda, log="x",fit1a$mae,xlab="Log Lambda",ylab="Mean Absolute Error") abline(v=fit1c$lambda.min, lty=2, col="red") data(BinomialExample) x <- BinomialExample$x; y <- BinomialExample$y fit2 = glmnet(x[train,], y[train], family = "binomial") assess.glmnet(fit2,newx = x[-train,], newy=y[-train], s=0.1) plot(roc.glmnet(fit2, newx = x[-train,], newy=y[-train])[[10]]) fit2c = cv.glmnet(x, y, family = "binomial", keep=TRUE) idmin = match(fit2c$lambda.min, fit2c$lambda) plot(roc.glmnet(fit2c$fit.preval, newy = y)[[idmin]]) data(MultinomialExample) x <- MultinomialExample$x; y <- MultinomialExample$y set.seed(103) train = sample(seq(length(y)),100,replace=FALSE) fit3 = glmnet(x[train,], y[train], family = "multinomial") confusion.glmnet(fit3, newx = x[-train, ], newy = y[-train], s = 0.01) fit3c = cv.glmnet(x, y, family = "multinomial", type.measure="class", keep=TRUE) idmin = match(fit3c$lambda.min, fit3c$lambda) confusion.glmnet(fit3c$fit.preval, newy = y, family="multinomial")[[idmin]] } \seealso{ \code{cv.glmnet}, \code{glmnet.measures} and \code{vignette("relax",package="glmnet")} } \author{ Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{classification} \keyword{models} glmnet/man/figures/0000755000175000017500000000000013623043702014111 5ustar nileshnileshglmnet/man/figures/logo.png0000644000175000017500000007500013623043702015561 0ustar nileshnileshPNG  IHDRYuaiCCPkCGColorSpaceDisplayP3(c``RI,(aa``+) rwRR` b >@% 
0|/:%5I^bՋD0գd S JSl): ȞbC@$XMH3}VHHIBOGbCnₜJc%VhʢG`(*x%(30s 8, Ě30n~@\;b 'v$%)-rH @=iF`yF'{Vc``ww1Py!e5DeXIfMM*i&Š$oiTXtXML:com.adobe.xmp 960 540 1 ze@IDATx}E< 9 JPd`Ĝs`9lzݗaM1`NAD9Tu{z`f[0+:NS IA2#Dfƽи̬}/QhHb DAM"ٴDrUȓ'1JhRjw2M@ #)B؉LF/ߓ| ?8_+4;uAuEIBBa~`;2 d:'w\vpخ"kO2eng &q(m-.{\"?T8}!ujS ;o7 UrcgJuTiUA:͒KN&7@rs,YxzC M"rC>"so|(O[ 붔T#Pr2c&sRZ+זɍSߐo~ @B_d,٧S+2fhR{v'-mJE{AD>Q(t7A ^ R3 %?2Rwn& 8`bZ׻g_oV}e7h-A%DC =\{ҧgH{"VX>cS ;aLEl\'VBnjm͗g^Xx&k. 5{th!W\2Z na/Z8~< =ߔEeN'i"oP3vLҺ0T7ltBn&MGTn!!U2埏,R%Y@jXh7RnBQU~war$f*8L$$'q\xx<W*%+u\ĉHLoA5X.&B M@Z\ 70sJ'r93ώSLG`ti q38&T򸂦P°vuȤ˿}c,Zrde9ɔ[˵q87J_Dȴrm|\y} j']:ZXH,M$-'5u-zuIڐL)c#W?Fztm])/4Qlg徧ޕf,]fvCv-䒳)Cf;Yk l]w B+#tǪXޥΪćZ&OBM V !uzvk||yugJK n8sX E Gđspb*&dN+\*a#qlܸUyO43!',YenUNյ\b8|`+XIҳP9ܱj.6H뮮oJvl tb`4+}2r(釭 @H -UDA:c/;/Jp! hA0Ԗyri9&X"6xĂRwDl3?^ 7 0 ~hi׺ |c}e}Awf `7UEQfHn uG9@>okD'Zyᒺߺx?oקCҒJq?TK$5q >ϖKOs.tS+3ߔ fV\, chp@(qnfuos@< 0J= H4&BQx3;F^rlYr$gAs%e|åK]Ham 4KTUg]Euke]%NGJN9r)+J<*YŊhצAdu+5!`4>bFJ l1T=k !a5zarj3fn&?)J3і-,_:%2T;^5RN/pqՄMxG\SkM(bg۩V\\|R'epIZ1D9*]i[ WB>nI?@l#M'arYSu+l,Y;x8EN*.Xٯ[{2W4IEdB ?n(`1`a`fE'*:TC{Z&cFyK7kOCqj&Q)6n揿\ 897\qQnܐ^rE6HMM7_KC-#8'ޔ*&"BH 9<{äELS*lM(׬*<0_^g-IvvW%kW94῅خFZz87>JFwIŜ&v~H9ʥdMf7EʝM5<~1p0T66H 7H\6V~bsxrnrR2@ÁY'y3xˊu[>(5ѳ) 9iAXs&ڶWMuSa԰Fp:|y'ꢾ~dj uPdm;it_ҡNQ J 6[eRMT%v\סd3&cJ>)_e+kauk܀\_rRpm$HRZV!=6{WZk#Ef'sNJ:Rka! CNuiA@i4m"H`Ob3H%\w$ZHZTsG+谉jYlC\(U<0͹7DVƐ@o\ԍdAeD\ &.mpE1`}sմ'(gI !(: W|&7aOaG X3lvQ@wf昈Bjk7n|3NrG9`j] #N$A$9EoFz 'O chџkM?੆ۂxFP[" ]0>jMwΑ>Y @vPG \( ܾ8 ^=x??,X ]t}7}VUTʺ5kkPc Q~}C dc,} &n#&'߃M6-4TQn! 
I`R̓_T]Mad , _NJu5īZ`… SNҫW#oUVSO@@AWNNF%r7|?TNp)0,0P- e UO=n].U@q(mŒKȝ,"tV ` s!zd^&}ix/^D9E0T2+KcrYO:z4̑o}Sg;JգK+:Wiw]C6kG;$r"UՄF:a~r|:b ^rRǓӤm۶r@y!p 7Â{KߒN"E v(7E=͞L,Jno>–HH=Zd Qk^'#vc~h=Rb:lloKDzN94Lle|y)7۠$f<(s["m G㏰Էs^bg7o#Z4"Y{[:)?#(x\c-οbjb7N?fv zAqx; bsm0H4o-WӞZ(?LJ0Xfmps^a\[()gAe u Ɣ)+-}T^[PZ㯥TB8h3(ɁaʤbHE=h^7w-J׶FZfeI>*%UWHIzI@u„1dKQ)> c  }R<۴ʑ)8c<&Rw%p< q4Qhc(4( z֢{mNqVgU"AQ'qH|_!1 wZuSa> CA6 9"4\v|M݅IAFl d@iծ?H#dSv (4~|x=1&yN{G !!!ٷ$_(r E1Ж8e bgɠOa-2қ"icqi#2%ʁR Lp>Evd!rθphH8T~bxR[ qY+,c/yrk\ɆQ#JZvj#~Ggm]oc̞?TTQZA 6rueY!RGR䉙Q4(QϾ<,ڔu3vu03OcT\28v"tZy/>.^b$3xtJ+< .VhD+/=+ [K,j}I=BZPĈmF˸Ѣw3lU .7=0G^iH@ /ܕbU-3\'UۆC~]qul+>nOJ9#8J'u! 4 g }5DIpcpm*`_S%XZ@T*A q-S(SdA7*1:2 JYok-U S7e+E k/"#u;DQ"~ V?ItGUٳ3?o;VF@Lr9vg>=* ?x;DzU6gP@Z&:R1IZp@2ɭ?F< GcX>rLIjAː@{!m:u~?tL ?pMN`eό;v}ީ= `wuHB c3j2A OnՏ!|[|Ss YUiDm5kKIAZQ`Q!9^B BɒW·P>f{cg/@P,R Gd@|lso=RUv_l(1,Xk 1}e>z@ácv`|i Pu:!XR#dm-g/˳?rGJiq2G_e={ujdʦ%@ ۆ"cCC`Hs)^7!8kgcm\twy,k-e%rhܧ/. 4t0*j -W%UXIQ .\V̟%]vFٶdhrԘe"X6uY$ ?(7Wf} /ҫp3NޤE~vkl?܆zQuJ8@ i Ér6W| (QQ؈tcHZA6ȝO >Z3t8r`rV u6.ak0a75Hieﹿ}Vk6 'gbA$"`%`^rҀ@A}tԫBRY>wat8* L8`Km 3 |H7dZ58`{W[q+>p)ۻ.9.sRBЁbMFD!j؛nzA>W@6@~ZfuT `Y> ##݊jW{fvkrQ94º)=Dϩm9`9/t1'PV |>;?&@(b W:`¾ XeUR.!ah}_t@5zO]EvsN` OV#ΈU ~Ĕp5e]%`$5YGпXE1>w \1eHwGqEEE2'eλoi15unQ si݂Dd $ ` dbK%C!,Y@$Vdo-?;9IE'p b?F~|# yKc xk2ң{Gtq Q<f`웅lx L>T.8ehؽ,&n{_yZ潻DW!3q?ԫt(yp%J,ap8vHK")+)K\_@7u;JklZb @Z\D *"k:*hVM qqgdt˔JƟ$ HG՘Fluҧg~:ȱpDaDR#3^pv98bfGvk+Xzl>ot>Y[A K"Sderf&";3@ $6P̗(3BQ42xW|Eʶl,Vx.𑀵)CMOK`۪Qwuu6883+)gmUGD41R8}`wޜrY\{N 6)DAR +DB~Ct`?I*T |8(t?fi/` Y2aRtGvE)a"/pDLL9Ng,_}Uѐ: D+9VKT)p :} Yp lFﳯ`ҫo_~p!#bDc->4LOAnՉrd>lDRPä[6/]v؃ccލZ)Js';!y|*a䡨ΞL@esyebj0{[,}VQ }Bfk^X2[rOK#9kA~(R֯]'8l26#EK)sbm#*]ݻH]YE4&:fh3G}}7Ig,dG){d`&Cv72v1NFe+=6l3g}ab%$猄`0!`y2E67X^ie)H!qEE8>CXvUf @YP4udiM5WňlʘF6:M:FjU#3Xa {)6I {2XyۼV' 赚M@f .08FBO2EG%M%r[7UZDhw$p\נ`#|ѵyAdT)g1:>(b t;Ҫ]"/Q G2ӠPu 7 3il~z B?Dj/ >tǐNNM&Ӵ '0$ g ^ PPWW!J#WWn"Jsm ,hS(%%h  e3"H࿷z]aeN?a?.YpfL$0{y̰,y )Z8SY%u hV|l׆:waHwd.I)> ^p@[ ~ q6;[XMSƌl=^"/efl,ʱO[ri 
4izPW5fç!ϾM1iJ[c>R“,~0D\30d\7:tEQ!q4ώ$9Y0~,PDb$ 9M{ޕ}ST"Ƭ0q0b/LX,Elb Rs >g??H,|#5wg6v?`Ya*aY *$!\W ' ޸12wW噅+f 3Ek^tgoS0c"jɎ/e 2[ ;3_O.AIUarGZ*`ʫ[W+\kLrFaĞs Q^_68߼Ip SCv6N(B죰3JSWQayZ$Lzr7~M_ L{ͽCxu83wgw6o,AxylJWdz.v32% @xrL, oڽhSנr\i-/))~;\6dKg}\vҷ1k)IL>|%+qR,@`#)6C<3wd laH;G@d`Ŧ\l8yY)}q6S>u$Hr3j_Psm0b8@YO, ڬC% (}~dS'^sbvmF8=uּ$fuD@"bTd}>vj33M@L\eE% 'okhy"]IaSQ7ܼ殬D0 a}9vdOEq}Klrw6n~C DDA788OݸAE!Ss%Pvu}(6>sez"}јȧL 2gxzh\ZP$)S*)}CgAejB/0x0sU=6HKRT(+9bÁc0C=l栿 ҄Wp,Dr5F@YMu+oܲ נT̩T|I Fx fj`RNٜy%pUGn bka$0Sl:uq:k[T ۴N CZ3=WyQ4ҋzB/P-us e $j?6UF!G"!NN7bc(xb\tS R[87ՅT%)i;@2Ajb4D|IqyΒrGS3.dŶUFj!tKP,ug%L2T@He=:Cܪ28ؠ\52cc< ͑S'U?CH@8XY^6«O<ƭۼ^f{M^Q dxE2Ξl|2p6WY+K9Y)"@TWp*g (25'ȯ&~Wu\&eAb`l:49h$ "$1Ӷ!P6˂Ar@LDz4Px+51AڊDA)F7LLkQjm ]g̙)/n'0Cb*P#QYP p B!/JɜAt@d!f9xIu_!' @ hDE1 D_%HAZ-9b$rjɲī\d/,_z>lќ~ ~jq E7= e3pmx݅CGH.@B` ('[(<Tn%R8:e.Pd/кBY[TlR=: rS ,Dnjm!|R&ȄS܃J:t\l/xԢCfq'$Ⓧ}4!McQ]d_}N^*_}s 9[+P!=,E58(G7[q֏ ?zeA! %?|׬\Moy$!q0MVkO#^S_~~- 2Y{>I%&]tr2Z{ĒI4, UhB?nImQ8fiO68XѣtA͔l* x*F4Xx@/0Wy6\=!9tE1.W!}GA: p#Uʹ293CRVmqpyѬU i+y6":RͲ(BrEh d0!rv{Ta(?!TT)#e_B0'Y-RkqJhaH$a}n8_ m9Ar'a@S)@\5t]@gqF'G_S#.. 
W"Ը#6&AD`e!3T٘Gge&  Qy/+1'yƈSM]8GY8V%Y|U@1!'y*_}AVmZ ;y[da/@S59 648#ߺ6D#BiJS](GL >E ʺAh583U]w >Hdǟ3q!)3L.#rd+>™̋"4WH@:tP=atD> SVR.hG>Sg%,+mǟ&ǀKDoQpi# 7()ZLԔ8u>\|ر ycŚ y߸zo"-a|UV,~GL֓\v*DZaNR |Z,TK1hHh(Qgt?dR[=c];9% W 8},W*!k/y%Pg fr>YHYȦTA@rX[`'&j,UJ p ܉hrIXr] Ā;Dۍ7/r{_[2f5ZL/pqA`YРGϓI#ceSW+ -F<,T `̇A*+.*G4{a(RCa) f4[r,93袢 T~"UY):1X0z= >' @ G槂Z'@/ 2|`싥]ÈxEǹPꊾCFtl"4SpXRoXM?q'/wsf墜WKʊtun(VєWzghSQǷ˰ߺeIDUѝ5 :,)9fj!2TG / _+Ze!U4"?'o2GȔτ+ g7@% 9&9p+0|"_E6C&B#$hC)(I+J9S.k9tWל|}G D$͊IvlK*C'(Yts.(V\.8Qxwn8YP(`ik(sKl|O=zϙDo4C ]N^b'_",#n~0GO N] #KP_|FB1v!99C:%%j/"CghuudBi•&P*jd!OK6Z(&1EGE#?˔^ L>C}Ħs]bՇ)}N49ɷM\u3'A0X*$}{( D hZq@l<f۶vo" 7+hxi# שHmw\8U}\6lu'8 Qf$Tb]زnt޿:r) 6Mh||&rDFaIG2GeCge 3jlsvzcaV vai: e`#^@~)PO< SyF@Mbt|~+# &8?G!w5%zL#hNm ȺoWjaĤ' .'56`0F9:A% lY8aĺ恄دlG}?)۽%z&$uKN>[NwytXH; q#-D|0 ehEHv҄+C?5IDAT-L@Vl\f@<(BQ/ёP=N"& RLL''U +Q3;E) t y@ZM$N:ʕg](̙ҬC${CDÂ?s^wMAd'Mkp T1M%$ y3fai C utM~zeҷG0_& "2IsC}5!E|ѣP៏L|}裤"JCN}5YΜt8,*jA 7 ׎,D3g (M>Fnn|Wgjl’8L9'MF$-@1!H'|f\QS&W $N_(⨇y^{-אyjQ:"'BȃSo~0..+ml: "ZC{P.Si13۶sryȰ=<{oo>o(qKwN^n" 9\Wzߝv0-@N!kO@bM 8J̸T6BGʔϐ68iib%1n}!۟)+i5!+#>S4^A52daK#tN pT1Nf"hNB< RYO¢EҦ?6OZĕ&TňO>o Fv½1VwYVD"?ҳkDP! r Lz NQ`*9b9ECX&?ťD&! 
~v5ҧkOJw={C$λ}=,L'[Jvn>Ҥp$\{[00$r9̙IɃ:3eptW~xUzoŧ\hҡ}'ߓAIVʼ)ALw I$1[HfL:R~=xxb__~t8s]8~Ka8\Z ϕndDBTXTa>}} t~/tiӖ^%0u!!;C,d[ܚHthX lx\swφ &W4~iUXB8ɚ,d \15i)Tդ韆/g} =SCupVlr}B1VkQT`-Z:4lʂ}JY HG ,w o [d\jfJگ&K]b003-Xw"lUCXjG`-Ĉ } 5o1Ga0!8 Cf7/y/}c`ȍ^n?25+_ xVl˖ծ Ik->nݺ 6 #l.L4B{f!*9Zd~a),,m߀DaGEh0%0T~vo=я]s;k73;cۦC4{)ύ塩s uO9&X[6vGZ8E_~9rC]Zclk]m"2QW]U t1ڡ®>Y\S.[L6S]~Ə(䷱Q6:3cQFɏ~zصIQ$ ?Cs?G&M|[㏇oQŷ/M;%kW# #>XŞ,kӞn˷c=R44) vCwh6u7T XĞD]JJ{l3ꚹk%lL!Qܡd-&4~g+ "Ky7@]+:k?wqᣥݓ6$OU>~4 קv2'l A?1 ׯ_/mڴsX[캝Oו7&\WeqF}ꩧR駟*,?p+bT| /x J~@uS1{Zߞ(>K+y[N:Z2.]R4uN:\wuҾ}{~ /4vDEk4]Xn:mhmյ6D峕O˵^+y(zK/mW1cFJޑ#Gʒ%Kd͚5vZYnn2d\z饚~GVRdٞO=.Nq-nI=z^$)GS^M89} /͖rks 'JÈC=4LMkh:]VJ?L $N=0_3nONYgزe<=:$XW_}5L0͛ .Luc|Xgx_2eJUVa~+갸 6E5뷆8uDq ϻ-1pukc _uϏd{s9a^o~Zi)0}il`ޑ.|ͬYqJ0-oQQQL dye;myq%q+m%f~֗Ԩ!*m} eZpKkzu$Y+%~ʕ)Ǝ+qDzY\X(4Y;(,6K端t,!QhU3zr\|űeEn)sUjŊfc=_~>g/OW^r)}' qYw.$ Yh]ίl">r_l:a?)x YƍCYg_",.1 %\t>LY#3?gf?Кęݵ9t~`{j2w59nb`իWyC+s=C q#!CiiiHLӵkW믿.o´׿UV Srsg>'X w)V9Ͽ˿,Lcm{SWǛiuۙxkS4"qX;k}~TH< `Μ9a:( |OʇlScǎGμ{ʙ˙:u@w(LX~E,2av~ Q8-%|1lʰq+-m-pQtX lGn݄AC=~~GRh>;/E4 EB>?pjtA)0Y߀Rkgz7c?? .H!N]i8R :tWD0roFp6m~O~L6ѾhٖAѪ@QuY}tוgw[u T? -gyzZ=ddqq$!u ∂uSBgxÇ tu؇*sΡ(L?pח9_0;;#lE=X`r~? pK,u&a(wy% ஻ a={v:>јB}T-$,ƿn[۠JgX l0g]g?p ^y5ʈ*|Iz6K"; >[X>׷*KPAeQBbYs^ҐM0u:ea`.lX>ϨAN6qD!w\fₕu5ˍrE{w{&lCE;;Oahfd`kܸqg;'珕AW {E5Da23831COo{}1Du}*r_c ƊL 裏j9|sVb>4x0k2 XhQNH?3m-~h}͠'[ƻE6ͻ˞ yUX@^O c=lq>@Qry,5V>fMkX',X5*nQhrZ*:i1c`]khj]r.H>mvOpkyS9aGwNlYh>ŧwȡgfќ)}jE9]x~`@1DN=#mJgE>N;s]m#n8@zƲCQ^o˼a(П7)/Vc-׮]2leXZ^k<"GgA"h*/ cQ0W)^0=w?=x3eGO?OzA"m=?/Ҕ:Fxj$[1HOIy»tWfNE-ӮҚUQ.ʙՄ+q~:) ?+Z]*,98x~W^O3y?~^HF*Dk'=䓚b!6Xm^ `h%bqK?3IO/2-%t'뇕G"#X;=(C,ϰ,_C^e&U $- ;9˰OV2]tmr8,fR6 )+|.mֻvW߮ \S!Rmoȕ~L~#;('L+Lyy''}po/mAi)Lb3%L[ #!AO <$W灵p,YWhWf +.!]:LStBXEWzrǂ. 
q>=9Ap+ {bȔ3I^B`rK䁧V yؖw)I}%QeoXl-z{\$̘ipn:qb?j9k2_=(rO#0$ FʑhO(VRP>Z-w|B v/-Ŧ huy)ӷi!H"<kH#.<ŷ厇ޔ %W쥴rrIy'؄ź:$$QȎXJyq3 |hlؼYZ)k\r‘˔I6XNtֽOH( WpyѲΒ_kjr #Ayˆrcs|`*U_GA4P($WM#* UCA-eD@`+ϖ)ӏ$`c^GX6;8}Aw \@^Xny`6oy  F8cݳPdzpwizRX Ӱry2/US&6pn@cdKv7ivk qRQU)Ӟ\7x)|R.Zȓ)g SZ$7Ak]C-`?qRJ fѲשaaĎ}AZ~ݓIL:J~ö֕ L !afhힹjV:9 aTd-2yr9#ua;6FM"D2MJ .EI5Lc9 ċ]ɖbwj e! QiGKN&-qE1J tBHRˣt!eĢVcڴ !ʰޘBEx_nP6aBMlg0@.>ua5:RWlbӠDa"֫ ?[/J`Ȯ2Py2zHwXPAkwlH:}m!{xuI%2`Αl._XkTieq΁|ǙXBy24N  ( vuX/+.J (=$)-=*{O9՛jbzy kO|On{hlZM;F֥0>iUnJHn^sr)C$.TL)jY܁)܌Mr=e۫ hvH?P OVzG/&7JQWŦC@Mm3 %kI5l6l(#/$npC\2nFLmeVZ$b?vHf~'_.E%7W?P.9uH`ϓ'l<5OȫO.ĕIR*,̓s8@UҦv<#kjX' 83vZcϾ %Kx45KqyOF@;Ͼ\cʒh#0S?@l'cdpѭG#H*MJI%nHV/7dҕB, CIpprDCಳ{FI ?.EAz6Y9qx/ҍ]4 c%i=+h6'aA*7gJU%`-l!8N;k0 OjZjD5\X]Z"m-9 nN˖}`lJq`ģV!Gz֦} noLFu<ڞӥ{WcEuV>vq>s ZրHDm44j4mŴ("4T+ae BG FΙ;sgee+Û ;3oνw=s=\: d$vW2_9(8 JŁ3gzy xO0v9U,a(҅xnP~#? SX~QўofF;n\3*X w#U" t?/[_N!5Di@88֦/Iǡ<ǁ˽qiLUf6WCA2:>׎?7! 8 0IEV]ύ@cFʒTG2 \#DQ|SLC ž9]]dhnj I*~_^kzs;:cw1ۆ-RpZ"7 εvJnN_ܰ{tӝ {yy&o~Dݍdq̄D69TGΨ{L{ёPDf7ӔUM)λW^ O9v ]yȊ;fȯVeԨĦqD~k~Sz%@'sՍmL'\vTybM+O'[c?"0H*rM_<is? 
rC\>C^yn!S`,OvX8|]DA@9>{ʤ/g^{cYZ;BEяTMV9e-RhM!FzW@/}\|m {ރ%򛈤nbgMCL_ņ ovnoSz"oK4:{GÙmܺb"2ymn&\=rvuΞGF`T== cz^ s'ixrky8x%&zˊ"+v&8x4o*fMpH*az ϨQۀ9 p?ybFaC?0SG ]*ZLzvűj{lێC\ݧDDK>8ȣkȸ#cȩya1 4a^s?9܋osRq4?]=g4f%\km:Q ^07?V/RфehtݳiPL>%ڪ~{vcɺK}>pn8gMGoŚ/(!&p_:/iIW_CiO!4F F8r:p`״H=l!hFgp#Ъ.`'?oYrmy\( <@ip!@L, 0 ~rTf+Dԧ&W/jyhThjQXk@-jĐ x ی(G͒m"\p]A䪥n a:W/,`f$;(| }B@hpε?KNaC8=.-ty|sQ;X"F x9|WueH\\&a45"(- eAa ҕ+"q\!=8LIC0pQ34'kWΕ{JhD^&i{!ʎ Ȁ7myGeL.(IJ\<dP ](kuVbDX7m'/*!!J45p c?'G Xf]p7lqviȈbH`-gt7VsK<ϓ');hJ0WQ؁΁McjR5JA-#RP{QԴNEƑ1;mWxy}B%L7  %`Y*?2(3L;/-6 %l-E$j  qu窹nNJ;Y*'2('4YVUh /{6ă@E ܪ!Ǒ}ik̺K}]q?VA #xPba.YPWOU-ە#<jyXC@FNe-)Dr\!a-?Mi^IENDB`glmnet/man/MultinomialExample.Rd0000644000175000017500000000101614046314073016542 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{MultinomialExample} \alias{MultinomialExample} \title{Synthetic dataset with multinomial response} \format{ List containing the following elements: \describe{ \item{x}{500 by 30 matrix of numeric values.} \item{y}{Numeric vector of length 500 containing 142 ones, 174 twos and 184 threes.} } } \usage{ data(MultinomialExample) } \description{ Randomly generated data for multinomial regression example. } \keyword{data} glmnet/man/cv.glmnet.Rd0000644000175000017500000002652514013330131014630 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cv.glmnet.R \name{cv.glmnet} \alias{cv.glmnet} \title{Cross-validation for glmnet} \usage{ cv.glmnet( x, y, weights = NULL, offset = NULL, lambda = NULL, type.measure = c("default", "mse", "deviance", "class", "auc", "mae", "C"), nfolds = 10, foldid = NULL, alignment = c("lambda", "fraction"), grouped = TRUE, keep = FALSE, parallel = FALSE, gamma = c(0, 0.25, 0.5, 0.75, 1), relax = FALSE, trace.it = 0, ... 
) } \arguments{ \item{x}{\code{x} matrix as in \code{glmnet}.} \item{y}{response \code{y} as in \code{glmnet}.} \item{weights}{Observation weights; defaults to 1 per observation} \item{offset}{Offset vector (matrix) as in \code{glmnet}} \item{lambda}{Optional user-supplied lambda sequence; default is \code{NULL}, and \code{glmnet} chooses its own sequence. Note that this is done for the full model (master sequence), and separately for each fold. The fits are then aligned using the master sequence (see the \code{alignment} argument for additional details). Adapting \code{lambda} for each fold leads to better convergence. When \code{lambda} is supplied, the same sequence is used everywhere, but in some GLMs can lead to convergence issues.} \item{type.measure}{loss to use for cross-validation. Currently five options, not all available for all models. The default is \code{type.measure="deviance"}, which uses squared-error for gaussian models (a.k.a \code{type.measure="mse"} there), deviance for logistic and poisson regression, and partial-likelihood for the Cox model. \code{type.measure="class"} applies to binomial and multinomial logistic regression only, and gives misclassification error. \code{type.measure="auc"} is for two-class logistic regression only, and gives area under the ROC curve. \code{type.measure="mse"} or \code{type.measure="mae"} (mean absolute error) can be used by all models except the \code{"cox"}; they measure the deviation from the fitted mean to the response. \code{type.measure="C"} is Harrell's concordance measure, only available for \code{cox} models.} \item{nfolds}{number of folds - default is 10. Although \code{nfolds} can be as large as the sample size (leave-one-out CV), it is not recommended for large datasets. Smallest value allowable is \code{nfolds=3}} \item{foldid}{an optional vector of values between 1 and \code{nfold} identifying what fold each observation is in. 
If supplied, \code{nfold} can be missing.} \item{alignment}{This is an experimental argument, designed to fix the problems users were having with CV, with possible values \code{"lambda"} (the default) else \code{"fraction"}. With \code{"lambda"} the \code{lambda} values from the master fit (on all the data) are used to line up the predictions from each of the folds. In some cases this can give strange values, since the effective \code{lambda} values in each fold could be quite different. With \code{"fraction"} we line up the predictions in each fold according to the fraction of progress along the regularization. If in the call a \code{lambda} argument is also provided, \code{alignment="fraction"} is ignored (with a warning).} \item{grouped}{This is an experimental argument, with default \code{TRUE}, and can be ignored by most users. For all models except the \code{"cox"}, this refers to computing \code{nfolds} separate statistics, and then using their mean and estimated standard error to describe the CV curve. If \code{grouped=FALSE}, an error matrix is built up at the observation level from the predictions from the \code{nfold} fits, and then summarized (does not apply to \code{type.measure="auc"}). For the \code{"cox"} family, \code{grouped=TRUE} obtains the CV partial likelihood for the Kth fold by \emph{subtraction}; by subtracting the log partial likelihood evaluated on the full dataset from that evaluated on the (K-1)/K dataset. This makes more efficient use of risk sets. With \code{grouped=FALSE} the log partial likelihood is computed only on the Kth fold 
Note: if the value 'gamma=1' is omitted, this case is included in the list since it corresponds to the original 'glmnet' fit.} \item{parallel}{If \code{TRUE}, use parallel \code{foreach} to fit each fold. Must register parallel before hand, such as \code{doMC} or others. See the example below.} \item{gamma}{The values of the parameter for mixing the relaxed fit with the regularized fit, between 0 and 1; default is \code{gamma = c(0, 0.25, 0.5, 0.75, 1)}} \item{relax}{If \code{TRUE}, then CV is done with respect to the mixing parameter \code{gamma} as well as \code{lambda}. Default is \code{relax=FALSE}} \item{trace.it}{If \code{trace.it=1}, then progress bars are displayed; useful for big models that take a long time to fit. Limited tracing if \code{parallel=TRUE}} \item{\dots}{Other arguments that can be passed to \code{glmnet}} } \value{ an object of class \code{"cv.glmnet"} is returned, which is a list with the ingredients of the cross-validation fit. If the object was created with \code{relax=TRUE} then this class has a prefix class of \code{"cv.relaxed"}. \item{lambda}{the values of \code{lambda} used in the fits.} \item{cvm}{The mean cross-validated error - a vector of length \code{length(lambda)}.} \item{cvsd}{estimate of standard error of \code{cvm}.} \item{cvup}{upper curve = \code{cvm+cvsd}.} \item{cvlo}{lower curve = \code{cvm-cvsd}.} \item{nzero}{number of non-zero coefficients at each \code{lambda}.} \item{name}{a text string indicating type of measure (for plotting purposes).} \item{glmnet.fit}{a fitted glmnet object for the full data.} \item{lambda.min}{value of \code{lambda} that gives minimum \code{cvm}.} \item{lambda.1se}{largest value of \code{lambda} such that error is within 1 standard error of the minimum.} \item{fit.preval}{if \code{keep=TRUE}, this is the array of prevalidated fits. 
Some entries can be \code{NA}, if that and subsequent values of \code{lambda} are not reached for that fold} \item{foldid}{if \code{keep=TRUE}, the fold assignments used} \item{index}{a one column matrix with the indices of \code{lambda.min} and \code{lambda.1se} in the sequence of coefficients, fits etc.} \item{relaxed}{if \code{relax=TRUE}, this additional item has the CV info for each of the mixed fits. In particular it also selects \code{lambda, gamma} pairs corresponding to the 1se rule, as well as the minimum error. It also has a component \code{index}, a two-column matrix which contains the \code{lambda} and \code{gamma} indices corresponding to the "min" and "1se" solutions.} } \description{ Does k-fold cross-validation for glmnet, produces a plot, and returns a value for \code{lambda} (and \code{gamma} if \code{relax=TRUE}) } \details{ The function runs \code{glmnet} \code{nfolds}+1 times; the first to get the \code{lambda} sequence, and then the remainder to compute the fit with each of the folds omitted. The error is accumulated, and the average error and standard deviation over the folds is computed. Note that \code{cv.glmnet} does NOT search for values for \code{alpha}. A specific value should be supplied, else \code{alpha=1} is assumed by default. If users would like to cross-validate \code{alpha} as well, they should call \code{cv.glmnet} with a pre-computed vector \code{foldid}, and then use this same fold vector in separate calls to \code{cv.glmnet} with different values of \code{alpha}. Note also that the results of \code{cv.glmnet} are random, since the folds are selected at random. Users can reduce this randomness by running \code{cv.glmnet} many times, and averaging the error curves. If \code{relax=TRUE} then the values of \code{gamma} are used to mix the fits. 
If \eqn{\eta} is the fit for lasso/elastic net, and \eqn{\eta_R} is the relaxed fit (with unpenalized coefficients), then a relaxed fit mixed by \eqn{\gamma} is \deqn{\eta(\gamma)=(1-\gamma)\eta_R+\gamma\eta.} There is practically no extra cost for having a lot of values for \code{gamma}. However, 5 seems sufficient for most purposes. CV then selects both \code{gamma} and \code{lambda}. } \examples{ set.seed(1010) n = 1000 p = 100 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta eps = rnorm(n) * 5 y = drop(fx + eps) px = exp(fx) px = px/(1 + px) ly = rbinom(n = length(px), prob = px, size = 1) set.seed(1011) cvob1 = cv.glmnet(x, y) plot(cvob1) coef(cvob1) predict(cvob1, newx = x[1:5, ], s = "lambda.min") title("Gaussian Family", line = 2.5) set.seed(1011) cvob1a = cv.glmnet(x, y, type.measure = "mae") plot(cvob1a) title("Gaussian Family", line = 2.5) set.seed(1011) par(mfrow = c(2, 2), mar = c(4.5, 4.5, 4, 1)) cvob2 = cv.glmnet(x, ly, family = "binomial") plot(cvob2) title("Binomial Family", line = 2.5) frame() set.seed(1011) cvob3 = cv.glmnet(x, ly, family = "binomial", type.measure = "class") plot(cvob3) title("Binomial Family", line = 2.5) \dontrun{ cvob1r = cv.glmnet(x, y, relax = TRUE) plot(cvob1r) predict(cvob1r, newx = x[1:5, ]) set.seed(1011) cvob3a = cv.glmnet(x, ly, family = "binomial", type.measure = "auc") plot(cvob3a) title("Binomial Family", line = 2.5) set.seed(1011) mu = exp(fx/10) y = rpois(n, mu) cvob4 = cv.glmnet(x, y, family = "poisson") plot(cvob4) title("Poisson Family", line = 2.5) # Multinomial n = 500 p = 30 nzc = trunc(p/10) x = matrix(rnorm(n * p), n, p) beta3 = matrix(rnorm(30), 10, 3) beta3 = rbind(beta3, matrix(0, p - 10, 3)) f3 = x \%*\% beta3 p3 = exp(f3) p3 = p3/apply(p3, 1, sum) g3 = glmnet:::rmult(p3) set.seed(10101) cvfit = cv.glmnet(x, g3, family = "multinomial") plot(cvfit) title("Multinomial Family", line = 2.5) # Cox beta = rnorm(nzc) fx = x[, seq(nzc)] \%*\% beta/3 hx = exp(fx) 
ty = rexp(n, hx) tcens = rbinom(n = n, prob = 0.3, size = 1) # censoring indicator y = cbind(time = ty, status = 1 - tcens) # y=Surv(ty,1-tcens) with library(survival) foldid = sample(rep(seq(10), length = n)) fit1_cv = cv.glmnet(x, y, family = "cox", foldid = foldid) plot(fit1_cv) title("Cox Family", line = 2.5) # Parallel require(doMC) registerDoMC(cores = 4) x = matrix(rnorm(1e+05 * 100), 1e+05, 100) y = rnorm(1e+05) system.time(cv.glmnet(x, y)) system.time(cv.glmnet(x, y, parallel = TRUE)) } } \references{ Friedman, J., Hastie, T. and Tibshirani, R. (2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent}, \url{https://web.stanford.edu/~hastie/Papers/glmnet.pdf}\cr \emph{Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010}\cr \url{https://www.jstatsoft.org/v33/i01/}\cr Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5) 1-13}\cr \url{https://www.jstatsoft.org/v39/i05/} } \seealso{ \code{glmnet} and \code{plot}, \code{predict}, and \code{coef} methods for \code{"cv.glmnet"} and \code{"cv.relaxed"} objects. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Noah Simon helped develop the 'coxnet' function.\cr Jeffrey Wong and B. 
Narasimhan helped with the parallel option\cr Maintainer: Trevor Hastie \email{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/MultiGaussianExample.Rd0000644000175000017500000000105514046314073017040 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{MultiGaussianExample} \alias{MultiGaussianExample} \title{Synthetic dataset with multiple Gaussian responses} \format{ List containing the following elements: \describe{ \item{x}{100 by 20 matrix of numeric values.} \item{y}{100 by 4 matrix of numeric values, each column representing one response vector.} } } \usage{ data(MultiGaussianExample) } \description{ Randomly generated data for multi-response Gaussian regression example. } \keyword{data} glmnet/man/predict.glmnet.Rd0000644000175000017500000001272313775432176015677 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coef.glmnet.R, R/predict.glmnet.R, % R/predict.relaxed.R \name{coef.glmnet} \alias{coef.glmnet} \alias{predict.glmnet} \alias{coef.relaxed} \alias{predict.relaxed} \alias{predict.elnet} \alias{predict.lognet} \alias{predict.multnet} \alias{predict.mrelnet} \alias{predict.fishnet} \alias{predict.coxnet} \title{Extract coefficients from a glmnet object} \usage{ \method{coef}{glmnet}(object, s = NULL, exact = FALSE, ...) \method{predict}{glmnet}( object, newx, s = NULL, type = c("link", "response", "coefficients", "nonzero", "class"), exact = FALSE, newoffset, ... ) \method{predict}{relaxed}( object, newx, s = NULL, gamma = 1, type = c("link", "response", "coefficients", "nonzero", "class"), exact = FALSE, newoffset, ... ) } \arguments{ \item{object}{Fitted \code{"glmnet"} model object or a \code{"relaxed"} model (which inherits from class "glmnet").} \item{s}{Value(s) of the penalty parameter \code{lambda} at which predictions are required. 
Default is the entire sequence used to create the model.} \item{exact}{This argument is relevant only when predictions are made at values of \code{s} (lambda) \emph{different} from those used in the fitting of the original model. Not available for \code{"relaxed"} objects. If \code{exact=FALSE} (default), then the predict function uses linear interpolation to make predictions for values of \code{s} (lambda) that do not coincide with those used in the fitting algorithm. While this is often a good approximation, it can sometimes be a bit coarse. With \code{exact=TRUE}, these different values of \code{s} are merged (and sorted) with \code{object$lambda}, and the model is refit before predictions are made. In this case, it is required to supply the original data \code{x=} and \code{y=} as additional named arguments to \code{predict()} or \code{coef()}. The workhorse \code{predict.glmnet()} needs to \code{update} the model, and so needs the data used to create it. The same is true of \code{weights}, \code{offset}, \code{penalty.factor}, \code{lower.limits}, \code{upper.limits} if these were used in the original call. Failure to do so will result in an error.} \item{\dots}{This is the mechanism for passing arguments like \code{x=} when \code{exact=TRUE}; see\code{exact} argument.} \item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix; can be sparse as in \code{Matrix} package. This argument is not used for \code{type=c("coefficients","nonzero")}} \item{type}{Type of prediction required. Type \code{"link"} gives the linear predictors for \code{"binomial"}, \code{"multinomial"}, \code{"poisson"} or \code{"cox"} models; for \code{"gaussian"} models it gives the fitted values. 
Type \code{"response"} gives the fitted probabilities for \code{"binomial"} or \code{"multinomial"}, fitted mean for \code{"poisson"} and the fitted relative-risk for \code{"cox"}; for \code{"gaussian"} type \code{"response"} is equivalent to type \code{"link"}. Type \code{"coefficients"} computes the coefficients at the requested values for \code{s}. Note that for \code{"binomial"} models, results are returned only for the class corresponding to the second level of the factor response. Type \code{"class"} applies only to \code{"binomial"} or \code{"multinomial"} models, and produces the class label corresponding to the maximum probability. Type \code{"nonzero"} returns a list of the indices of the nonzero coefficients for each value of \code{s}.} \item{newoffset}{If an offset is used in the fit, then one must be supplied for making predictions (except for \code{type="coefficients"} or \code{type="nonzero"})} \item{gamma}{Single value of \code{gamma} at which predictions are required, for "relaxed" objects.} } \value{ The object returned depends on type. } \description{ Similar to other predict methods, this functions predicts fitted values, logits, coefficients and more from a fitted \code{"glmnet"} object. } \details{ The shape of the objects returned are different for \code{"multinomial"} objects. This function actually calls \code{NextMethod()}, and the appropriate predict method is invoked for each of the three model types. \code{coef(...)} is equivalent to \code{predict(type="coefficients",...)} } \examples{ x=matrix(rnorm(100*20),100,20) y=rnorm(100) g2=sample(1:2,100,replace=TRUE) g4=sample(1:4,100,replace=TRUE) fit1=glmnet(x,y) predict(fit1,newx=x[1:5,],s=c(0.01,0.005)) predict(fit1,type="coef") fit2=glmnet(x,g2,family="binomial") predict(fit2,type="response",newx=x[2:5,]) predict(fit2,type="nonzero") fit3=glmnet(x,g4,family="multinomial") predict(fit3,newx=x[1:3,],type="response",s=0.01) } \references{ Friedman, J., Hastie, T. and Tibshirani, R. 
(2008) \emph{Regularization Paths for Generalized Linear Models via Coordinate Descent}, \url{https://web.stanford.edu/~hastie/Papers/glmnet.pdf}\cr \emph{Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010}\cr \url{https://www.jstatsoft.org/v33/i01/}\cr Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) \emph{Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent, Journal of Statistical Software, Vol. 39(5) 1-13}\cr \url{https://www.jstatsoft.org/v39/i05/} } \seealso{ \code{glmnet}, and \code{print}, and \code{coef} methods, and \code{cv.glmnet}. } \author{ Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr Maintainer: Trevor Hastie \href{mailto:hastie@stanford.edu}{hastie@stanford.edu} } \keyword{models} \keyword{regression} glmnet/man/stratifySurv.Rd0000644000175000017500000000230114013330131015442 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stratifySurv.R \name{stratifySurv} \alias{stratifySurv} \title{Add strata to a Surv object} \usage{ stratifySurv(y, strata = rep(1, length(y))) } \arguments{ \item{y}{A Surv object.} \item{strata}{A vector of length equal to the number of observations in y, indicating strata membership. Default is all belong to same strata.} } \value{ An object of class \code{stratifySurv} (in addition to all the classes \code{y} belonged to). } \description{ Helper function to add strata as an attribute to a Surv object. The output of this function can be used as the response in \code{glmnet()} for fitting stratified Cox models. } \details{ When fitting a stratified Cox model with \code{glmnet()}, strata should be added to a \code{Surv} response with this helper function. Note that it is not sufficient to add strata as an attribute to the \code{Surv} response manually: if the result does not have class \code{stratifySurv}, subsetting of the response will not work properly. 
} \examples{ y <- survival::Surv(1:10, rep(0:1, length.out = 10)) strata <- rep(1:3, length.out = 10) y2 <- stratifySurv(y, strata) # returns stratifySurv object } glmnet/src/0000755000175000017500000000000014140271174012462 5ustar nileshnileshglmnet/src/internal_params.h0000644000175000017500000000026314140040573016010 0ustar nileshnilesh#pragma once struct InternalParamsExp { double sml; double eps; double big; int mnlam; double rsqmax; double pmin; double exmx; int itrace; }; glmnet/src/glmnet5dpclean.f0000644000175000017500000173723414065203754015561 0ustar nileshnileshc mortran 2.0 (version of 7/04/75 mod 7/4/87 (ajc)) subroutine get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace *) implicit double precision(a-h,o-z) data sml0,eps0,big0,mnlam0,rsqmax0,pmin0,exmx0,itrace0 /1.0d-5,1. *0d-6,9.9d35,5,0.999,1.0d-9,250.0,0/ sml=sml0 eps=eps0 big=big0 mnlam=mnlam0 rsqmax=rsqmax0 pmin=pmin0 exmx=exmx0 itrace=itrace0 return entry chg_fract_dev(arg) sml0=arg return entry chg_dev_max(arg) rsqmax0=arg return entry chg_min_flmin(arg) eps0=arg return entry chg_big(arg) big0=arg return entry chg_min_lambdas(irg) mnlam0=irg return entry chg_min_null_prob(arg) pmin0=arg return entry chg_max_exp(arg) exmx0=arg return entry chg_itrace(irg) itrace0=irg return end subroutine elnet(ka,parm,no,ni,x,y,w,jd,vp,cl,ne,nx,nlam, flmin,u *lam,thr,isd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),vp(ni),ca(nx,nlam),cl(2,ni) double precision ulam(nlam),a0(nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 0.0)goto 10021 jerr=10000 return 10021 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) if(ka .ne. 
1)goto 10041 call elnetu (parm,no,ni,x,y,w,jd,vq,cl,ne,nx,nlam,flmin,ulam,thr, *isd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) goto 10051 10041 continue call elnetn (parm,no,ni,x,y,w,jd,vq,cl,ne,nx,nlam,flmin,ulam,thr,i *sd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) 10051 continue continue deallocate(vq) return end subroutine elnetu(parm,no,ni,x,y,w,jd,vp,cl,ne,nx,nlam, flmin,ula *m,thr,isd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,g,xv,vlam integer, dimension (:), allocatable :: ju allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 10071 jerr=7777 return 10071 continue call standard(no,ni,x,y,w,isd,intr,ju,g,xm,xs,ym,ys,xv,jerr) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 10091 do 10101 j=1,ni cl(:,j)=cl(:,j)*xs(j) 10101 continue continue 10091 continue if(flmin.ge.1.0) vlam=ulam/ys call elnet1(parm,ni,ju,vp,cl,g,no,ne,nx,x,nlam,flmin,vlam,thr,maxi *t,xv, lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 10111 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 10121 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 10121 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 10111 continue continue deallocate(xm,xs,g,ju,xv,vlam) return end subroutine standard(no,ni,x,y,w,isd,intr,ju,g,xm,xs,ym,ys,xv,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),g(ni),xm(ni),xs(ni),xv(ni) integer ju(ni) double precision, dimension (:), allocatable :: v allocate(v(1:no),stat=jerr) if(jerr.ne.0) return w=w/sum(w) v=sqrt(w) if(intr .ne. 0)goto 10141 ym=0.0 y=v*y ys=sqrt(dot_product(y,y)) y=y/ys do 10151 j=1,ni if(ju(j).eq.0)goto 10151 xm(j)=0.0 x(:,j)=v*x(:,j) xv(j)=dot_product(x(:,j),x(:,j)) if(isd .eq. 0)goto 10171 xbq=dot_product(v,x(:,j))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc goto 10181 10171 continue xs(j)=1.0 10181 continue continue 10151 continue continue goto 10191 10141 continue do 10201 j=1,ni if(ju(j).eq.0)goto 10201 xm(j)=dot_product(w,x(:,j)) x(:,j)=v*(x(:,j)-xm(j)) xv(j)=dot_product(x(:,j),x(:,j)) if(isd.gt.0) xs(j)=sqrt(xv(j)) 10201 continue continue if(isd .ne. 
0)goto 10221 xs=1.0 goto 10231 10221 continue do 10241 j=1,ni if(ju(j).eq.0)goto 10241 x(:,j)=x(:,j)/xs(j) 10241 continue continue xv=1.0 10231 continue continue ym=dot_product(w,y) y=v*(y-ym) ys=sqrt(dot_product(y,y)) y=y/ys 10191 continue continue g=0.0 do 10251 j=1,ni if(ju(j).ne.0) g(j)=dot_product(y,x(:,j)) 10251 continue continue deallocate(v) return end subroutine elnet1(beta,ni,ju,vp,cl,g,no,ne,nx,x,nlam,flmin,ulam,th *r,maxit,xv, lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),g(ni),x(no,ni),ulam(nlam),ao(nx,nlam) double precision rsqo(nlam),almo(nlam),xv(ni) double precision cl(2,ni) integer ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,da integer, dimension (:), allocatable :: mm double precision, dimension (:,:), allocatable :: c allocate(c(1:ni,1:nx),stat=jerr) if(jerr.ne.0) return; call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(da(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 alf=1.0 if(flmin .ge. 1.0)goto 10271 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 10271 continue rsq=0.0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 10281 m=1,nlam if(itrace.ne.0) call setpb(m-1) if(flmin .lt. 1.0)goto 10301 alm=ulam(m) goto 10291 10301 if(m .le. 2)goto 10311 alm=alm*alf goto 10291 10311 if(m .ne. 1)goto 10321 alm=big goto 10331 10321 continue alm=0.0 do 10341 j=1,ni if(ju(j).eq.0)goto 10341 if(vp(j).le.0.0)goto 10341 alm=max(alm,abs(g(j))/vp(j)) 10341 continue continue alm=alf*alm/max(bta,1.0d-3) 10331 continue 10291 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 continue 10351 continue if(iz*jz.ne.0) go to 10360 nlp=nlp+1 dlx=0.0 do 10371 k=1,ni if(ju(k).eq.0)goto 10371 ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 10371 if(mm(k) .ne. 
0)goto 10391 nin=nin+1 if(nin.gt.nx)goto 10372 do 10401 j=1,ni if(ju(j).eq.0)goto 10401 if(mm(j) .eq. 0)goto 10421 c(j,nin)=c(k,mm(j)) goto 10401 10421 continue if(j .ne. k)goto 10441 c(j,nin)=xv(j) goto 10401 10441 continue c(j,nin)=dot_product(x(:,j),x(:,k)) 10401 continue continue mm(k)=nin ia(nin)=k 10391 continue del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 10451 j=1,ni if(ju(j).ne.0) g(j)=g(j)-c(j,mm(k))*del 10451 continue continue 10371 continue 10372 continue if(dlx.lt.thr)goto 10352 if(nin.gt.nx)goto 10352 if(nlp .le. maxit)goto 10471 jerr=-m return 10471 continue 10360 continue iz=1 da(1:nin)=a(ia(1:nin)) continue 10481 continue nlp=nlp+1 dlx=0.0 do 10491 l=1,nin k=ia(l) ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 10491 del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 10501 j=1,nin g(ia(j))=g(ia(j))-c(ia(j),mm(k))*del 10501 continue continue 10491 continue continue if(dlx.lt.thr)goto 10482 if(nlp .le. maxit)goto 10521 jerr=-m return 10521 continue goto 10481 10482 continue da(1:nin)=a(ia(1:nin))-da(1:nin) do 10531 j=1,ni if(mm(j).ne.0)goto 10531 if(ju(j).ne.0) g(j)=g(j)-dot_product(da(1:nin),c(j,1:nin)) 10531 continue continue jz=0 goto 10351 10352 continue if(nin .le. 
nx)goto 10551 jerr=-10000-m goto 10282 10551 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 10281 if(flmin.ge.1.0)goto 10281 me=0 do 10561 j=1,nin if(ao(j,m).ne.0.0) me=me+1 10561 continue continue if(me.gt.ne)goto 10282 if(rsq-rsq0.lt.sml*rsq)goto 10282 if(rsq.gt.rsqmax)goto 10282 10281 continue 10282 continue deallocate(a,mm,c,da) return end subroutine elnetn(parm,no,ni,x,y,w,jd,vp,cl,ne,nx,nlam,flmin,ulam, *thr,isd, intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),x(no,ni),y(no),w(no),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,vlam integer, dimension (:), allocatable :: ju allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 10581 jerr=7777 return 10581 continue call standard1(no,ni,x,y,w,isd,intr,ju,xm,xs,ym,ys,xv,jerr) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 10601 do 10611 j=1,ni cl(:,j)=cl(:,j)*xs(j) 10611 continue continue 10601 continue if(flmin.ge.1.0) vlam=ulam/ys call elnet2(parm,ni,ju,vp,cl,y,no,ne,nx,x,nlam,flmin,vlam,thr,maxi *t,xv, lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 10621 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 10631 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 10631 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 10621 continue continue deallocate(xm,xs,ju,xv,vlam) return end subroutine standard1(no,ni,x,y,w,isd,intr,ju,xm,xs,ym,ys,xv,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),xm(ni),xs(ni),xv(ni) integer ju(ni) double precision, dimension (:), allocatable :: v allocate(v(1:no),stat=jerr) if(jerr.ne.0) return w=w/sum(w) v=sqrt(w) if(intr .ne. 0)goto 10651 ym=0.0 y=v*y ys=sqrt(dot_product(y,y)) y=y/ys do 10661 j=1,ni if(ju(j).eq.0)goto 10661 xm(j)=0.0 x(:,j)=v*x(:,j) xv(j)=dot_product(x(:,j),x(:,j)) if(isd .eq. 0)goto 10681 xbq=dot_product(v,x(:,j))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc goto 10691 10681 continue xs(j)=1.0 10691 continue continue 10661 continue continue go to 10700 10651 continue do 10711 j=1,ni if(ju(j).eq.0)goto 10711 xm(j)=dot_product(w,x(:,j)) x(:,j)=v*(x(:,j)-xm(j)) xv(j)=dot_product(x(:,j),x(:,j)) if(isd.gt.0) xs(j)=sqrt(xv(j)) 10711 continue continue if(isd .ne. 
0)goto 10731 xs=1.0 goto 10741 10731 continue do 10751 j=1,ni if(ju(j).eq.0)goto 10751 x(:,j)=x(:,j)/xs(j) 10751 continue continue xv=1.0 10741 continue continue ym=dot_product(w,y) y=v*(y-ym) ys=sqrt(dot_product(y,y)) y=y/ys 10700 continue deallocate(v) return end subroutine elnet2(beta,ni,ju,vp,cl,y,no,ne,nx,x,nlam,flmin,ulam,th *r,maxit,xv, lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),y(no),x(no,ni),ulam(nlam),ao(nx,nlam) double precision rsqo(nlam),almo(nlam),xv(ni) double precision cl(2,ni) integer ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,g integer, dimension (:), allocatable :: mm,ix call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ix(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta ix=0 alf=1.0 if(flmin .ge. 1.0)goto 10771 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 10771 continue rsq=0.0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) alm=0.0 do 10781 j=1,ni if(ju(j).eq.0)goto 10781 g(j)=abs(dot_product(y,x(:,j))) 10781 continue continue do 10791 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 10811 alm=ulam(m) goto 10801 10811 if(m .le. 2)goto 10821 alm=alm*alf goto 10801 10821 if(m .ne. 1)goto 10831 alm=big goto 10841 10831 continue alm0=0.0 do 10851 j=1,ni if(ju(j).eq.0)goto 10851 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 10851 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 10841 continue 10801 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 10861 k=1,ni if(ix(k).eq.1)goto 10861 if(ju(k).eq.0)goto 10861 if(g(k).gt.tlam*vp(k)) ix(k)=1 10861 continue continue continue 10871 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. 
maxit)goto 10901 jerr=-m return 10901 continue nlp=nlp+1 dlx=0.0 do 10911 k=1,ni if(ix(k).eq.0)goto 10911 gk=dot_product(y,x(:,k)) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 10911 if(mm(k) .ne. 0)goto 10931 nin=nin+1 if(nin.gt.nx)goto 10912 mm(k)=nin ia(nin)=k 10931 continue del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y=y-del*x(:,k) dlx=max(xv(k)*del**2,dlx) 10911 continue 10912 continue if(nin.gt.nx)goto 10872 if(dlx .ge. thr)goto 10951 ixx=0 do 10961 k=1,ni if(ix(k).eq.1)goto 10961 if(ju(k).eq.0)goto 10961 g(k)=abs(dot_product(y,x(:,k))) if(g(k) .le. ab*vp(k))goto 10981 ix(k)=1 ixx=1 10981 continue 10961 continue continue if(ixx.eq.1) go to 10880 goto 10872 10951 continue if(nlp .le. maxit)goto 11001 jerr=-m return 11001 continue 10360 continue iz=1 continue 11011 continue nlp=nlp+1 dlx=0.0 do 11021 l=1,nin k=ia(l) gk=dot_product(y,x(:,k)) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11021 del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y=y-del*x(:,k) dlx=max(xv(k)*del**2,dlx) 11021 continue continue if(dlx.lt.thr)goto 11012 if(nlp .le. maxit)goto 11041 jerr=-m return 11041 continue goto 11011 11012 continue jz=0 goto 10871 10872 continue if(nin .le. 
nx)goto 11061 jerr=-10000-m goto 10792 11061 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 10791 if(flmin.ge.1.0)goto 10791 me=0 do 11071 j=1,nin if(ao(j,m).ne.0.0) me=me+1 11071 continue continue if(me.gt.ne)goto 10792 if(rsq-rsq0.lt.sml*rsq)goto 10792 if(rsq.gt.rsqmax)goto 10792 10791 continue 10792 continue deallocate(a,mm,g,ix) return end subroutine chkvars(no,ni,x,ju) implicit double precision(a-h,o-z) double precision x(no,ni) integer ju(ni) do 11081 j=1,ni ju(j)=0 t=x(1,j) do 11091 i=2,no if(x(i,j).eq.t)goto 11091 ju(j)=1 goto 11092 11091 continue 11092 continue 11081 continue continue return end subroutine uncomp(ni,ca,ia,nin,a) implicit double precision(a-h,o-z) double precision ca(*),a(ni) integer ia(*) a=0.0 if(nin.gt.0) a(ia(1:nin))=ca(1:nin) return end subroutine modval(a0,ca,ia,nin,n,x,f) implicit double precision(a-h,o-z) double precision ca(nin),x(n,*),f(n) integer ia(nin) f=a0 if(nin.le.0) return do 11101 i=1,n f(i)=f(i)+dot_product(ca(1:nin),x(i,ia(1:nin))) 11101 continue continue return end subroutine spelnet(ka,parm,no,ni,x,ix,jx,y,w,jd,vp,cl,ne,nx,nlam, * flmin,ulam,thr,isd,intr, maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr *) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 0.0)goto 11121 jerr=10000 return 11121 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) if(ka .ne. 
1)goto 11141 call spelnetu (parm,no,ni,x,ix,jx,y,w,jd,vq,cl,ne,nx,nlam,flmin,u *lam,thr,isd, intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) goto 11151 11141 continue call spelnetn (parm,no,ni,x,ix,jx,y,w,jd,vq,cl,ne,nx,nlam,flmin,ul *am,thr,isd,intr, maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) 11151 continue continue deallocate(vq) return end subroutine spelnetu(parm,no,ni,x,ix,jx,y,w,jd,vp,cl,ne,nx,nlam, f *lmin,ulam,thr,isd,intr, maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,g,xv,vlam integer, dimension (:), allocatable :: ju allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 11171 jerr=7777 return 11171 continue call spstandard(no,ni,x,ix,jx,y,w,ju,isd,intr,g,xm,xs,ym,ys,xv,jer *r) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 11191 do 11201 j=1,ni cl(:,j)=cl(:,j)*xs(j) 11201 continue continue 11191 continue if(flmin.ge.1.0) vlam=ulam/ys call spelnet1(parm,ni,g,no,w,ne,nx,x,ix,jx,ju,vp,cl,nlam,flmin,vla *m,thr,maxit, xm,xs,xv,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 11211 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 11221 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 11221 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 11211 continue continue deallocate(xm,xs,g,ju,xv,vlam) return end subroutine spstandard(no,ni,x,ix,jx,y,w,ju,isd,intr,g,xm,xs,ym,ys, *xv,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),g(ni),xm(ni),xs(ni),xv(ni) integer ix(*),jx(*),ju(ni) jerr = jerr*1 w=w/sum(w) if(intr .ne. 0)goto 11241 ym=0.0 ys=sqrt(dot_product(w,y**2)) y=y/ys do 11251 j=1,ni if(ju(j).eq.0)goto 11251 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .eq. 0)goto 11271 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 11281 11271 continue xs(j)=1.0 11281 continue continue 11251 continue continue goto 11291 11241 continue do 11301 j=1,ni if(ju(j).eq.0)goto 11301 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd.gt.0) xs(j)=sqrt(xv(j)) 11301 continue continue if(isd .ne. 
0)goto 11321 xs=1.0 goto 11331 11321 continue xv=1.0 11331 continue continue ym=dot_product(w,y) y=y-ym ys=sqrt(dot_product(w,y**2)) y=y/ys 11291 continue continue g=0.0 do 11341 j=1,ni if(ju(j).eq.0)goto 11341 jb=ix(j) je=ix(j+1)-1 g(j)=dot_product(w(jx(jb:je))*y(jx(jb:je)),x(jb:je))/xs(j) 11341 continue continue return end subroutine spelnet1(beta,ni,g,no,w,ne,nx,x,ix,jx,ju,vp,cl,nlam,flm *in,ulam, thr,maxit,xm,xs,xv,lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision g(ni),vp(ni),x(*),ulam(nlam),w(no) double precision ao(nx,nlam),rsqo(nlam),almo(nlam) double precision xm(ni),xs(ni),xv(ni),cl(2,ni) integer ix(*),jx(*),ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,da integer, dimension (:), allocatable :: mm double precision, dimension (:,:), allocatable :: c allocate(c(1:ni,1:nx),stat=jerr) if(jerr.ne.0) return; call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(da(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 alf=1.0 if(flmin .ge. 1.0)goto 11361 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 11361 continue rsq=0.0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 11371 m=1,nlam if(itrace.ne.0) call setpb(m-1) if(flmin .lt. 1.0)goto 11391 alm=ulam(m) goto 11381 11391 if(m .le. 2)goto 11401 alm=alm*alf goto 11381 11401 if(m .ne. 1)goto 11411 alm=big goto 11421 11411 continue alm=0.0 do 11431 j=1,ni if(ju(j).eq.0)goto 11431 if(vp(j).le.0.0)goto 11431 alm=max(alm,abs(g(j))/vp(j)) 11431 continue continue alm=alf*alm/max(bta,1.0d-3) 11421 continue 11381 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 continue 11441 continue if(iz*jz.ne.0) go to 10360 nlp=nlp+1 dlx=0.0 do 11451 k=1,ni if(ju(k).eq.0)goto 11451 ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11451 if(mm(k) .ne. 
0)goto 11471 nin=nin+1 if(nin.gt.nx)goto 11452 do 11481 j=1,ni if(ju(j).eq.0)goto 11481 if(mm(j) .eq. 0)goto 11501 c(j,nin)=c(k,mm(j)) goto 11481 11501 continue if(j .ne. k)goto 11521 c(j,nin)=xv(j) goto 11481 11521 continue c(j,nin)= (row_prod(j,k,ix,jx,x,w)-xm(j)*xm(k))/(xs(j)*xs(k)) 11481 continue continue mm(k)=nin ia(nin)=k 11471 continue del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 11531 j=1,ni if(ju(j).ne.0) g(j)=g(j)-c(j,mm(k))*del 11531 continue continue 11451 continue 11452 continue if(dlx.lt.thr)goto 11442 if(nin.gt.nx)goto 11442 if(nlp .le. maxit)goto 11551 jerr=-m return 11551 continue 10360 continue iz=1 da(1:nin)=a(ia(1:nin)) continue 11561 continue nlp=nlp+1 dlx=0.0 do 11571 l=1,nin k=ia(l) ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11571 del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 11581 j=1,nin g(ia(j))=g(ia(j))-c(ia(j),mm(k))*del 11581 continue continue 11571 continue continue if(dlx.lt.thr)goto 11562 if(nlp .le. maxit)goto 11601 jerr=-m return 11601 continue goto 11561 11562 continue da(1:nin)=a(ia(1:nin))-da(1:nin) do 11611 j=1,ni if(mm(j).ne.0)goto 11611 if(ju(j).ne.0) g(j)=g(j)-dot_product(da(1:nin),c(j,1:nin)) 11611 continue continue jz=0 goto 11441 11442 continue if(nin .le. 
nx)goto 11631 jerr=-10000-m goto 11372 11631 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 11371 if(flmin.ge.1.0)goto 11371 me=0 do 11641 j=1,nin if(ao(j,m).ne.0.0) me=me+1 11641 continue continue if(me.gt.ne)goto 11372 if(rsq-rsq0.lt.sml*rsq)goto 11372 if(rsq.gt.rsqmax)goto 11372 11371 continue 11372 continue deallocate(a,mm,c,da) return end subroutine spelnetn(parm,no,ni,x,ix,jx,y,w,jd,vp,cl,ne,nx,nlam,flm *in,ulam, thr,isd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),vp(ni),y(no),w(no),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,vlam integer, dimension (:), allocatable :: ju allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 11661 jerr=7777 return 11661 continue call spstandard1(no,ni,x,ix,jx,y,w,ju,isd,intr,xm,xs,ym,ys,xv,jerr *) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 11681 do 11691 j=1,ni cl(:,j)=cl(:,j)*xs(j) 11691 continue continue 11681 continue if(flmin.ge.1.0) vlam=ulam/ys call spelnet2(parm,ni,y,w,no,ne,nx,x,ix,jx,ju,vp,cl,nlam,flmin,vla *m,thr,maxit, xm,xs,xv,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 11701 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 11711 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 11711 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 11701 continue continue deallocate(xm,xs,ju,xv,vlam) return end subroutine spstandard1(no,ni,x,ix,jx,y,w,ju,isd,intr,xm,xs,ym,ys,x *v,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),xm(ni),xs(ni),xv(ni) integer ix(*),jx(*),ju(ni) jerr = jerr*1 w=w/sum(w) if(intr .ne. 0)goto 11731 ym=0.0 ys=sqrt(dot_product(w,y**2)) y=y/ys do 11741 j=1,ni if(ju(j).eq.0)goto 11741 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .eq. 0)goto 11761 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 11771 11761 continue xs(j)=1.0 11771 continue continue 11741 continue continue return 11731 continue do 11781 j=1,ni if(ju(j).eq.0)goto 11781 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd.gt.0) xs(j)=sqrt(xv(j)) 11781 continue continue if(isd .ne. 
0)goto 11801 xs=1.0 goto 11811 11801 continue xv=1.0 11811 continue continue ym=dot_product(w,y) y=y-ym ys=sqrt(dot_product(w,y**2)) y=y/ys return end subroutine spelnet2(beta,ni,y,w,no,ne,nx,x,ix,jx,ju,vp,cl,nlam,flm *in,ulam, thr,maxit,xm,xs,xv,lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision y(no),w(no),x(*),vp(ni),ulam(nlam),cl(2,ni) double precision ao(nx,nlam),rsqo(nlam),almo(nlam),xm(ni),xs(ni),x *v(ni) integer ix(*),jx(*),ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,g integer, dimension (:), allocatable :: mm,iy call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 iy=0 alf=1.0 if(flmin .ge. 1.0)goto 11831 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 11831 continue rsq=0.0 a=0.0 mm=0 o=0.0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 11841 j=1,ni if(ju(j).eq.0)goto 11841 jb=ix(j) je=ix(j+1)-1 g(j)=abs(dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(j)) 11841 continue continue do 11851 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 11871 alm=ulam(m) goto 11861 11871 if(m .le. 2)goto 11881 alm=alm*alf goto 11861 11881 if(m .ne. 1)goto 11891 alm=big goto 11901 11891 continue alm0=0.0 do 11911 j=1,ni if(ju(j).eq.0)goto 11911 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 11911 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 11901 continue 11861 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 11921 k=1,ni if(iy(k).eq.1)goto 11921 if(ju(k).eq.0)goto 11921 if(g(k).gt.tlam*vp(k)) iy(k)=1 11921 continue continue continue 11931 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. 
maxit)goto 11951 jerr=-m return 11951 continue nlp=nlp+1 dlx=0.0 do 11961 k=1,ni if(iy(k).eq.0)goto 11961 jb=ix(k) je=ix(k+1)-1 gk=dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(k) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11961 if(mm(k) .ne. 0)goto 11981 nin=nin+1 if(nin.gt.nx)goto 11962 mm(k)=nin ia(nin)=k 11981 continue del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y(jx(jb:je))=y(jx(jb:je))-del*x(jb:je)/xs(k) o=o+del*xm(k)/xs(k) dlx=max(xv(k)*del**2,dlx) 11961 continue 11962 continue if(nin.gt.nx)goto 11932 if(dlx .ge. thr)goto 12001 ixx=0 do 12011 j=1,ni if(iy(j).eq.1)goto 12011 if(ju(j).eq.0)goto 12011 jb=ix(j) je=ix(j+1)-1 g(j)=abs(dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(j)) if(g(j) .le. ab*vp(j))goto 12031 iy(j)=1 ixx=1 12031 continue 12011 continue continue if(ixx.eq.1) go to 10880 goto 11932 12001 continue if(nlp .le. maxit)goto 12051 jerr=-m return 12051 continue 10360 continue iz=1 continue 12061 continue nlp=nlp+1 dlx=0.0 do 12071 l=1,nin k=ia(l) jb=ix(k) je=ix(k+1)-1 gk=dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(k) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 12071 del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y(jx(jb:je))=y(jx(jb:je))-del*x(jb:je)/xs(k) o=o+del*xm(k)/xs(k) dlx=max(xv(k)*del**2,dlx) 12071 continue continue if(dlx.lt.thr)goto 12062 if(nlp .le. maxit)goto 12091 jerr=-m return 12091 continue goto 12061 12062 continue jz=0 goto 11931 11932 continue if(nin .le. 
nx)goto 12111 jerr=-10000-m goto 11852 12111 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 11851 if(flmin.ge.1.0)goto 11851 me=0 do 12121 j=1,nin if(ao(j,m).ne.0.0) me=me+1 12121 continue continue if(me.gt.ne)goto 11852 if(rsq-rsq0.lt.sml*rsq)goto 11852 if(rsq.gt.rsqmax)goto 11852 11851 continue 11852 continue deallocate(a,mm,g,iy) return end subroutine spchkvars(no,ni,x,ix,ju) implicit double precision(a-h,o-z) double precision x(*) integer ix(*),ju(ni) do 12131 j=1,ni ju(j)=0 jb=ix(j) nj=ix(j+1)-jb if(nj.eq.0)goto 12131 je=ix(j+1)-1 if(nj .ge. no)goto 12151 do 12161 i=jb,je if(x(i).eq.0.0)goto 12161 ju(j)=1 goto 12162 12161 continue 12162 continue goto 12171 12151 continue t=x(jb) do 12181 i=jb+1,je if(x(i).eq.t)goto 12181 ju(j)=1 goto 12182 12181 continue 12182 continue 12171 continue continue 12131 continue continue return end subroutine cmodval(a0,ca,ia,nin,x,ix,jx,n,f) implicit double precision(a-h,o-z) double precision ca(*),x(*),f(n) integer ia(*),ix(*),jx(*) f=a0 do 12191 j=1,nin k=ia(j) kb=ix(k) ke=ix(k+1)-1 f(jx(kb:ke))=f(jx(kb:ke))+ca(j)*x(kb:ke) 12191 continue continue return end function row_prod(i,j,ia,ja,ra,w) implicit double precision(a-h,o-z) integer ia(*),ja(*) double precision ra(*),w(*) row_prod=dot(ra(ia(i)),ra(ia(j)),ja(ia(i)),ja(ia(j)), ia(i+1)-ia( *i),ia(j+1)-ia(j),w) return end function dot(x,y,mx,my,nx,ny,w) implicit double precision(a-h,o-z) double precision x(*),y(*),w(*) integer mx(*),my(*) i=1 j=i s=0.0 continue 12201 continue continue 12211 if(mx(i).ge.my(j))goto 12212 i=i+1 if(i.gt.nx) go to 12220 goto 12211 12212 continue if(mx(i).eq.my(j)) go to 12230 continue 12241 if(my(j).ge.mx(i))goto 12242 j=j+1 if(j.gt.ny) go to 12220 goto 12241 12242 continue if(mx(i).eq.my(j)) go to 12230 goto 12201 12230 continue s=s+w(mx(i))*x(i)*y(j) i=i+1 if(i.gt.nx)goto 12202 j=j+1 if(j.gt.ny)goto 12202 goto 12201 12202 continue 12220 continue dot=s return end subroutine 
lognet(parm,no,ni,nc,x,y,g,jd,vp,cl,ne,nx,nlam,flmin,ul *am,thr, isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jer *r) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,max(2,nc)),g(no,nc),vp(ni),ulam(nla *m) double precision ca(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl *(2,ni) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq,xv integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 12261 jerr=10000 return 12261 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return if(kopt .ne. 2)goto 12281 allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return 12281 continue if(isd .le. 0)goto 12301 allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return 12301 continue call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 12321 jerr=7777 return 12321 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) do 12331 i=1,no ww(i)=sum(y(i,:)) if(ww(i).gt.0.0) y(i,:)=y(i,:)/ww(i) 12331 continue continue sw=sum(ww) ww=ww/sw if(nc .ne. 1)goto 12351 call lstandard1(no,ni,x,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 12371 do 12381 j=1,ni cl(:,j)=cl(:,j)*xs(j) 12381 continue continue 12371 continue call lognet2n(parm,no,ni,x,y(:,1),g(:,1),ww,ju,vq,cl,ne,nx,nlam,fl *min,ulam, thr,isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm,n *lp,jerr) goto 12341 12351 if(kopt .ne. 2)goto 12391 call multlstandard1(no,ni,x,ww,ju,isd,intr,xm,xs,xv) if(isd .le. 0)goto 12411 do 12421 j=1,ni cl(:,j)=cl(:,j)*xs(j) 12421 continue continue 12411 continue call multlognetn(parm,no,ni,nc,x,y,g,ww,ju,vq,cl,ne,nx,nlam,flmin, *ulam,thr, intr,maxit,xv,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) goto 12431 12391 continue call lstandard1(no,ni,x,ww,ju,isd,intr,xm,xs) if(isd .le. 
0)goto 12451 do 12461 j=1,ni cl(:,j)=cl(:,j)*xs(j) 12461 continue continue 12451 continue call lognetn(parm,no,ni,nc,x,y,g,ww,ju,vq,cl,ne,nx,nlam,flmin,ulam *,thr, isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) 12431 continue 12341 continue if(jerr.gt.0) return dev0=2.0*sw*dev0 do 12471 k=1,lmu nk=nin(k) do 12481 ic=1,nc if(isd .le. 0)goto 12501 do 12511 l=1,nk ca(l,ic,k)=ca(l,ic,k)/xs(ia(l)) 12511 continue continue 12501 continue if(intr .ne. 0)goto 12531 a0(ic,k)=0.0 goto 12541 12531 continue a0(ic,k)=a0(ic,k)-dot_product(ca(1:nk,ic,k),xm(ia(1:nk))) 12541 continue continue 12481 continue continue 12471 continue continue deallocate(ww,ju,vq,xm) if(isd.gt.0) deallocate(xs) if(kopt.eq.2) deallocate(xv) return end subroutine lstandard1(no,ni,x,w,ju,isd,intr,xm,xs) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),xm(ni),xs(ni) integer ju(ni) if(intr .ne. 0)goto 12561 do 12571 j=1,ni if(ju(j).eq.0)goto 12571 xm(j)=0.0 if(isd .eq. 0)goto 12591 vc=dot_product(w,x(:,j)**2)-dot_product(w,x(:,j))**2 xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) 12591 continue 12571 continue continue return 12561 continue do 12601 j=1,ni if(ju(j).eq.0)goto 12601 xm(j)=dot_product(w,x(:,j)) x(:,j)=x(:,j)-xm(j) if(isd .le. 0)goto 12621 xs(j)=sqrt(dot_product(w,x(:,j)**2)) x(:,j)=x(:,j)/xs(j) 12621 continue 12601 continue continue return end subroutine multlstandard1(no,ni,x,w,ju,isd,intr,xm,xs,xv) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),xm(ni),xs(ni),xv(ni) integer ju(ni) if(intr .ne. 0)goto 12641 do 12651 j=1,ni if(ju(j).eq.0)goto 12651 xm(j)=0.0 xv(j)=dot_product(w,x(:,j)**2) if(isd .eq. 0)goto 12671 xbq=dot_product(w,x(:,j))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc 12671 continue 12651 continue continue return 12641 continue do 12681 j=1,ni if(ju(j).eq.0)goto 12681 xm(j)=dot_product(w,x(:,j)) x(:,j)=x(:,j)-xm(j) xv(j)=dot_product(w,x(:,j)**2) if(isd .le. 
0)goto 12701 xs(j)=sqrt(xv(j)) x(:,j)=x(:,j)/xs(j) xv(j)=1.0 12701 continue 12681 continue continue return end subroutine lognet2n(parm,no,ni,x,y,g,w,ju,vp,cl,ne,nx,nlam,flmin,u *lam,shri, isd,intr,maxit,kopt,lmu,a0,a,m,kin,dev0,dev,alm,nlp,jer *r) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),w(no),vp(ni),ulam(nlam),cl(2 *,ni) double precision a(nx,nlam),a0(nlam),dev(nlam),alm(nlam) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: b,bs,v,r,xv,q,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) allocate(b(0:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(q(1:no),stat=jerr) if(jerr.ne.0) return fmax=log(1.0/pmin-1.0) fmin=-fmax vmin=(1.0+pmin)*pmin*(1.0-pmin) bta=parm omb=1.0-bta q0=dot_product(w,y) if(q0 .gt. pmin)goto 12721 jerr=8001 return 12721 continue if(q0 .lt. 1.0-pmin)goto 12741 jerr=9001 return 12741 continue if(intr.eq.0.0) q0=0.5 ixx=0 al=0.0 bz=0.0 if(intr.ne.0) bz=log(q0/(1.0-q0)) if(nonzero(no,g) .ne. 0)goto 12761 vi=q0*(1.0-q0) b(0)=bz v=vi*w r=w*(y-q0) q=q0 xmz=vi dev1=-(bz*q0+log(1.0-q0)) goto 12771 12761 continue b(0)=0.0 if(intr .eq. 0)goto 12791 b(0)=azero(no,y,g,w,jerr) if(jerr.ne.0) return 12791 continue q=1.0/(1.0+exp(-b(0)-g)) v=w*q*(1.0-q) r=w*(y-q) xmz=sum(v) dev1=-(b(0)*q0+dot_product(w,y*g+log(1.0-q))) 12771 continue continue if(kopt .le. 0)goto 12811 if(isd .le. 0 .or. intr .eq. 
0)goto 12831 xv=0.25 goto 12841 12831 continue do 12851 j=1,ni if(ju(j).ne.0) xv(j)=0.25*dot_product(w,x(:,j)**2) 12851 continue continue 12841 continue continue 12811 continue dev0=dev1 do 12861 i=1,no if(y(i).gt.0.0) dev0=dev0+w(i)*y(i)*log(y(i)) if(y(i).lt.1.0) dev0=dev0+w(i)*(1.0-y(i))*log(1.0-y(i)) 12861 continue continue alf=1.0 if(flmin .ge. 1.0)goto 12881 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 12881 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) bs=0.0 b(1:ni)=0.0 shr=shri*dev0 do 12891 j=1,ni if(ju(j).eq.0)goto 12891 ga(j)=abs(dot_product(r,x(:,j))) 12891 continue continue do 12901 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 12921 al=ulam(ilm) goto 12911 12921 if(ilm .le. 2)goto 12931 al=al*alf goto 12911 12931 if(ilm .ne. 1)goto 12941 al=big goto 12951 12941 continue al0=0.0 do 12961 j=1,ni if(ju(j).eq.0)goto 12961 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 12961 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 12951 continue 12911 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 12971 k=1,ni if(ixx(k).eq.1)goto 12971 if(ju(k).eq.0)goto 12971 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 12971 continue continue 10880 continue continue 12981 continue if(nlp .le. maxit)goto 13001 jerr=-ilm return 13001 continue bs(0)=b(0) if(nin.gt.0) bs(m(1:nin))=b(m(1:nin)) if(kopt .ne. 0)goto 13021 do 13031 j=1,ni if(ixx(j).gt.0) xv(j)=dot_product(v,x(:,j)**2) 13031 continue continue 13021 continue continue 13041 continue nlp=nlp+1 dlx=0.0 do 13051 k=1,ni if(ixx(k).eq.0)goto 13051 bk=b(k) gk=dot_product(r,x(:,k)) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 13071 b(k)=0.0 goto 13081 13071 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 13081 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 13051 dlx=max(dlx,xv(k)*d**2) r=r-d*v*x(:,k) if(mm(k) .ne. 
0)goto 13101 nin=nin+1 if(nin.gt.nx)goto 13052 mm(k)=nin m(nin)=k 13101 continue 13051 continue 13052 continue if(nin.gt.nx)goto 13042 d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 13121 b(0)=b(0)+d dlx=max(dlx,xmz*d**2) r=r-d*v 13121 continue if(dlx.lt.shr)goto 13042 if(nlp .le. maxit)goto 13141 jerr=-ilm return 13141 continue continue 13151 continue nlp=nlp+1 dlx=0.0 do 13161 l=1,nin k=m(l) bk=b(k) gk=dot_product(r,x(:,k)) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 13181 b(k)=0.0 goto 13191 13181 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 13191 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 13161 dlx=max(dlx,xv(k)*d**2) r=r-d*v*x(:,k) 13161 continue continue d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 13211 b(0)=b(0)+d dlx=max(dlx,xmz*d**2) r=r-d*v 13211 continue if(dlx.lt.shr)goto 13152 if(nlp .le. maxit)goto 13231 jerr=-ilm return 13231 continue goto 13151 13152 continue goto 13041 13042 continue if(nin.gt.nx)goto 12982 do 13241 i=1,no fi=b(0)+g(i) if(nin.gt.0) fi=fi+dot_product(b(m(1:nin)),x(i,m(1:nin))) if(fi .ge. fmin)goto 13261 q(i)=0.0 goto 13251 13261 if(fi .le. fmax)goto 13271 q(i)=1.0 goto 13281 13271 continue q(i)=1.0/(1.0+exp(-fi)) 13281 continue 13251 continue 13241 continue continue v=w*q*(1.0-q) xmz=sum(v) if(xmz.le.vmin)goto 12982 r=w*(y-q) if(xmz*(b(0)-bs(0))**2 .ge. shr)goto 13301 ix=0 do 13311 j=1,nin k=m(j) if(xv(k)*(b(k)-bs(k))**2.lt.shr)goto 13311 ix=1 goto 13312 13311 continue 13312 continue if(ix .ne. 0)goto 13331 do 13341 k=1,ni if(ixx(k).eq.1)goto 13341 if(ju(k).eq.0)goto 13341 ga(k)=abs(dot_product(r,x(:,k))) if(ga(k) .le. al1*vp(k))goto 13361 ixx(k)=1 ix=1 13361 continue 13341 continue continue if(ix.eq.1) go to 10880 goto 12982 13331 continue 13301 continue goto 12981 12982 continue if(nin .le. 
nx)goto 13381 jerr=-10000-ilm goto 12902 13381 continue if(nin.gt.0) a(1:nin,ilm)=b(m(1:nin)) kin(ilm)=nin a0(ilm)=b(0) alm(ilm)=al lmu=ilm devi=dev2(no,w,y,q,pmin) dev(ilm)=(dev1-devi)/dev0 if(xmz.le.vmin)goto 12902 if(ilm.lt.mnl)goto 12901 if(flmin.ge.1.0)goto 12901 me=0 do 13391 j=1,nin if(a(j,ilm).ne.0.0) me=me+1 13391 continue continue if(me.gt.ne)goto 12902 if(dev(ilm).gt.devmax)goto 12902 if(dev(ilm)-dev(ilm-1).lt.sml)goto 12902 12901 continue 12902 continue g=log(q/(1.0-q)) deallocate(b,bs,v,r,xv,q,mm,ga,ixx) return end function dev2(n,w,y,p,pmin) implicit double precision(a-h,o-z) double precision w(n),y(n),p(n) pmax=1.0-pmin s=0.0 do 13401 i=1,n pi=min(max(pmin,p(i)),pmax) s=s-w(i)*(y(i)*log(pi)+(1.0-y(i))*log(1.0-pi)) 13401 continue continue dev2=s return end function azero(n,y,g,q,jerr) implicit double precision(a-h,o-z) parameter(eps=1.0d-7) double precision y(n),g(n),q(n) double precision, dimension (:), allocatable :: e,p,w azero = 0.0 allocate(e(1:n),stat=jerr) if(jerr.ne.0) return allocate(p(1:n),stat=jerr) if(jerr.ne.0) return allocate(w(1:n),stat=jerr) if(jerr.ne.0) return az=0.0 e=exp(-g) qy=dot_product(q,y) p=1.0/(1.0+e) continue 13411 continue w=q*p*(1.0-p) d=(qy-dot_product(q,p))/sum(w) az=az+d if(abs(d).lt.eps)goto 13412 ea0=exp(-az) p=1.0/(1.0+ea0*e) goto 13411 13412 continue azero=az deallocate(e,p,w) return end subroutine lognetn(parm,no,ni,nc,x,y,g,w,ju,vp,cl,ne,nx,nlam,flmin *,ulam,shri, isd,intr,maxit,kopt,lmu,a0,a,m,kin,dev0,dev,alm,nlp,j *err) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nc),g(no,nc),w(no),vp(ni),ulam(nlam *) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl( *2,ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q double precision, dimension (:), allocatable :: sxp,sxpl double precision, dimension (:), allocatable :: di,v,r,ga double precision, dimension (:,:), allocatable :: b,bs,xv integer, dimension (:), allocatable :: mm,is,ixx 
allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(di(1:no),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin pfm=(1.0+pmin)*pmin pfx=(1.0-pmin)*pmax vmin=pfm*pmax bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 13421 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 13441 jerr =8000+ic return 13441 continue if(q0 .lt. 1.0-pmin)goto 13461 jerr =9000+ic return 13461 continue if(intr .ne. 0)goto 13481 q0=1.0/nc b(0,ic)=0.0 goto 13491 13481 continue b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 13491 continue continue b(1:ni,ic)=0.0 13421 continue continue if(intr.eq.0) dev1=log(float(nc)) ixx=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 13511 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 13521 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 13521 continue continue goto 13531 13511 continue do 13541 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 13541 continue continue sxp=0.0 if(intr .ne. 
0)goto 13561 b(0,:)=0.0 goto 13571 13561 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 13571 continue continue dev1=0.0 do 13581 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 13581 continue continue sxpl=w*log(sxp) do 13591 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 13591 continue continue 13531 continue continue do 13601 ic=1,nc do 13611 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 13611 continue continue 13601 continue continue dev0=dev0+dev1 if(kopt .le. 0)goto 13631 if(isd .le. 0 .or. intr .eq. 0)goto 13651 xv=0.25 goto 13661 13651 continue do 13671 j=1,ni if(ju(j).ne.0) xv(j,:)=0.25*dot_product(w,x(:,j)**2) 13671 continue continue 13661 continue continue 13631 continue alf=1.0 if(flmin .ge. 1.0)goto 13691 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 13691 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 shr=shri*dev0 ga=0.0 do 13701 ic=1,nc r=w*(y(:,ic)-q(:,ic)/sxp) do 13711 j=1,ni if(ju(j).ne.0) ga(j)=max(ga(j),abs(dot_product(r,x(:,j)))) 13711 continue continue 13701 continue continue do 13721 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 13741 al=ulam(ilm) goto 13731 13741 if(ilm .le. 2)goto 13751 al=al*alf goto 13731 13751 if(ilm .ne. 1)goto 13761 al=big goto 13771 13761 continue al0=0.0 do 13781 j=1,ni if(ju(j).eq.0)goto 13781 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 13781 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 13771 continue 13731 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 13791 k=1,ni if(ixx(k).eq.1)goto 13791 if(ju(k).eq.0)goto 13791 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 13791 continue continue 10880 continue continue 13801 continue ix=0 jx=ix ig=0 if(nlp .le. maxit)goto 13821 jerr=-ilm return 13821 continue do 13831 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) xmz=0.0 do 13841 i=1,no pic=q(i,ic)/sxp(i) if(pic .ge. 
pfm)goto 13861 pic=0.0 v(i)=0.0 goto 13851 13861 if(pic .le. pfx)goto 13871 pic=1.0 v(i)=0.0 goto 13881 13871 continue v(i)=w(i)*pic*(1.0-pic) xmz=xmz+v(i) 13881 continue 13851 continue r(i)=w(i)*(y(i,ic)-pic) 13841 continue continue if(xmz.le.vmin)goto 13831 ig=1 if(kopt .ne. 0)goto 13901 do 13911 j=1,ni if(ixx(j).gt.0) xv(j,ic)=dot_product(v,x(:,j)**2) 13911 continue continue 13901 continue continue 13921 continue nlp=nlp+1 dlx=0.0 do 13931 k=1,ni if(ixx(k).eq.0)goto 13931 bk=b(k,ic) gk=dot_product(r,x(:,k)) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 13951 b(k,ic)=0.0 goto 13961 13951 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 13961 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 13931 dlx=max(dlx,xv(k,ic)*d**2) r=r-d*v*x(:,k) if(mm(k) .ne. 0)goto 13981 nin=nin+1 if(nin .le. nx)goto 14001 jx=1 goto 13932 14001 continue mm(k)=nin m(nin)=k 13981 continue 13931 continue 13932 continue if(jx.gt.0)goto 13922 d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 14021 b(0,ic)=b(0,ic)+d dlx=max(dlx,xmz*d**2) r=r-d*v 14021 continue if(dlx.lt.shr)goto 13922 if(nlp .le. maxit)goto 14041 jerr=-ilm return 14041 continue continue 14051 continue nlp=nlp+1 dlx=0.0 do 14061 l=1,nin k=m(l) bk=b(k,ic) gk=dot_product(r,x(:,k)) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 14081 b(k,ic)=0.0 goto 14091 14081 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 14091 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 14061 dlx=max(dlx,xv(k,ic)*d**2) r=r-d*v*x(:,k) 14061 continue continue d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 14111 b(0,ic)=b(0,ic)+d dlx=max(dlx,xmz*d**2) r=r-d*v 14111 continue if(dlx.lt.shr)goto 14052 if(nlp .le. maxit)goto 14131 jerr=-ilm return 14131 continue goto 14051 14052 continue goto 13921 13922 continue if(jx.gt.0)goto 13832 if(xmz*(b(0,ic)-bs(0,ic))**2.gt.shr) ix=1 if(ix .ne. 
0)goto 14151 do 14161 j=1,nin k=m(j) if(xv(k,ic)*(b(k,ic)-bs(k,ic))**2 .le. shr)goto 14181 ix=1 goto 14162 14181 continue 14161 continue 14162 continue 14151 continue do 14191 i=1,no fi=b(0,ic)+g(i,ic) if(nin.gt.0) fi=fi+dot_product(b(m(1:nin),ic),x(i,m(1:nin))) fi=min(max(exmn,fi),exmx) sxp(i)=sxp(i)-q(i,ic) q(i,ic)=min(max(emin*sxp(i),exp(fi)),emax*sxp(i)) sxp(i)=sxp(i)+q(i,ic) 14191 continue continue 13831 continue 13832 continue s=-sum(b(0,:))/nc b(0,:)=b(0,:)+s di=s do 14201 j=1,nin l=m(j) if(vp(l) .gt. 0.0)goto 14221 s=sum(b(l,:))/nc goto 14231 14221 continue s=elc(parm,nc,cl(:,l),b(l,:),is) 14231 continue continue b(l,:)=b(l,:)-s di=di-s*x(:,l) 14201 continue continue di=exp(di) sxp=sxp*di do 14241 ic=1,nc q(:,ic)=q(:,ic)*di 14241 continue continue if(jx.gt.0)goto 13802 if(ig.eq.0)goto 13802 if(ix .ne. 0)goto 14261 do 14271 k=1,ni if(ixx(k).eq.1)goto 14271 if(ju(k).eq.0)goto 14271 ga(k)=0.0 14271 continue continue do 14281 ic=1,nc r=w*(y(:,ic)-q(:,ic)/sxp) do 14291 k=1,ni if(ixx(k).eq.1)goto 14291 if(ju(k).eq.0)goto 14291 ga(k)=max(ga(k),abs(dot_product(r,x(:,k)))) 14291 continue continue 14281 continue continue do 14301 k=1,ni if(ixx(k).eq.1)goto 14301 if(ju(k).eq.0)goto 14301 if(ga(k) .le. al1*vp(k))goto 14321 ixx(k)=1 ix=1 14321 continue 14301 continue continue if(ix.eq.1) go to 10880 goto 13802 14261 continue goto 13801 13802 continue if(jx .le. 
0)goto 14341 jerr=-10000-ilm goto 13722 14341 continue devi=0.0 do 14351 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 14361 i=1,no if(y(i,ic).le.0.0)goto 14361 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 14361 continue continue 14351 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ig.eq.0)goto 13722 if(ilm.lt.mnl)goto 13721 if(flmin.ge.1.0)goto 13721 if(nintot(ni,nx,nc,a(1,1,ilm),m,nin,is).gt.ne)goto 13722 if(dev(ilm).gt.devmax)goto 13722 if(dev(ilm)-dev(ilm-1).lt.sml)goto 13722 13721 continue 13722 continue g=log(q) do 14371 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 14371 continue continue deallocate(sxp,b,bs,v,r,xv,q,mm,is,ga,ixx) return end subroutine kazero(kk,n,y,g,q,az,jerr) implicit double precision(a-h,o-z) parameter(eps=1.0d-7) double precision y(n,kk),g(n,kk),q(n),az(kk) double precision, dimension (:), allocatable :: s double precision, dimension (:,:), allocatable :: e allocate(e(1:n,1:kk),stat=jerr) if(jerr.ne.0) return allocate(s(1:n),stat=jerr) if(jerr.ne.0) return az=0.0 e=exp(g) do 14381 i=1,n s(i)=sum(e(i,:)) 14381 continue continue continue 14391 continue dm=0.0 do 14401 k=1,kk t=0.0 u=t do 14411 i=1,n pik=e(i,k)/s(i) t=t+q(i)*(y(i,k)-pik) u=u+q(i)*pik*(1.0-pik) 14411 continue continue d=t/u az(k)=az(k)+d ed=exp(d) dm=max(dm,abs(d)) do 14421 i=1,n z=e(i,k) e(i,k)=z*ed s(i)=s(i)-z+e(i,k) 14421 continue continue 14401 continue continue if(dm.lt.eps)goto 14392 goto 14391 14392 continue az=az-sum(az)/kk deallocate(e,s) return end function elc(parm,n,cl,a,m) implicit double precision(a-h,o-z) double precision a(n),cl(2) integer m(n) fn=n am=sum(a)/fn if((parm .ne. 0.0) .and. (n .ne. 2))goto 14441 elc=am go to 14450 14441 continue do 14461 i=1,n m(i)=i 14461 continue continue call psort7(a,m,1,n) if(a(m(1)) .ne. a(m(n)))goto 14481 elc=a(1) go to 14450 14481 continue if(mod(n,2) .ne. 
1)goto 14501 ad=a(m(n/2+1)) goto 14511 14501 continue ad=0.5*(a(m(n/2+1))+a(m(n/2))) 14511 continue continue if(parm .ne. 1.0)goto 14531 elc=ad go to 14450 14531 continue b1=min(am,ad) b2=max(am,ad) k2=1 continue 14541 if(a(m(k2)).gt.b1)goto 14542 k2=k2+1 goto 14541 14542 continue k1=k2-1 continue 14551 if(a(m(k2)).ge.b2)goto 14552 k2=k2+1 goto 14551 14552 continue r=parm/((1.0-parm)*fn) is=0 sm=n-2*(k1-1) do 14561 k=k1,k2-1 sm=sm-2.0 s=r*sm+am if(s .le. a(m(k)) .or. s .gt. a(m(k+1)))goto 14581 is=k goto 14562 14581 continue 14561 continue 14562 continue if(is .eq. 0)goto 14601 elc=s go to 14450 14601 continue r2=2.0*r s1=a(m(k1)) am2=2.0*am cri=r2*sum(abs(a-s1))+s1*(s1-am2) elc=s1 do 14611 k=k1+1,k2 s=a(m(k)) if(s.eq.s1)goto 14611 c=r2*sum(abs(a-s))+s*(s-am2) if(c .ge. cri)goto 14631 cri=c elc=s 14631 continue s1=s 14611 continue continue 14450 continue elc=max(maxval(a-cl(2)),min(minval(a-cl(1)),elc)) return end function nintot(ni,nx,nc,a,m,nin,is) implicit double precision(a-h,o-z) double precision a(nx,nc) integer m(nx),is(ni) is=0 nintot=0 do 14641 ic=1,nc do 14651 j=1,nin k=m(j) if(is(k).ne.0)goto 14651 if(a(j,ic).eq.0.0)goto 14651 is(k)=k nintot=nintot+1 14651 continue continue 14641 continue continue return end subroutine luncomp(ni,nx,nc,ca,ia,nin,a) implicit double precision(a-h,o-z) double precision ca(nx,nc),a(ni,nc) integer ia(nx) a=0.0 do 14661 ic=1,nc if(nin.gt.0) a(ia(1:nin),ic)=ca(1:nin,ic) 14661 continue continue return end subroutine lmodval(nt,x,nc,nx,a0,ca,ia,nin,ans) implicit double precision(a-h,o-z) double precision a0(nc),ca(nx,nc),x(nt,*),ans(nc,nt) integer ia(nx) do 14671 i=1,nt do 14681 ic=1,nc ans(ic,i)=a0(ic) if(nin.gt.0) ans(ic,i)=ans(ic,i)+dot_product(ca(1:nin,ic),x(i,ia(1 *:nin))) 14681 continue continue 14671 continue continue return end subroutine splognet(parm,no,ni,nc,x,ix,jx,y,g,jd,vp,cl,ne,nx,nlam, *flmin, ulam,thr,isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm *,nlp,jerr) implicit double precision(a-h,o-z) double precision 
x(*),y(no,max(2,nc)),g(no,nc),vp(ni),ulam(nlam) double precision ca(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl *(2,ni) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq,xv integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 14701 jerr=10000 return 14701 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return if(kopt .ne. 2)goto 14721 allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return 14721 continue call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 14741 jerr=7777 return 14741 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) do 14751 i=1,no ww(i)=sum(y(i,:)) if(ww(i).gt.0.0) y(i,:)=y(i,:)/ww(i) 14751 continue continue sw=sum(ww) ww=ww/sw if(nc .ne. 1)goto 14771 call splstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 14791 do 14801 j=1,ni cl(:,j)=cl(:,j)*xs(j) 14801 continue continue 14791 continue call sprlognet2n(parm,no,ni,x,ix,jx,y(:,1),g(:,1),ww,ju,vq,cl,ne,n *x,nlam, flmin,ulam,thr,isd,intr,maxit,kopt,xm,xs,lmu,a0,ca,ia,nin *,dev0,dev, alm,nlp,jerr) goto 14761 14771 if(kopt .ne. 2)goto 14811 call multsplstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs,xv) if(isd .le. 0)goto 14831 do 14841 j=1,ni cl(:,j)=cl(:,j)*xs(j) 14841 continue continue 14831 continue call multsprlognetn(parm,no,ni,nc,x,ix,jx,y,g,ww,ju,vq,cl,ne,nx,nl *am,flmin, ulam,thr,intr,maxit,xv,xm,xs,lmu,a0,ca,ia,nin,dev0,dev, *alm,nlp,jerr) goto 14851 14811 continue call splstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs) if(isd .le. 
0)goto 14871 do 14881 j=1,ni cl(:,j)=cl(:,j)*xs(j) 14881 continue continue 14871 continue call sprlognetn(parm,no,ni,nc,x,ix,jx,y,g,ww,ju,vq,cl,ne,nx,nlam,f *lmin, ulam,thr,isd,intr,maxit,kopt,xm,xs,lmu,a0,ca, ia,nin,dev0, *dev,alm,nlp,jerr) 14851 continue 14761 continue if(jerr.gt.0) return dev0=2.0*sw*dev0 do 14891 k=1,lmu nk=nin(k) do 14901 ic=1,nc if(isd .le. 0)goto 14921 do 14931 l=1,nk ca(l,ic,k)=ca(l,ic,k)/xs(ia(l)) 14931 continue continue 14921 continue if(intr .ne. 0)goto 14951 a0(ic,k)=0.0 goto 14961 14951 continue a0(ic,k)=a0(ic,k)-dot_product(ca(1:nk,ic,k),xm(ia(1:nk))) 14961 continue continue 14901 continue continue 14891 continue continue deallocate(ww,ju,vq,xm,xs) if(kopt.eq.2) deallocate(xv) return end subroutine multsplstandard2(no,ni,x,ix,jx,w,ju,isd,intr,xm,xs,xv) implicit double precision(a-h,o-z) double precision x(*),w(no),xm(ni),xs(ni),xv(ni) integer ix(*),jx(*),ju(ni) if(intr .ne. 0)goto 14981 do 14991 j=1,ni if(ju(j).eq.0)goto 14991 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .eq. 0)goto 15011 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 15021 15011 continue xs(j)=1.0 15021 continue continue 14991 continue continue return 14981 continue do 15031 j=1,ni if(ju(j).eq.0)goto 15031 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd .le. 0)goto 15051 xs(j)=sqrt(xv(j)) xv(j)=1.0 15051 continue 15031 continue continue if(isd.eq.0) xs=1.0 return end subroutine splstandard2(no,ni,x,ix,jx,w,ju,isd,intr,xm,xs) implicit double precision(a-h,o-z) double precision x(*),w(no),xm(ni),xs(ni) integer ix(*),jx(*),ju(ni) if(intr .ne. 0)goto 15071 do 15081 j=1,ni if(ju(j).eq.0)goto 15081 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 if(isd .eq. 
0)goto 15101 vc=dot_product(w(jx(jb:je)),x(jb:je)**2) -dot_product(w(jx(jb:je) *),x(jb:je))**2 xs(j)=sqrt(vc) goto 15111 15101 continue xs(j)=1.0 15111 continue continue 15081 continue continue return 15071 continue do 15121 j=1,ni if(ju(j).eq.0)goto 15121 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) if(isd.ne.0) xs(j)=sqrt(dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j *)**2) 15121 continue continue if(isd.eq.0) xs=1.0 return end subroutine sprlognet2n(parm,no,ni,x,ix,jx,y,g,w,ju,vp,cl,ne,nx,nla *m, flmin,ulam,shri,isd,intr,maxit,kopt,xb,xs, lmu,a0,a,m,kin,dev *0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision a(nx,nlam),a0(nlam),dev(nlam),alm(nlam) double precision xb(ni),xs(ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: xm,b,bs,v,r double precision, dimension (:), allocatable :: sc,xv,q,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) allocate(b(0:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(0:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(q(1:no),stat=jerr) if(jerr.ne.0) return allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(sc(1:no),stat=jerr) if(jerr.ne.0) return fmax=log(1.0/pmin-1.0) fmin=-fmax vmin=(1.0+pmin)*pmin*(1.0-pmin) bta=parm omb=1.0-bta q0=dot_product(w,y) if(q0 .gt. pmin)goto 15141 jerr=8001 return 15141 continue if(q0 .lt. 1.0-pmin)goto 15161 jerr=9001 return 15161 continue if(intr.eq.0) q0=0.5 bz=0.0 if(intr.ne.0) bz=log(q0/(1.0-q0)) if(nonzero(no,g) .ne. 
0)goto 15181 vi=q0*(1.0-q0) b(0)=bz v=vi*w r=w*(y-q0) q=q0 xm(0)=vi dev1=-(bz*q0+log(1.0-q0)) goto 15191 15181 continue b(0)=0.0 if(intr .eq. 0)goto 15211 b(0)=azero(no,y,g,w,jerr) if(jerr.ne.0) return 15211 continue q=1.0/(1.0+exp(-b(0)-g)) v=w*q*(1.0-q) r=w*(y-q) xm(0)=sum(v) dev1=-(b(0)*q0+dot_product(w,y*g+log(1.0-q))) 15191 continue continue if(kopt .le. 0)goto 15231 if(isd .le. 0 .or. intr .eq. 0)goto 15251 xv=0.25 goto 15261 15251 continue do 15271 j=1,ni if(ju(j).eq.0)goto 15271 jb=ix(j) je=ix(j+1)-1 xv(j)=0.25*(dot_product(w(jx(jb:je)),x(jb:je)**2)-xb(j)**2) 15271 continue continue 15261 continue continue 15231 continue b(1:ni)=0.0 dev0=dev1 do 15281 i=1,no if(y(i).gt.0.0) dev0=dev0+w(i)*y(i)*log(y(i)) if(y(i).lt.1.0) dev0=dev0+w(i)*(1.0-y(i))*log(1.0-y(i)) 15281 continue continue alf=1.0 if(flmin .ge. 1.0)goto 15301 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 15301 continue m=0 mm=0 nin=0 o=0.0 svr=o mnl=min(mnlam,nlam) bs=0.0 nlp=0 nin=nlp shr=shri*dev0 al=0.0 ixx=0 do 15311 j=1,ni if(ju(j).eq.0)goto 15311 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=abs((gj-svr*xb(j))/xs(j)) 15311 continue continue do 15321 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 15341 al=ulam(ilm) goto 15331 15341 if(ilm .le. 2)goto 15351 al=al*alf goto 15331 15351 if(ilm .ne. 1)goto 15361 al=big goto 15371 15361 continue al0=0.0 do 15381 j=1,ni if(ju(j).eq.0)goto 15381 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 15381 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 15371 continue 15331 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 15391 k=1,ni if(ixx(k).eq.1)goto 15391 if(ju(k).eq.0)goto 15391 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 15391 continue continue 10880 continue continue 15401 continue if(nlp .le. 
maxit)goto 15421 jerr=-ilm return 15421 continue bs(0)=b(0) if(nin.gt.0) bs(m(1:nin))=b(m(1:nin)) do 15431 j=1,ni if(ixx(j).eq.0)goto 15431 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=v(jx(jb:je)) xm(j)=dot_product(sc(1:jn),x(jb:je)) if(kopt .ne. 0)goto 15451 xv(j)=dot_product(sc(1:jn),x(jb:je)**2) xv(j)=(xv(j)-2.0*xb(j)*xm(j)+xm(0)*xb(j)**2)/xs(j)**2 15451 continue 15431 continue continue continue 15461 continue nlp=nlp+1 dlx=0.0 do 15471 k=1,ni if(ixx(k).eq.0)goto 15471 jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 15491 b(k)=0.0 goto 15501 15491 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 15501 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 15471 dlx=max(dlx,xv(k)*d**2) if(mm(k) .ne. 0)goto 15521 nin=nin+1 if(nin.gt.nx)goto 15472 mm(k)=nin m(nin)=k sc(1:jn)=v(jx(jb:je)) xm(k)=dot_product(sc(1:jn),x(jb:je)) 15521 continue r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 15471 continue 15472 continue if(nin.gt.nx)goto 15462 d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 0.0)goto 15541 b(0)=b(0)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 15541 continue if(dlx.lt.shr)goto 15462 if(nlp .le. maxit)goto 15561 jerr=-ilm return 15561 continue continue 15571 continue nlp=nlp+1 dlx=0.0 do 15581 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 
0.0)goto 15601 b(k)=0.0 goto 15611 15601 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 15611 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 15581 dlx=max(dlx,xv(k)*d**2) r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 15581 continue continue d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 0.0)goto 15631 b(0)=b(0)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 15631 continue if(dlx.lt.shr)goto 15572 if(nlp .le. maxit)goto 15651 jerr=-ilm return 15651 continue goto 15571 15572 continue goto 15461 15462 continue if(nin.gt.nx)goto 15402 sc=b(0) b0=0.0 do 15661 j=1,nin l=m(j) jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))+b(l)*x(jb:je)/xs(l) b0=b0-b(l)*xb(l)/xs(l) 15661 continue continue sc=sc+b0 do 15671 i=1,no fi=sc(i)+g(i) if(fi .ge. fmin)goto 15691 q(i)=0.0 goto 15681 15691 if(fi .le. fmax)goto 15701 q(i)=1.0 goto 15711 15701 continue q(i)=1.0/(1.0+exp(-fi)) 15711 continue 15681 continue 15671 continue continue v=w*q*(1.0-q) xm(0)=sum(v) if(xm(0).lt.vmin)goto 15402 r=w*(y-q) svr=sum(r) o=0.0 if(xm(0)*(b(0)-bs(0))**2 .ge. shr)goto 15731 kx=0 do 15741 j=1,nin k=m(j) if(xv(k)*(b(k)-bs(k))**2.lt.shr)goto 15741 kx=1 goto 15742 15741 continue 15742 continue if(kx .ne. 0)goto 15761 do 15771 j=1,ni if(ixx(j).eq.1)goto 15771 if(ju(j).eq.0)goto 15771 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=abs((gj-svr*xb(j))/xs(j)) if(ga(j) .le. al1*vp(j))goto 15791 ixx(j)=1 kx=1 15791 continue 15771 continue continue if(kx.eq.1) go to 10880 goto 15402 15761 continue 15731 continue goto 15401 15402 continue if(nin .le. 
nx)goto 15811 jerr=-10000-ilm goto 15322 15811 continue if(nin.gt.0) a(1:nin,ilm)=b(m(1:nin)) kin(ilm)=nin a0(ilm)=b(0) alm(ilm)=al lmu=ilm devi=dev2(no,w,y,q,pmin) dev(ilm)=(dev1-devi)/dev0 if(ilm.lt.mnl)goto 15321 if(flmin.ge.1.0)goto 15321 me=0 do 15821 j=1,nin if(a(j,ilm).ne.0.0) me=me+1 15821 continue continue if(me.gt.ne)goto 15322 if(dev(ilm).gt.devmax)goto 15322 if(dev(ilm)-dev(ilm-1).lt.sml)goto 15322 if(xm(0).lt.vmin)goto 15322 15321 continue 15322 continue g=log(q/(1.0-q)) deallocate(xm,b,bs,v,r,sc,xv,q,mm,ga,ixx) return end subroutine sprlognetn(parm,no,ni,nc,x,ix,jx,y,g,w,ju,vp,cl,ne,nx,n *lam,flmin, ulam,shri,isd,intr,maxit,kopt,xb,xs,lmu,a0,a,m,kin,dev *0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nc),g(no,nc),w(no),vp(ni),ulam(nlam),xb *(ni),xs(ni) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl( *2,ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q double precision, dimension (:), allocatable :: sxp,sxpl double precision, dimension (:), allocatable :: sc,xm,v,r,ga double precision, dimension (:,:), allocatable :: b,bs,xv integer, dimension (:), allocatable :: mm,is,iy allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(xm(0:ni),stat=jerr) if(jerr.ne.0) return allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(sc(1:no),stat=jerr) if(jerr.ne.0) 
return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin pfm=(1.0+pmin)*pmin pfx=(1.0-pmin)*pmax vmin=pfm*pmax bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 15831 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 15851 jerr =8000+ic return 15851 continue if(q0 .lt. 1.0-pmin)goto 15871 jerr =9000+ic return 15871 continue if(intr.eq.0) q0=1.0/nc b(1:ni,ic)=0.0 b(0,ic)=0.0 if(intr .eq. 0)goto 15891 b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 15891 continue 15831 continue continue if(intr.eq.0) dev1=log(float(nc)) iy=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 15911 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 15921 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 15921 continue continue goto 15931 15911 continue do 15941 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 15941 continue continue sxp=0.0 if(intr .ne. 0)goto 15961 b(0,:)=0.0 goto 15971 15961 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 15971 continue continue dev1=0.0 do 15981 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 15981 continue continue sxpl=w*log(sxp) do 15991 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 15991 continue continue 15931 continue continue do 16001 ic=1,nc do 16011 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 16011 continue continue 16001 continue continue dev0=dev0+dev1 if(kopt .le. 0)goto 16031 if(isd .le. 0 .or. intr .eq. 0)goto 16051 xv=0.25 goto 16061 16051 continue do 16071 j=1,ni if(ju(j).eq.0)goto 16071 jb=ix(j) je=ix(j+1)-1 xv(j,:)=0.25*(dot_product(w(jx(jb:je)),x(jb:je)**2)-xb(j)**2) 16071 continue continue 16061 continue continue 16031 continue alf=1.0 if(flmin .ge. 
1.0)goto 16091 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 16091 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 svr=0.0 o=0.0 shr=shri*dev0 ga=0.0 do 16101 ic=1,nc v=q(:,ic)/sxp r=w*(y(:,ic)-v) v=w*v*(1.0-v) do 16111 j=1,ni if(ju(j).eq.0)goto 16111 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=max(ga(j),abs(gj-svr*xb(j))/xs(j)) 16111 continue continue 16101 continue continue do 16121 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 16141 al=ulam(ilm) goto 16131 16141 if(ilm .le. 2)goto 16151 al=al*alf goto 16131 16151 if(ilm .ne. 1)goto 16161 al=big goto 16171 16161 continue al0=0.0 do 16181 j=1,ni if(ju(j).eq.0)goto 16181 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 16181 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 16171 continue 16131 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 16191 k=1,ni if(iy(k).eq.1)goto 16191 if(ju(k).eq.0)goto 16191 if(ga(k).gt.tlam*vp(k)) iy(k)=1 16191 continue continue 10880 continue continue 16201 continue ixx=0 jxx=ixx ig=0 if(nlp .le. maxit)goto 16221 jerr=-ilm return 16221 continue do 16231 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) xm(0)=0.0 svr=0.0 o=0.0 do 16241 i=1,no pic=q(i,ic)/sxp(i) if(pic .ge. pfm)goto 16261 pic=0.0 v(i)=0.0 goto 16251 16261 if(pic .le. pfx)goto 16271 pic=1.0 v(i)=0.0 goto 16281 16271 continue v(i)=w(i)*pic*(1.0-pic) xm(0)=xm(0)+v(i) 16281 continue 16251 continue r(i)=w(i)*(y(i,ic)-pic) svr=svr+r(i) 16241 continue continue if(xm(0).le.vmin)goto 16231 ig=1 do 16291 j=1,ni if(iy(j).eq.0)goto 16291 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(v(jx(jb:je)),x(jb:je)) if(kopt .ne. 
0)goto 16311 xv(j,ic)=dot_product(v(jx(jb:je)),x(jb:je)**2) xv(j,ic)=(xv(j,ic)-2.0*xb(j)*xm(j)+xm(0)*xb(j)**2)/xs(j)**2 16311 continue 16291 continue continue continue 16321 continue nlp=nlp+1 dlx=0.0 do 16331 k=1,ni if(iy(k).eq.0)goto 16331 jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k,ic) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 16351 b(k,ic)=0.0 goto 16361 16351 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 16361 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 16331 dlx=max(dlx,xv(k,ic)*d**2) if(mm(k) .ne. 0)goto 16381 nin=nin+1 if(nin .le. nx)goto 16401 jxx=1 goto 16332 16401 continue mm(k)=nin m(nin)=k xm(k)=dot_product(v(jx(jb:je)),x(jb:je)) 16381 continue r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 16331 continue 16332 continue if(jxx.gt.0)goto 16322 d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 0.0)goto 16421 b(0,ic)=b(0,ic)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 16421 continue if(dlx.lt.shr)goto 16322 if(nlp .le. maxit)goto 16441 jerr=-ilm return 16441 continue continue 16451 continue nlp=nlp+1 dlx=0.0 do 16461 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k,ic) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 16481 b(k,ic)=0.0 goto 16491 16481 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 16491 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 16461 dlx=max(dlx,xv(k,ic)*d**2) r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 16461 continue continue d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 
0.0)goto 16511 b(0,ic)=b(0,ic)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 16511 continue if(dlx.lt.shr)goto 16452 if(nlp .le. maxit)goto 16531 jerr=-ilm return 16531 continue goto 16451 16452 continue goto 16321 16322 continue if(jxx.gt.0)goto 16232 if(xm(0)*(b(0,ic)-bs(0,ic))**2.gt.shr) ixx=1 if(ixx .ne. 0)goto 16551 do 16561 j=1,nin k=m(j) if(xv(k,ic)*(b(k,ic)-bs(k,ic))**2 .le. shr)goto 16581 ixx=1 goto 16562 16581 continue 16561 continue 16562 continue 16551 continue sc=b(0,ic)+g(:,ic) b0=0.0 do 16591 j=1,nin l=m(j) jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))+b(l,ic)*x(jb:je)/xs(l) b0=b0-b(l,ic)*xb(l)/xs(l) 16591 continue continue sc=min(max(exmn,sc+b0),exmx) sxp=sxp-q(:,ic) q(:,ic)=min(max(emin*sxp,exp(sc)),emax*sxp) sxp=sxp+q(:,ic) 16231 continue 16232 continue s=-sum(b(0,:))/nc b(0,:)=b(0,:)+s sc=s b0=0.0 do 16601 j=1,nin l=m(j) if(vp(l) .gt. 0.0)goto 16621 s=sum(b(l,:))/nc goto 16631 16621 continue s=elc(parm,nc,cl(:,l),b(l,:),is) 16631 continue continue b(l,:)=b(l,:)-s jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))-s*x(jb:je)/xs(l) b0=b0+s*xb(l)/xs(l) 16601 continue continue sc=sc+b0 sc=exp(sc) sxp=sxp*sc do 16641 ic=1,nc q(:,ic)=q(:,ic)*sc 16641 continue continue if(jxx.gt.0)goto 16202 if(ig.eq.0)goto 16202 if(ixx .ne. 0)goto 16661 do 16671 j=1,ni if(iy(j).eq.1)goto 16671 if(ju(j).eq.0)goto 16671 ga(j)=0.0 16671 continue continue do 16681 ic=1,nc v=q(:,ic)/sxp r=w*(y(:,ic)-v) v=w*v*(1.0-v) do 16691 j=1,ni if(iy(j).eq.1)goto 16691 if(ju(j).eq.0)goto 16691 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=max(ga(j),abs(gj-svr*xb(j))/xs(j)) 16691 continue continue 16681 continue continue do 16701 k=1,ni if(iy(k).eq.1)goto 16701 if(ju(k).eq.0)goto 16701 if(ga(k) .le. al1*vp(k))goto 16721 iy(k)=1 ixx=1 16721 continue 16701 continue continue if(ixx.eq.1) go to 10880 goto 16202 16661 continue goto 16201 16202 continue if(jxx .le. 
0)goto 16741 jerr=-10000-ilm goto 16122 16741 continue devi=0.0 do 16751 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 16761 i=1,no if(y(i,ic).le.0.0)goto 16761 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 16761 continue continue 16751 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ig.eq.0)goto 16122 if(ilm.lt.mnl)goto 16121 if(flmin.ge.1.0)goto 16121 if(nintot(ni,nx,nc,a(1,1,ilm),m,nin,is).gt.ne)goto 16122 if(dev(ilm).gt.devmax)goto 16122 if(dev(ilm)-dev(ilm-1).lt.sml)goto 16122 16121 continue 16122 continue g=log(q) do 16771 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 16771 continue continue deallocate(sxp,b,bs,v,r,xv,q,mm,is,xm,sc,ga,iy) return end subroutine lcmodval(nc,nx,a0,ca,ia,nin,x,ix,jx,n,f) implicit double precision(a-h,o-z) double precision a0(nc),ca(nx,nc),x(*),f(nc,n) integer ia(*),ix(*),jx(*) do 16781 ic=1,nc f(ic,:)=a0(ic) 16781 continue continue do 16791 j=1,nin k=ia(j) kb=ix(k) ke=ix(k+1)-1 do 16801 ic=1,nc f(ic,jx(kb:ke))=f(ic,jx(kb:ke))+ca(j,ic)*x(kb:ke) 16801 continue continue 16791 continue continue return end subroutine coxnet(parm,no,ni,x,y,d,g,w,jd,vp,cl,ne,nx,nlam,flmin,u *lam,thr, maxit,isd,lmu,ca,ia,nin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),d(no),g(no),w(no),vp(ni),ulam(nlam *) double precision ca(nx,nlam),dev(nlam),alm(nlam),cl(2,ni) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xs,ww,vq integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 16821 jerr=10000 return 16821 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return if(isd .le. 0)goto 16841 allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return 16841 continue call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 
0)goto 16861 jerr=7777 return 16861 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) ww=max(0d0,w) sw=sum(ww) if(sw .gt. 0.0)goto 16881 jerr=9999 return 16881 continue ww=ww/sw call cstandard(no,ni,x,ww,ju,isd,xs) if(isd .le. 0)goto 16901 do 16911 j=1,ni cl(:,j)=cl(:,j)*xs(j) 16911 continue continue 16901 continue call coxnet1(parm,no,ni,x,y,d,g,ww,ju,vq,cl,ne,nx,nlam,flmin,ulam, *thr, isd,maxit,lmu,ca,ia,nin,dev0,dev,alm,nlp,jerr) if(jerr.gt.0) return dev0=2.0*sw*dev0 if(isd .le. 0)goto 16931 do 16941 k=1,lmu nk=nin(k) ca(1:nk,k)=ca(1:nk,k)/xs(ia(1:nk)) 16941 continue continue 16931 continue deallocate(ww,ju,vq) if(isd.gt.0) deallocate(xs) return end subroutine cstandard(no,ni,x,w,ju,isd,xs) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),xs(ni) integer ju(ni) do 16951 j=1,ni if(ju(j).eq.0)goto 16951 xm=dot_product(w,x(:,j)) x(:,j)=x(:,j)-xm if(isd .le. 0)goto 16971 xs(j)=sqrt(dot_product(w,x(:,j)**2)) x(:,j)=x(:,j)/xs(j) 16971 continue 16951 continue continue return end subroutine coxnet1(parm,no,ni,x,y,d,g,q,ju,vp,cl,ne,nx,nlam,flmin, *ulam,cthri, isd,maxit,lmu,ao,m,kin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),q(no),d(no),g(no),vp(ni),ulam(nlam *) double precision ao(nx,nlam),dev(nlam),alm(nlam),cl(2,ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: w,dk,v,xs,wr double precision, dimension (:), allocatable :: a,as,f,dq double precision, dimension (:), allocatable :: e,uu,ga integer, dimension (:), allocatable :: jp,kp,mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) isd = isd*1 sml=sml*100.0 devmax=devmax*0.99/0.999 allocate(e(1:no),stat=jerr) if(jerr.ne.0) return allocate(uu(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:ni),stat=jerr) if(jerr.ne.0) return allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(as(1:ni),stat=jerr) if(jerr.ne.0) 
return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(jp(1:no),stat=jerr) if(jerr.ne.0) return allocate(kp(1:no),stat=jerr) if(jerr.ne.0) return allocate(dk(1:no),stat=jerr) if(jerr.ne.0) return allocate(wr(1:no),stat=jerr) if(jerr.ne.0) return allocate(dq(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return call groups(no,y,d,q,nk,kp,jp,t0,jerr) if(jerr.ne.0) go to 12220 alpha=parm oma=1.0-alpha nlm=0 ixx=0 al=0.0 dq=d*q call died(no,nk,dq,kp,jp,dk) a=0.0 f(1)=0.0 fmax=log(huge(f(1))*0.1) if(nonzero(no,g) .eq. 0)goto 16991 f=g-dot_product(q,g) e=q*exp(sign(min(abs(f),fmax),f)) goto 17001 16991 continue f=0.0 e=q 17001 continue continue r0=risk(no,ni,nk,dq,dk,f,e,kp,jp,uu) rr=-(dot_product(dk(1:nk),log(dk(1:nk)))+r0) dev0=rr do 17011 i=1,no if((y(i) .ge. t0) .and. (q(i) .gt. 0.0))goto 17031 w(i)=0.0 wr(i)=w(i) 17031 continue 17011 continue continue call outer(no,nk,dq,dk,kp,jp,e,wr,w,jerr,uu) if(jerr.ne.0) go to 12220 alf=1.0 if(flmin .ge. 1.0)goto 17051 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 17051 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) as=0.0 cthr=cthri*dev0 do 17061 j=1,ni if(ju(j).eq.0)goto 17061 ga(j)=abs(dot_product(wr,x(:,j))) 17061 continue continue do 17071 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 17091 al=ulam(ilm) goto 17081 17091 if(ilm .le. 2)goto 17101 al=al*alf goto 17081 17101 if(ilm .ne. 1)goto 17111 al=big goto 17121 17111 continue al0=0.0 do 17131 j=1,ni if(ju(j).eq.0)goto 17131 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 17131 continue continue al0=al0/max(parm,1.0d-3) al=alf*al0 17121 continue 17081 continue sa=alpha*al omal=oma*al tlam=alpha*(2.0*al-al0) do 17141 k=1,ni if(ixx(k).eq.1)goto 17141 if(ju(k).eq.0)goto 17141 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 17141 continue continue 10880 continue continue 17151 continue if(nlp .le. 
maxit)goto 17171 jerr=-ilm return 17171 continue if(nin.gt.0) as(m(1:nin))=a(m(1:nin)) call vars(no,ni,x,w,ixx,v) continue 17181 continue nlp=nlp+1 dli=0.0 do 17191 j=1,ni if(ixx(j).eq.0)goto 17191 u=a(j)*v(j)+dot_product(wr,x(:,j)) if(abs(u) .gt. vp(j)*sa)goto 17211 at=0.0 goto 17221 17211 continue at=max(cl(1,j),min(cl(2,j),sign(abs(u)-vp(j)*sa,u)/ (v(j)+vp(j)*o *mal))) 17221 continue continue if(at .eq. a(j))goto 17241 del=at-a(j) a(j)=at dli=max(dli,v(j)*del**2) wr=wr-del*w*x(:,j) f=f+del*x(:,j) if(mm(j) .ne. 0)goto 17261 nin=nin+1 if(nin.gt.nx)goto 17192 mm(j)=nin m(nin)=j 17261 continue 17241 continue 17191 continue 17192 continue if(nin.gt.nx)goto 17182 if(dli.lt.cthr)goto 17182 if(nlp .le. maxit)goto 17281 jerr=-ilm return 17281 continue continue 17291 continue nlp=nlp+1 dli=0.0 do 17301 l=1,nin j=m(l) u=a(j)*v(j)+dot_product(wr,x(:,j)) if(abs(u) .gt. vp(j)*sa)goto 17321 at=0.0 goto 17331 17321 continue at=max(cl(1,j),min(cl(2,j),sign(abs(u)-vp(j)*sa,u)/ (v(j)+vp(j)*o *mal))) 17331 continue continue if(at .eq. a(j))goto 17351 del=at-a(j) a(j)=at dli=max(dli,v(j)*del**2) wr=wr-del*w*x(:,j) f=f+del*x(:,j) 17351 continue 17301 continue continue if(dli.lt.cthr)goto 17292 if(nlp .le. maxit)goto 17371 jerr=-ilm return 17371 continue goto 17291 17292 continue goto 17181 17182 continue if(nin.gt.nx)goto 17152 e=q*exp(sign(min(abs(f),fmax),f)) call outer(no,nk,dq,dk,kp,jp,e,wr,w,jerr,uu) if(jerr .eq. 0)goto 17391 jerr=jerr-ilm go to 12220 17391 continue ix=0 do 17401 j=1,nin k=m(j) if(v(k)*(a(k)-as(k))**2.lt.cthr)goto 17401 ix=1 goto 17402 17401 continue 17402 continue if(ix .ne. 0)goto 17421 do 17431 k=1,ni if(ixx(k).eq.1)goto 17431 if(ju(k).eq.0)goto 17431 ga(k)=abs(dot_product(wr,x(:,k))) if(ga(k) .le. sa*vp(k))goto 17451 ixx(k)=1 ix=1 17451 continue 17431 continue continue if(ix.eq.1) go to 10880 goto 17152 17421 continue goto 17151 17152 continue if(nin .le. 
nx)goto 17471 jerr=-10000-ilm goto 17072 17471 continue if(nin.gt.0) ao(1:nin,ilm)=a(m(1:nin)) kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(risk(no,ni,nk,dq,dk,f,e,kp,jp,uu)-r0)/rr if(ilm.lt.mnl)goto 17071 if(flmin.ge.1.0)goto 17071 me=0 do 17481 j=1,nin if(ao(j,ilm).ne.0.0) me=me+1 17481 continue continue if(me.gt.ne)goto 17072 if((dev(ilm)-dev(ilm-mnl+1))/dev(ilm).lt.sml)goto 17072 if(dev(ilm).gt.devmax)goto 17072 17071 continue 17072 continue g=f 12220 continue deallocate(e,uu,w,dk,v,xs,f,wr,a,as,jp,kp,dq,mm,ga,ixx) return end subroutine cxmodval(ca,ia,nin,n,x,f) implicit double precision(a-h,o-z) double precision ca(nin),x(n,*),f(n) integer ia(nin) f=0.0 if(nin.le.0) return do 17491 i=1,n f(i)=f(i)+dot_product(ca(1:nin),x(i,ia(1:nin))) 17491 continue continue return end subroutine groups(no,y,d,q,nk,kp,jp,t0,jerr) implicit double precision(a-h,o-z) double precision y(no),d(no),q(no) integer jp(no),kp(*) do 17501 j=1,no jp(j)=j 17501 continue continue call psort7(y,jp,1,no) nj=0 do 17511 j=1,no if(q(jp(j)).le.0.0)goto 17511 nj=nj+1 jp(nj)=jp(j) 17511 continue continue if(nj .ne. 0)goto 17531 jerr=20000 return 17531 continue j=1 continue 17541 if(d(jp(j)).gt.0.0)goto 17542 j=j+1 if(j.gt.nj)goto 17542 goto 17541 17542 continue if(j .lt. nj-1)goto 17561 jerr=30000 return 17561 continue t0=y(jp(j)) j0=j-1 if(j0 .le. 0)goto 17581 continue 17591 if(y(jp(j0)).lt.t0)goto 17592 j0=j0-1 if(j0.eq.0)goto 17592 goto 17591 17592 continue if(j0 .le. 0)goto 17611 nj=nj-j0 do 17621 j=1,nj jp(j)=jp(j+j0) 17621 continue continue 17611 continue 17581 continue jerr=0 nk=0 yk=t0 j=2 continue 17631 continue continue 17641 if(d(jp(j)).gt.0.0.and.y(jp(j)).gt.yk)goto 17642 j=j+1 if(j.gt.nj)goto 17642 goto 17641 17642 continue nk=nk+1 kp(nk)=j-1 if(j.gt.nj)goto 17632 if(j .ne. 
nj)goto 17661 nk=nk+1 kp(nk)=nj goto 17632 17661 continue yk=y(jp(j)) j=j+1 goto 17631 17632 continue return end subroutine outer(no,nk,d,dk,kp,jp,e,wr,w,jerr,u) implicit double precision(a-h,o-z) double precision d(no),dk(nk),wr(no),w(no) double precision e(no),u(no),b,c integer kp(nk),jp(no) call usk(no,nk,kp,jp,e,u) b=dk(1)/u(1) c=dk(1)/u(1)**2 jerr=0 do 17671 j=1,kp(1) i=jp(j) w(i)=e(i)*(b-e(i)*c) if(w(i) .gt. 0.0)goto 17691 jerr=-30000 return 17691 continue wr(i)=d(i)-e(i)*b 17671 continue continue do 17701 k=2,nk j1=kp(k-1)+1 j2=kp(k) b=b+dk(k)/u(k) c=c+dk(k)/u(k)**2 do 17711 j=j1,j2 i=jp(j) w(i)=e(i)*(b-e(i)*c) if(w(i) .gt. 0.0)goto 17731 jerr=-30000 return 17731 continue wr(i)=d(i)-e(i)*b 17711 continue continue 17701 continue continue return end subroutine vars(no,ni,x,w,ixx,v) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),v(ni) integer ixx(ni) do 17741 j=1,ni if(ixx(j).gt.0) v(j)=dot_product(w,x(:,j)**2) 17741 continue continue return end subroutine died(no,nk,d,kp,jp,dk) implicit double precision(a-h,o-z) double precision d(no),dk(nk) integer kp(nk),jp(no) dk(1)=sum(d(jp(1:kp(1)))) do 17751 k=2,nk dk(k)=sum(d(jp((kp(k-1)+1):kp(k)))) 17751 continue continue return end subroutine usk(no,nk,kp,jp,e,u) implicit double precision(a-h,o-z) double precision e(no),u(nk),h integer kp(nk),jp(no) h=0.0 do 17761 k=nk,1,-1 j2=kp(k) j1=1 if(k.gt.1) j1=kp(k-1)+1 do 17771 j=j2,j1,-1 h=h+e(jp(j)) 17771 continue continue u(k)=h 17761 continue continue return end function risk(no,ni,nk,d,dk,f,e,kp,jp,u) implicit double precision(a-h,o-z) double precision d(no),dk(nk),f(no) integer kp(nk),jp(no) double precision e(no),u(nk) ni = ni*1 call usk(no,nk,kp,jp,e,u) u=log(u) risk=dot_product(d,f)-dot_product(dk,u) return end subroutine loglike(no,ni,x,y,d,g,w,nlam,a,flog,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),d(no),g(no),w(no),a(ni,nlam),flog( *nlam) double precision, dimension (:), allocatable :: dk,f,xm,dq,q double precision, 
dimension (:), allocatable :: e,uu integer, dimension (:), allocatable :: jp,kp allocate(e(1:no),stat=jerr) if(jerr.ne.0) return allocate(q(1:no),stat=jerr) if(jerr.ne.0) return allocate(uu(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return allocate(dk(1:no),stat=jerr) if(jerr.ne.0) return allocate(jp(1:no),stat=jerr) if(jerr.ne.0) return allocate(kp(1:no),stat=jerr) if(jerr.ne.0) return allocate(dq(1:no),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return q=max(0d0,w) sw=sum(q) if(sw .gt. 0.0)goto 17791 jerr=9999 go to 12220 17791 continue call groups(no,y,d,q,nk,kp,jp,t0,jerr) if(jerr.ne.0) go to 12220 fmax=log(huge(e(1))*0.1) dq=d*q call died(no,nk,dq,kp,jp,dk) gm=dot_product(q,g)/sw do 17801 j=1,ni xm(j)=dot_product(q,x(:,j))/sw 17801 continue continue do 17811 lam=1,nlam do 17821 i=1,no f(i)=g(i)-gm+dot_product(a(:,lam),(x(i,:)-xm)) e(i)=q(i)*exp(sign(min(abs(f(i)),fmax),f(i))) 17821 continue continue flog(lam)=risk(no,ni,nk,dq,dk,f,e,kp,jp,uu) 17811 continue continue 12220 continue deallocate(e,uu,dk,f,jp,kp,dq) return end subroutine fishnet(parm,no,ni,x,y,g,w,jd,vp,cl,ne,nx,nlam,flmin,ul *am,thr, isd,intr,maxit,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),w(no),vp(ni),ulam(nlam) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam),cl(2,ni) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 17841 jerr=10000 return 17841 continue if(minval(y) .ge. 0.0)goto 17861 jerr=8888 return 17861 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return if(isd .le. 
0)goto 17881 allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return 17881 continue call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 17901 jerr=7777 go to 12220 17901 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) ww=max(0d0,w) sw=sum(ww) if(sw .gt. 0.0)goto 17921 jerr=9999 go to 12220 17921 continue ww=ww/sw call lstandard1(no,ni,x,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 17941 do 17951 j=1,ni cl(:,j)=cl(:,j)*xs(j) 17951 continue continue 17941 continue call fishnet1(parm,no,ni,x,y,g,ww,ju,vq,cl,ne,nx,nlam,flmin,ulam,t *hr, isd,intr,maxit,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) if(jerr.gt.0) go to 12220 dev0=2.0*sw*dev0 do 17961 k=1,lmu nk=nin(k) if(isd.gt.0) ca(1:nk,k)=ca(1:nk,k)/xs(ia(1:nk)) if(intr .ne. 0)goto 17981 a0(k)=0.0 goto 17991 17981 continue a0(k)=a0(k)-dot_product(ca(1:nk,k),xm(ia(1:nk))) 17991 continue continue 17961 continue continue 12220 continue deallocate(ww,ju,vq,xm) if(isd.gt.0) deallocate(xs) return end subroutine fishnet1(parm,no,ni,x,y,g,q,ju,vp,cl,ne,nx,nlam,flmin,u *lam,shri, isd,intr,maxit,lmu,a0,ca,m,kin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),q(no),vp(ni),ulam(nlam) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam),cl(2,ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: t,w,wr,v,a,f,as,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) sml=sml*10.0 isd = isd*1 allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(as(1:ni),stat=jerr) if(jerr.ne.0) return allocate(t(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(wr(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:ni),stat=jerr) if(jerr.ne.0) return allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return bta=parm 
omb=1.0-bta t=q*y yb=sum(t) fmax=log(huge(bta)*0.1) if(nonzero(no,g) .ne. 0)goto 18011 if(intr .eq. 0)goto 18031 w=q*yb az=log(yb) f=az dv0=yb*(az-1.0) goto 18041 18031 continue w=q az=0.0 f=az dv0=-1.0 18041 continue continue goto 18051 18011 continue w=q*exp(sign(min(abs(g),fmax),g)) v0=sum(w) if(intr .eq. 0)goto 18071 eaz=yb/v0 w=eaz*w az=log(eaz) f=az+g dv0=dot_product(t,g)-yb*(1.0-az) goto 18081 18071 continue az=0.0 f=g dv0=dot_product(t,g)-v0 18081 continue continue 18051 continue continue a=0.0 as=0.0 wr=t-w v0=1.0 if(intr.ne.0) v0=yb dvr=-yb do 18091 i=1,no if(t(i).gt.0.0) dvr=dvr+t(i)*log(y(i)) 18091 continue continue dvr=dvr-dv0 dev0=dvr alf=1.0 if(flmin .ge. 1.0)goto 18111 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 18111 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) shr=shri*dev0 ixx=0 al=0.0 do 18121 j=1,ni if(ju(j).eq.0)goto 18121 ga(j)=abs(dot_product(wr,x(:,j))) 18121 continue continue do 18131 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 18151 al=ulam(ilm) goto 18141 18151 if(ilm .le. 2)goto 18161 al=al*alf goto 18141 18161 if(ilm .ne. 1)goto 18171 al=big goto 18181 18171 continue al0=0.0 do 18191 j=1,ni if(ju(j).eq.0)goto 18191 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 18191 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 18181 continue 18141 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 18201 k=1,ni if(ixx(k).eq.1)goto 18201 if(ju(k).eq.0)goto 18201 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 18201 continue continue 10880 continue continue 18211 continue if(nlp .le. maxit)goto 18231 jerr=-ilm return 18231 continue az0=az if(nin.gt.0) as(m(1:nin))=a(m(1:nin)) do 18241 j=1,ni if(ixx(j).ne.0) v(j)=dot_product(w,x(:,j)**2) 18241 continue continue continue 18251 continue nlp=nlp+1 dlx=0.0 do 18261 k=1,ni if(ixx(k).eq.0)goto 18261 ak=a(k) u=dot_product(wr,x(:,k))+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 
0.0)goto 18281 a(k)=0.0 goto 18291 18281 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 18291 continue continue if(a(k).eq.ak)goto 18261 d=a(k)-ak dlx=max(dlx,v(k)*d**2) wr=wr-d*w*x(:,k) f=f+d*x(:,k) if(mm(k) .ne. 0)goto 18311 nin=nin+1 if(nin.gt.nx)goto 18262 mm(k)=nin m(nin)=k 18311 continue 18261 continue 18262 continue if(nin.gt.nx)goto 18252 if(intr .eq. 0)goto 18331 d=sum(wr)/v0 az=az+d dlx=max(dlx,v0*d**2) wr=wr-d*w f=f+d 18331 continue if(dlx.lt.shr)goto 18252 if(nlp .le. maxit)goto 18351 jerr=-ilm return 18351 continue continue 18361 continue nlp=nlp+1 dlx=0.0 do 18371 l=1,nin k=m(l) ak=a(k) u=dot_product(wr,x(:,k))+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 18391 a(k)=0.0 goto 18401 18391 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 18401 continue continue if(a(k).eq.ak)goto 18371 d=a(k)-ak dlx=max(dlx,v(k)*d**2) wr=wr-d*w*x(:,k) f=f+d*x(:,k) 18371 continue continue if(intr .eq. 0)goto 18421 d=sum(wr)/v0 az=az+d dlx=max(dlx,v0*d**2) wr=wr-d*w f=f+d 18421 continue if(dlx.lt.shr)goto 18362 if(nlp .le. maxit)goto 18441 jerr=-ilm return 18441 continue goto 18361 18362 continue goto 18251 18252 continue if(nin.gt.nx)goto 18212 w=q*exp(sign(min(abs(f),fmax),f)) v0=sum(w) wr=t-w if(v0*(az-az0)**2 .ge. shr)goto 18461 ix=0 do 18471 j=1,nin k=m(j) if(v(k)*(a(k)-as(k))**2.lt.shr)goto 18471 ix=1 goto 18472 18471 continue 18472 continue if(ix .ne. 0)goto 18491 do 18501 k=1,ni if(ixx(k).eq.1)goto 18501 if(ju(k).eq.0)goto 18501 ga(k)=abs(dot_product(wr,x(:,k))) if(ga(k) .le. al1*vp(k))goto 18521 ixx(k)=1 ix=1 18521 continue 18501 continue continue if(ix.eq.1) go to 10880 goto 18212 18491 continue 18461 continue goto 18211 18212 continue if(nin .le. 
nx)goto 18541 jerr=-10000-ilm goto 18132 18541 continue if(nin.gt.0) ca(1:nin,ilm)=a(m(1:nin)) kin(ilm)=nin a0(ilm)=az alm(ilm)=al lmu=ilm dev(ilm)=(dot_product(t,f)-v0-dv0)/dvr if(ilm.lt.mnl)goto 18131 if(flmin.ge.1.0)goto 18131 me=0 do 18551 j=1,nin if(ca(j,ilm).ne.0.0) me=me+1 18551 continue continue if(me.gt.ne)goto 18132 if((dev(ilm)-dev(ilm-mnl+1))/dev(ilm).lt.sml)goto 18132 if(dev(ilm).gt.devmax)goto 18132 18131 continue 18132 continue g=f continue deallocate(t,w,wr,v,a,f,as,mm,ga,ixx) return end function nonzero(n,v) implicit double precision(a-h,o-z) double precision v(n) nonzero=0 do 18561 i=1,n if(v(i) .eq. 0.0)goto 18581 nonzero=1 return 18581 continue 18561 continue continue return end subroutine solns(ni,nx,lmu,a,ia,nin,b) implicit double precision(a-h,o-z) double precision a(nx,lmu),b(ni,lmu) integer ia(nx),nin(lmu) do 18591 lam=1,lmu call uncomp(ni,a(:,lam),ia,nin(lam),b(:,lam)) 18591 continue continue return end subroutine lsolns(ni,nx,nc,lmu,a,ia,nin,b) implicit double precision(a-h,o-z) double precision a(nx,nc,lmu),b(ni,nc,lmu) integer ia(nx),nin(lmu) do 18601 lam=1,lmu call luncomp(ni,nx,nc,a(1,1,lam),ia,nin(lam),b(1,1,lam)) 18601 continue continue return end subroutine deviance(no,ni,x,y,g,q,nlam,a0,a,flog,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),q(no),a(ni,nlam),a0(nlam),fl *og(nlam) double precision, dimension (:), allocatable :: w if(minval(y) .ge. 0.0)goto 18621 jerr=8888 return 18621 continue allocate(w(1:no),stat=jerr) if(jerr.ne.0) return w=max(0d0,q) sw=sum(w) if(sw .gt. 
0.0)goto 18641 jerr=9999 go to 12220 18641 continue yb=dot_product(w,y)/sw fmax=log(huge(y(1))*0.1) do 18651 lam=1,nlam s=0.0 do 18661 i=1,no if(w(i).le.0.0)goto 18661 f=g(i)+a0(lam)+dot_product(a(:,lam),x(i,:)) s=s+w(i)*(y(i)*f-exp(sign(min(abs(f),fmax),f))) 18661 continue continue flog(lam)=2.0*(sw*yb*(log(yb)-1.0)-s) 18651 continue continue 12220 continue deallocate(w) return end subroutine spfishnet(parm,no,ni,x,ix,jx,y,g,w,jd,vp,cl,ne,nx,nlam, *flmin, ulam,thr,isd,intr,maxit,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp, *jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 18681 jerr=10000 return 18681 continue if(minval(y) .ge. 0.0)goto 18701 jerr=8888 return 18701 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 18721 jerr=7777 go to 12220 18721 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) ww=max(0d0,w) sw=sum(ww) if(sw .gt. 0.0)goto 18741 jerr=9999 go to 12220 18741 continue ww=ww/sw call splstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 18761 do 18771 j=1,ni cl(:,j)=cl(:,j)*xs(j) 18771 continue continue 18761 continue call spfishnet1(parm,no,ni,x,ix,jx,y,g,ww,ju,vq,cl,ne,nx,nlam,flmi *n,ulam,thr, isd,intr,maxit,xm,xs,lmu,a0,ca,ia,nin,dev0,dev,alm,nl *p,jerr) if(jerr.gt.0) go to 12220 dev0=2.0*sw*dev0 do 18781 k=1,lmu nk=nin(k) if(isd.gt.0) ca(1:nk,k)=ca(1:nk,k)/xs(ia(1:nk)) if(intr .ne. 
0)goto 18801 a0(k)=0.0 goto 18811 18801 continue a0(k)=a0(k)-dot_product(ca(1:nk,k),xm(ia(1:nk))) 18811 continue continue 18781 continue continue 12220 continue deallocate(ww,ju,vq,xm,xs) return end subroutine spfishnet1(parm,no,ni,x,ix,jx,y,g,q,ju,vp,cl,ne,nx,nlam *,flmin,ulam, shri,isd,intr,maxit,xb,xs,lmu,a0,ca,m,kin,dev0,dev,a *lm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),q(no),vp(ni),ulam(nlam),xb(ni),x *s(ni) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam),cl(2,ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: qy,t,w,wr,v double precision, dimension (:), allocatable :: a,as,xm,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) sml=sml*10.0 isd = isd*1 allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(as(1:ni),stat=jerr) if(jerr.ne.0) return allocate(t(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(wr(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(qy(1:no),stat=jerr) if(jerr.ne.0) return bta=parm omb=1.0-bta fmax=log(huge(bta)*0.1) qy=q*y yb=sum(qy) if(nonzero(no,g) .ne. 0)goto 18831 t=0.0 if(intr .eq. 0)goto 18851 w=q*yb az=log(yb) uu=az xm=yb*xb dv0=yb*(az-1.0) goto 18861 18851 continue w=q xm=0.0 uu=0.0 az=uu dv0=-1.0 18861 continue continue goto 18871 18831 continue w=q*exp(sign(min(abs(g),fmax),g)) ww=sum(w) t=g if(intr .eq. 
0)goto 18891 eaz=yb/ww w=eaz*w az=log(eaz) uu=az dv0=dot_product(qy,g)-yb*(1.0-az) goto 18901 18891 continue uu=0.0 az=uu dv0=dot_product(qy,g)-ww 18901 continue continue do 18911 j=1,ni if(ju(j).eq.0)goto 18911 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) 18911 continue continue 18871 continue continue tt=yb*uu ww=1.0 if(intr.ne.0) ww=yb wr=qy-q*(yb*(1.0-uu)) a=0.0 as=0.0 dvr=-yb do 18921 i=1,no if(qy(i).gt.0.0) dvr=dvr+qy(i)*log(y(i)) 18921 continue continue dvr=dvr-dv0 dev0=dvr alf=1.0 if(flmin .ge. 1.0)goto 18941 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 18941 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) shr=shri*dev0 al=0.0 ixx=0 do 18951 j=1,ni if(ju(j).eq.0)goto 18951 jb=ix(j) je=ix(j+1)-1 ga(j)=abs(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(j)-ww*xb(j) *)-xb(j)*tt)/xs(j) 18951 continue continue do 18961 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 18981 al=ulam(ilm) goto 18971 18981 if(ilm .le. 2)goto 18991 al=al*alf goto 18971 18991 if(ilm .ne. 1)goto 19001 al=big goto 19011 19001 continue al0=0.0 do 19021 j=1,ni if(ju(j).eq.0)goto 19021 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 19021 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 19011 continue 18971 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 19031 k=1,ni if(ixx(k).eq.1)goto 19031 if(ju(k).eq.0)goto 19031 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 19031 continue continue 10880 continue continue 19041 continue if(nlp .le. 
maxit)goto 19061 jerr=-ilm return 19061 continue az0=az if(nin.gt.0) as(m(1:nin))=a(m(1:nin)) do 19071 j=1,ni if(ixx(j).eq.0)goto 19071 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) v(j)=(dot_product(w(jx(jb:je)),x(jb:je)**2) -2.0*xb(j)*xm(j)+ww*x *b(j)**2)/xs(j)**2 19071 continue continue continue 19081 continue nlp=nlp+1 dlx=0.0 do 19091 k=1,ni if(ixx(k).eq.0)goto 19091 jb=ix(k) je=ix(k+1)-1 ak=a(k) u=(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(k)-ww*xb(k))-xb(k) **tt)/xs(k)+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 19111 a(k)=0.0 goto 19121 19111 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 19121 continue continue if(a(k).eq.ak)goto 19091 if(mm(k) .ne. 0)goto 19141 nin=nin+1 if(nin.gt.nx)goto 19092 mm(k)=nin m(nin)=k 19141 continue d=a(k)-ak dlx=max(dlx,v(k)*d**2) dv=d/xs(k) wr(jx(jb:je))=wr(jx(jb:je))-dv*w(jx(jb:je))*x(jb:je) t(jx(jb:je))=t(jx(jb:je))+dv*x(jb:je) uu=uu-dv*xb(k) tt=tt-dv*xm(k) 19091 continue 19092 continue if(nin.gt.nx)goto 19082 if(intr .eq. 0)goto 19161 d=tt/ww-uu az=az+d dlx=max(dlx,ww*d**2) uu=uu+d 19161 continue if(dlx.lt.shr)goto 19082 if(nlp .le. maxit)goto 19181 jerr=-ilm return 19181 continue continue 19191 continue nlp=nlp+1 dlx=0.0 do 19201 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 ak=a(k) u=(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(k)-ww*xb(k))-xb(k) **tt)/xs(k)+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 19221 a(k)=0.0 goto 19231 19221 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 19231 continue continue if(a(k).eq.ak)goto 19201 d=a(k)-ak dlx=max(dlx,v(k)*d**2) dv=d/xs(k) wr(jx(jb:je))=wr(jx(jb:je))-dv*w(jx(jb:je))*x(jb:je) t(jx(jb:je))=t(jx(jb:je))+dv*x(jb:je) uu=uu-dv*xb(k) tt=tt-dv*xm(k) 19201 continue continue if(intr .eq. 0)goto 19251 d=tt/ww-uu az=az+d dlx=max(dlx,ww*d**2) uu=uu+d 19251 continue if(dlx.lt.shr)goto 19192 if(nlp .le. 
maxit)goto 19271 jerr=-ilm return 19271 continue goto 19191 19192 continue goto 19081 19082 continue if(nin.gt.nx)goto 19042 euu=exp(sign(min(abs(uu),fmax),uu)) w=euu*q*exp(sign(min(abs(t),fmax),t)) ww=sum(w) wr=qy-w*(1.0-uu) tt=sum(wr) if(ww*(az-az0)**2 .ge. shr)goto 19291 kx=0 do 19301 j=1,nin k=m(j) if(v(k)*(a(k)-as(k))**2.lt.shr)goto 19301 kx=1 goto 19302 19301 continue 19302 continue if(kx .ne. 0)goto 19321 do 19331 j=1,ni if(ixx(j).eq.1)goto 19331 if(ju(j).eq.0)goto 19331 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) ga(j)=abs(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(j)-ww*xb(j) *)-xb(j)*tt)/xs(j) if(ga(j) .le. al1*vp(j))goto 19351 ixx(j)=1 kx=1 19351 continue 19331 continue continue if(kx.eq.1) go to 10880 goto 19042 19321 continue 19291 continue goto 19041 19042 continue if(nin .le. nx)goto 19371 jerr=-10000-ilm goto 18962 19371 continue if(nin.gt.0) ca(1:nin,ilm)=a(m(1:nin)) kin(ilm)=nin a0(ilm)=az alm(ilm)=al lmu=ilm dev(ilm)=(dot_product(qy,t)+yb*uu-ww-dv0)/dvr if(ilm.lt.mnl)goto 18961 if(flmin.ge.1.0)goto 18961 me=0 do 19381 j=1,nin if(ca(j,ilm).ne.0.0) me=me+1 19381 continue continue if(me.gt.ne)goto 18962 if((dev(ilm)-dev(ilm-mnl+1))/dev(ilm).lt.sml)goto 18962 if(dev(ilm).gt.devmax)goto 18962 18961 continue 18962 continue g=t+uu continue deallocate(t,w,wr,v,a,qy,xm,as,mm,ga,ixx) return end subroutine spdeviance(no,ni,x,ix,jx,y,g,q,nlam,a0,a,flog,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),q(no),a(ni,nlam),a0(nlam),flog(n *lam) integer ix(*),jx(*) double precision, dimension (:), allocatable :: w,f if(minval(y) .ge. 0.0)goto 19401 jerr=8888 return 19401 continue allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return w=max(0d0,q) sw=sum(w) if(sw .gt. 
0.0)goto 19421 jerr=9999 go to 12220 19421 continue yb=dot_product(w,y)/sw fmax=log(huge(y(1))*0.1) do 19431 lam=1,nlam f=a0(lam) do 19441 j=1,ni if(a(j,lam).eq.0.0)goto 19441 jb=ix(j) je=ix(j+1)-1 f(jx(jb:je))=f(jx(jb:je))+a(j,lam)*x(jb:je) 19441 continue continue f=f+g s=dot_product(w,y*f-exp(sign(min(abs(f),fmax),f))) flog(lam)=2.0*(sw*yb*(log(yb)-1.0)-s) 19431 continue continue 12220 continue deallocate(w,f) return end subroutine cspdeviance(no,x,ix,jx,y,g,q,nx,nlam,a0,ca,ia,nin,flog, *jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),q(no),ca(nx,nlam),a0(nlam),flog( *nlam) integer ix(*),jx(*),nin(nlam),ia(nx) double precision, dimension (:), allocatable :: w,f if(minval(y) .ge. 0.0)goto 19461 jerr=8888 return 19461 continue allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return w=max(0d0,q) sw=sum(w) if(sw .gt. 0.0)goto 19481 jerr=9999 go to 12220 19481 continue yb=dot_product(w,y)/sw fmax=log(huge(y(1))*0.1) do 19491 lam=1,nlam f=a0(lam) do 19501 k=1,nin(lam) j=ia(k) jb=ix(j) je=ix(j+1)-1 f(jx(jb:je))=f(jx(jb:je))+ca(k,lam)*x(jb:je) 19501 continue continue f=f+g s=dot_product(w,y*f-exp(sign(min(abs(f),fmax),f))) flog(lam)=2.0*(sw*yb*(log(yb)-1.0)-s) 19491 continue continue 12220 continue deallocate(w,f) return end subroutine multelnet(parm,no,ni,nr,x,y,w,jd,vp,cl,ne,nx,nlam, flm *in,ulam,thr,isd,jsd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr *) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nr),w(no),vp(ni),ca(nx,nr,nlam) double precision ulam(nlam),a0(nr,nlam),rsq(nlam),alm(nlam),cl(2,n *i) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 
0.0)goto 19521 jerr=10000 return 19521 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) call multelnetn(parm,no,ni,nr,x,y,w,jd,vq,cl,ne,nx,nlam,flmin,ulam *,thr,isd, jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) deallocate(vq) return end subroutine multelnetn(parm,no,ni,nr,x,y,w,jd,vp,cl,ne,nx,nlam,flmi *n,ulam,thr, isd,jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),x(no,ni),y(no,nr),w(no),ulam(nlam),cl(2,ni *) double precision ca(nx,nr,nlam),a0(nr,nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,ym,ys integer, dimension (:), allocatable :: ju double precision, dimension (:,:,:), allocatable :: clt allocate(clt(1:2,1:nr,1:ni),stat=jerr); if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ym(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ys(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 19541 jerr=7777 return 19541 continue call multstandard1(no,ni,nr,x,y,w,isd,jsd,intr,ju,xm,xs,ym,ys,xv,y *s0,jerr) if(jerr.ne.0) return do 19551 j=1,ni do 19561 k=1,nr do 19571 i=1,2 clt(i,k,j)=cl(i,j) 19571 continue continue 19561 continue continue 19551 continue continue if(isd .le. 0)goto 19591 do 19601 j=1,ni do 19611 k=1,nr do 19621 i=1,2 clt(i,k,j)=clt(i,k,j)*xs(j) 19621 continue continue 19611 continue continue 19601 continue continue 19591 continue if(jsd .le. 
0)goto 19641 do 19651 j=1,ni do 19661 k=1,nr do 19671 i=1,2 clt(i,k,j)=clt(i,k,j)/ys(k) 19671 continue continue 19661 continue continue 19651 continue continue 19641 continue call multelnet2(parm,ni,nr,ju,vp,clt,y,no,ne,nx,x,nlam,flmin,ulam, *thr,maxit,xv, ys0,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 19681 k=1,lmu nk=nin(k) do 19691 j=1,nr do 19701 l=1,nk ca(l,j,k)=ys(j)*ca(l,j,k)/xs(ia(l)) 19701 continue continue if(intr .ne. 0)goto 19721 a0(j,k)=0.0 goto 19731 19721 continue a0(j,k)=ym(j)-dot_product(ca(1:nk,j,k),xm(ia(1:nk))) 19731 continue continue 19691 continue continue 19681 continue continue deallocate(xm,xs,ym,ys,ju,xv,clt) return end subroutine multstandard1(no,ni,nr,x,y,w,isd,jsd,intr,ju, xm,xs,ym *,ys,xv,ys0,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nr),w(no),xm(ni),xs(ni),xv(ni),ym(n *r),ys(nr) integer ju(ni) double precision, dimension (:), allocatable :: v allocate(v(1:no),stat=jerr) if(jerr.ne.0) return w=w/sum(w) v=sqrt(w) if(intr .ne. 0)goto 19751 do 19761 j=1,ni if(ju(j).eq.0)goto 19761 xm(j)=0.0 x(:,j)=v*x(:,j) z=dot_product(x(:,j),x(:,j)) if(isd .le. 0)goto 19781 xbq=dot_product(v,x(:,j))**2 vc=z-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc goto 19791 19781 continue xs(j)=1.0 xv(j)=z 19791 continue continue 19761 continue continue ys0=0.0 do 19801 j=1,nr ym(j)=0.0 y(:,j)=v*y(:,j) z=dot_product(y(:,j),y(:,j)) if(jsd .le. 0)goto 19821 u=z-dot_product(v,y(:,j))**2 ys0=ys0+z/u ys(j)=sqrt(u) y(:,j)=y(:,j)/ys(j) goto 19831 19821 continue ys(j)=1.0 ys0=ys0+z 19831 continue continue 19801 continue continue go to 10700 19751 continue do 19841 j=1,ni if(ju(j).eq.0)goto 19841 xm(j)=dot_product(w,x(:,j)) x(:,j)=v*(x(:,j)-xm(j)) xv(j)=dot_product(x(:,j),x(:,j)) if(isd.gt.0) xs(j)=sqrt(xv(j)) 19841 continue continue if(isd .ne. 
0)goto 19861 xs=1.0 goto 19871 19861 continue do 19881 j=1,ni if(ju(j).eq.0)goto 19881 x(:,j)=x(:,j)/xs(j) 19881 continue continue xv=1.0 19871 continue continue ys0=0.0 do 19891 j=1,nr ym(j)=dot_product(w,y(:,j)) y(:,j)=v*(y(:,j)-ym(j)) z=dot_product(y(:,j),y(:,j)) if(jsd .le. 0)goto 19911 ys(j)=sqrt(z) y(:,j)=y(:,j)/ys(j) goto 19921 19911 continue ys0=ys0+z 19921 continue continue 19891 continue continue if(jsd .ne. 0)goto 19941 ys=1.0 goto 19951 19941 continue ys0=nr 19951 continue continue 10700 continue deallocate(v) return end subroutine multelnet2(beta,ni,nr,ju,vp,cl,y,no,ne,nx,x,nlam,flmin, *ulam,thri, maxit,xv,ys0,lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),y(no,nr),x(no,ni),ulam(nlam),ao(nx,nr,nlam *) double precision rsqo(nlam),almo(nlam),xv(ni),cl(2,nr,ni) integer ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: g,gk,del,gj integer, dimension (:), allocatable :: mm,ix,isc double precision, dimension (:,:), allocatable :: a allocate(a(1:nr,1:ni),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(gj(1:nr),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nr),stat=jerr) if(jerr.ne.0) return allocate(del(1:nr),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ix(1:ni),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nr),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta ix=0 thr=thri*ys0/nr alf=1.0 if(flmin .ge. 1.0)goto 19971 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 19971 continue rsq=ys0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) alm=0.0 do 19981 j=1,ni if(ju(j).eq.0)goto 19981 g(j)=0.0 do 19991 k=1,nr g(j)=g(j)+dot_product(y(:,k),x(:,j))**2 19991 continue continue g(j)=sqrt(g(j)) 19981 continue continue do 20001 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 20021 alm=ulam(m) goto 20011 20021 if(m .le. 
2)goto 20031 alm=alm*alf goto 20011 20031 if(m .ne. 1)goto 20041 alm=big goto 20051 20041 continue alm0=0.0 do 20061 j=1,ni if(ju(j).eq.0)goto 20061 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 20061 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 20051 continue 20011 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 20071 k=1,ni if(ix(k).eq.1)goto 20071 if(ju(k).eq.0)goto 20071 if(g(k).gt.tlam*vp(k)) ix(k)=1 20071 continue continue continue 20081 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. maxit)goto 20101 jerr=-m return 20101 continue nlp=nlp+1 dlx=0.0 do 20111 k=1,ni if(ix(k).eq.0)goto 20111 gkn=0.0 do 20121 j=1,nr gj(j)=dot_product(y(:,j),x(:,k)) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 20121 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 20141 a(:,k)=0.0 goto 20151 20141 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 20151 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 20111 do 20161 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(:,j)=y(:,j)-del(j)*x(:,k) dlx=max(dlx,xv(k)*del(j)**2) 20161 continue continue if(mm(k) .ne. 0)goto 20181 nin=nin+1 if(nin.gt.nx)goto 20112 mm(k)=nin ia(nin)=k 20181 continue 20111 continue 20112 continue if(nin.gt.nx)goto 20082 if(dlx .ge. thr)goto 20201 ixx=0 do 20211 k=1,ni if(ix(k).eq.1)goto 20211 if(ju(k).eq.0)goto 20211 g(k)=0.0 do 20221 j=1,nr g(k)=g(k)+dot_product(y(:,j),x(:,k))**2 20221 continue continue g(k)=sqrt(g(k)) if(g(k) .le. ab*vp(k))goto 20241 ix(k)=1 ixx=1 20241 continue 20211 continue continue if(ixx.eq.1) go to 10880 goto 20082 20201 continue if(nlp .le. 
maxit)goto 20261 jerr=-m return 20261 continue 10360 continue iz=1 continue 20271 continue nlp=nlp+1 dlx=0.0 do 20281 l=1,nin k=ia(l) gkn=0.0 do 20291 j=1,nr gj(j)=dot_product(y(:,j),x(:,k)) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 20291 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 20311 a(:,k)=0.0 goto 20321 20311 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 20321 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 20281 do 20331 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(:,j)=y(:,j)-del(j)*x(:,k) dlx=max(dlx,xv(k)*del(j)**2) 20331 continue continue 20281 continue continue if(dlx.lt.thr)goto 20272 if(nlp .le. maxit)goto 20351 jerr=-m return 20351 continue goto 20271 20272 continue jz=0 goto 20081 20082 continue if(nin .le. nx)goto 20371 jerr=-10000-m goto 20002 20371 continue if(nin .le. 0)goto 20391 do 20401 j=1,nr ao(1:nin,j,m)=a(j,ia(1:nin)) 20401 continue continue 20391 continue kin(m)=nin rsqo(m)=1.0-rsq/ys0 almo(m)=alm lmu=m if(m.lt.mnl)goto 20001 if(flmin.ge.1.0)goto 20001 me=0 do 20411 j=1,nin if(ao(j,1,m).ne.0.0) me=me+1 20411 continue continue if(me.gt.ne)goto 20002 if(rsq0-rsq.lt.sml*rsq)goto 20002 if(rsqo(m).gt.rsqmax)goto 20002 20001 continue 20002 continue deallocate(a,mm,g,ix,del,gj,gk) return end subroutine chkbnds(nr,gk,gkn,xv,cl,al1,al2,a,isc,jerr) implicit double precision(a-h,o-z) double precision gk(nr),cl(2,nr),a(nr) integer isc(nr) kerr=0 al1p=1.0+al1/xv al2p=al2/xv isc=0 gsq=gkn**2 asq=dot_product(a,a) usq=0.0 u=0.0 kn=-1 continue 20421 continue vmx=0.0 do 20431 k=1,nr v=max(a(k)-cl(2,k),cl(1,k)-a(k)) if(v .le. vmx)goto 20451 vmx=v kn=k 20451 continue 20431 continue continue if(vmx.le.0.0)goto 20422 if(isc(kn).ne.0)goto 20422 gsq=gsq-gk(kn)**2 g=sqrt(gsq)/xv if(a(kn).lt.cl(1,kn)) u=cl(1,kn) if(a(kn).gt.cl(2,kn)) u=cl(2,kn) usq=usq+u**2 if(usq .ne. 
0.0)goto 20471 b=max(0d0,(g-al2p)/al1p) goto 20481 20471 continue b0=sqrt(asq-a(kn)**2) b=bnorm(b0,al1p,al2p,g,usq,kerr) if(kerr.ne.0)goto 20422 20481 continue continue asq=usq+b**2 if(asq .gt. 0.0)goto 20501 a=0.0 goto 20422 20501 continue a(kn)=u isc(kn)=1 f=1.0/(xv*(al1p+al2p/sqrt(asq))) do 20511 j=1,nr if(isc(j).eq.0) a(j)=f*gk(j) 20511 continue continue goto 20421 20422 continue if(kerr.ne.0) jerr=kerr return end subroutine chkbnds1(nr,gk,gkn,xv,cl1,cl2,al1,al2,a,isc,jerr) implicit double precision(a-h,o-z) double precision gk(nr),a(nr) integer isc(nr) kerr=0 al1p=1.0+al1/xv al2p=al2/xv isc=0 gsq=gkn**2 asq=dot_product(a,a) usq=0.0 u=0.0 kn=-1 continue 20521 continue vmx=0.0 do 20531 k=1,nr v=max(a(k)-cl2,cl1-a(k)) if(v .le. vmx)goto 20551 vmx=v kn=k 20551 continue 20531 continue continue if(vmx.le.0.0)goto 20522 if(isc(kn).ne.0)goto 20522 gsq=gsq-gk(kn)**2 g=sqrt(gsq)/xv if(a(kn).lt.cl1) u=cl1 if(a(kn).gt.cl2) u=cl2 usq=usq+u**2 if(usq .ne. 0.0)goto 20571 b=max(0d0,(g-al2p)/al1p) goto 20581 20571 continue b0=sqrt(asq-a(kn)**2) b=bnorm(b0,al1p,al2p,g,usq,kerr) if(kerr.ne.0)goto 20522 20581 continue continue asq=usq+b**2 if(asq .gt. 0.0)goto 20601 a=0.0 goto 20522 20601 continue a(kn)=u isc(kn)=1 f=1.0/(xv*(al1p+al2p/sqrt(asq))) do 20611 j=1,nr if(isc(j).eq.0) a(j)=f*gk(j) 20611 continue continue goto 20521 20522 continue if(kerr.ne.0) jerr=kerr return end function bnorm(b0,al1p,al2p,g,usq,jerr) implicit double precision(a-h,o-z) data thr,mxit /1.0d-10,100/ b=b0 zsq=b**2+usq if(zsq .gt. 0.0)goto 20631 bnorm=0.0 return 20631 continue z=sqrt(zsq) f=b*(al1p+al2p/z)-g jerr=0 do 20641 it=1,mxit b=b-f/(al1p+al2p*usq/(z*zsq)) zsq=b**2+usq if(zsq .gt. 0.0)goto 20661 bnorm=0.0 return 20661 continue z=sqrt(zsq) f=b*(al1p+al2p/z)-g if(abs(f).le.thr)goto 20642 if(b .gt. 
0.0)goto 20681 b=0.0 goto 20642 20681 continue 20641 continue 20642 continue bnorm=b if(it.ge.mxit) jerr=90000 return entry chg_bnorm(arg,irg) bnorm = 0.0 thr=arg mxit=irg return entry get_bnorm(arg,irg) bnorm = 0.0 arg=thr irg=mxit return end subroutine multsolns(ni,nx,nr,lmu,a,ia,nin,b) implicit double precision(a-h,o-z) double precision a(nx,nr,lmu),b(ni,nr,lmu) integer ia(nx),nin(lmu) do 20691 lam=1,lmu call multuncomp(ni,nr,nx,a(1,1,lam),ia,nin(lam),b(1,1,lam)) 20691 continue continue return end subroutine multuncomp(ni,nr,nx,ca,ia,nin,a) implicit double precision(a-h,o-z) double precision ca(nx,nr),a(ni,nr) integer ia(nx) a=0.0 if(nin .le. 0)goto 20711 do 20721 j=1,nr a(ia(1:nin),j)=ca(1:nin,j) 20721 continue continue 20711 continue return end subroutine multmodval(nx,nr,a0,ca,ia,nin,n,x,f) implicit double precision(a-h,o-z) double precision a0(nr),ca(nx,nr),x(n,*),f(nr,n) integer ia(nx) do 20731 i=1,n f(:,i)=a0 20731 continue continue if(nin.le.0) return do 20741 i=1,n do 20751 j=1,nr f(j,i)=f(j,i)+dot_product(ca(1:nin,j),x(i,ia(1:nin))) 20751 continue continue 20741 continue continue return end subroutine multspelnet(parm,no,ni,nr,x,ix,jx,y,w,jd,vp,cl,ne,nx, *nlam,flmin,ulam,thr,isd, jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm, *nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nr),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nr,nlam),a0(nr,nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 
0.0)goto 20771 jerr=10000 return 20771 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) call multspelnetn(parm,no,ni,nr,x,ix,jx,y,w,jd,vq,cl,ne,nx,nlam,fl *min, ulam,thr,isd,jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jer *r) deallocate(vq) return end subroutine multspelnetn(parm,no,ni,nr,x,ix,jx,y,w,jd,vp,cl,ne,nx,n *lam,flmin, ulam,thr,isd,jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,n *lp,jerr) implicit double precision(a-h,o-z) double precision x(*),vp(ni),y(no,nr),w(no),ulam(nlam),cl(2,ni) double precision ca(nx,nr,nlam),a0(nr,nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,ym,ys integer, dimension (:), allocatable :: ju double precision, dimension (:,:,:), allocatable :: clt allocate(clt(1:2,1:nr,1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ym(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ys(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 20791 jerr=7777 return 20791 continue call multspstandard1(no,ni,nr,x,ix,jx,y,w,ju,isd,jsd,intr, xm,xs, *ym,ys,xv,ys0,jerr) if(jerr.ne.0) return do 20801 j=1,ni do 20811 k=1,nr do 20821 i=1,2 clt(i,k,j)=cl(i,j) 20821 continue continue 20811 continue continue 20801 continue continue if(isd .le. 0)goto 20841 do 20851 j=1,ni do 20861 k=1,nr do 20871 i=1,2 clt(i,k,j)=clt(i,k,j)*xs(j) 20871 continue continue 20861 continue continue 20851 continue continue 20841 continue if(jsd .le. 
0)goto 20891 do 20901 j=1,ni do 20911 k=1,nr do 20921 i=1,2 clt(i,k,j)=clt(i,k,j)/ys(k) 20921 continue continue 20911 continue continue 20901 continue continue 20891 continue call multspelnet2(parm,ni,nr,y,w,no,ne,nx,x,ix,jx,ju,vp,clt,nlam,f *lmin, ulam,thr,maxit,xm,xs,xv,ys0,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 20931 k=1,lmu nk=nin(k) do 20941 j=1,nr do 20951 l=1,nk ca(l,j,k)=ys(j)*ca(l,j,k)/xs(ia(l)) 20951 continue continue if(intr .ne. 0)goto 20971 a0(j,k)=0.0 goto 20981 20971 continue a0(j,k)=ym(j)-dot_product(ca(1:nk,j,k),xm(ia(1:nk))) 20981 continue continue 20941 continue continue 20931 continue continue deallocate(xm,xs,ym,ys,ju,xv,clt) return end subroutine multspstandard1(no,ni,nr,x,ix,jx,y,w,ju,isd,jsd,intr, *xm,xs,ym,ys,xv,ys0,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nr),w(no),xm(ni),xs(ni),xv(ni),ym(nr),y *s(nr) integer ix(*),jx(*),ju(ni) jerr = jerr*1 w=w/sum(w) if(intr .ne. 0)goto 21001 do 21011 j=1,ni if(ju(j).eq.0)goto 21011 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 z=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .le. 0)goto 21031 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=z-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 21041 21031 continue xs(j)=1.0 xv(j)=z 21041 continue continue 21011 continue continue ys0=0.0 do 21051 j=1,nr ym(j)=0.0 z=dot_product(w,y(:,j)**2) if(jsd .le. 0)goto 21071 u=z-dot_product(w,y(:,j))**2 ys0=ys0+z/u ys(j)=sqrt(u) y(:,j)=y(:,j)/ys(j) goto 21081 21071 continue ys(j)=1.0 ys0=ys0+z 21081 continue continue 21051 continue continue return 21001 continue do 21091 j=1,ni if(ju(j).eq.0)goto 21091 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd.gt.0) xs(j)=sqrt(xv(j)) 21091 continue continue if(isd .ne. 0)goto 21111 xs=1.0 goto 21121 21111 continue xv=1.0 21121 continue continue ys0=0.0 do 21131 j=1,nr ym(j)=dot_product(w,y(:,j)) y(:,j)=y(:,j)-ym(j) z=dot_product(w,y(:,j)**2) if(jsd .le. 
0)goto 21151 ys(j)=sqrt(z) y(:,j)=y(:,j)/ys(j) goto 21161 21151 continue ys0=ys0+z 21161 continue continue 21131 continue continue if(jsd .ne. 0)goto 21181 ys=1.0 goto 21191 21181 continue ys0=nr 21191 continue continue return end subroutine multspelnet2(beta,ni,nr,y,w,no,ne,nx,x,ix,jx,ju,vp,cl,n *lam,flmin, ulam,thri,maxit,xm,xs,xv,ys0,lmu,ao,ia,kin,rsqo,almo,n *lp,jerr) implicit double precision(a-h,o-z) double precision y(no,nr),w(no),x(*),vp(ni),ulam(nlam),cl(2,nr,ni) double precision ao(nx,nr,nlam),rsqo(nlam),almo(nlam),xm(ni),xs(ni *),xv(ni) integer ix(*),jx(*),ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: g,gj,gk,del,o integer, dimension (:), allocatable :: mm,iy,isc double precision, dimension (:,:), allocatable :: a allocate(a(1:nr,1:ni),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(gj(1:nr),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nr),stat=jerr) if(jerr.ne.0) return allocate(del(1:nr),stat=jerr) if(jerr.ne.0) return allocate(o(1:nr),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nr),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 iy=0 thr=thri*ys0/nr alf=1.0 if(flmin .ge. 1.0)goto 21211 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 21211 continue rsq=ys0 a=0.0 mm=0 o=0.0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 21221 j=1,ni if(ju(j).eq.0)goto 21221 jb=ix(j) je=ix(j+1)-1 g(j)=0.0 do 21231 k=1,nr g(j)=g(j)+(dot_product(y(jx(jb:je),k),w(jx(jb:je))*x(jb:je))/xs(j) *)**2 21231 continue continue g(j)=sqrt(g(j)) 21221 continue continue do 21241 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 21261 alm=ulam(m) goto 21251 21261 if(m .le. 2)goto 21271 alm=alm*alf goto 21251 21271 if(m .ne. 
1)goto 21281 alm=big goto 21291 21281 continue alm0=0.0 do 21301 j=1,ni if(ju(j).eq.0)goto 21301 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 21301 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 21291 continue 21251 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 21311 k=1,ni if(iy(k).eq.1)goto 21311 if(ju(k).eq.0)goto 21311 if(g(k).gt.tlam*vp(k)) iy(k)=1 21311 continue continue continue 21321 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. maxit)goto 21341 jerr=-m return 21341 continue nlp=nlp+1 dlx=0.0 do 21351 k=1,ni if(iy(k).eq.0)goto 21351 jb=ix(k) je=ix(k+1)-1 gkn=0.0 do 21361 j=1,nr gj(j)=dot_product(y(jx(jb:je),j)+o(j),w(jx(jb:je))*x(jb:je))/xs(k) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 21361 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 21381 a(:,k)=0.0 goto 21391 21381 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 21391 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 21351 if(mm(k) .ne. 0)goto 21411 nin=nin+1 if(nin.gt.nx)goto 21352 mm(k)=nin ia(nin)=k 21411 continue do 21421 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(jx(jb:je),j)=y(jx(jb:je),j)-del(j)*x(jb:je)/xs(k) o(j)=o(j)+del(j)*xm(k)/xs(k) dlx=max(xv(k)*del(j)**2,dlx) 21421 continue continue 21351 continue 21352 continue if(nin.gt.nx)goto 21322 if(dlx .ge. thr)goto 21441 ixx=0 do 21451 j=1,ni if(iy(j).eq.1)goto 21451 if(ju(j).eq.0)goto 21451 jb=ix(j) je=ix(j+1)-1 g(j)=0.0 do 21461 k=1,nr g(j)=g(j)+ (dot_product(y(jx(jb:je),k)+o(k),w(jx(jb:je))*x(jb:je) *)/xs(j))**2 21461 continue continue g(j)=sqrt(g(j)) if(g(j) .le. ab*vp(j))goto 21481 iy(j)=1 ixx=1 21481 continue 21451 continue continue if(ixx.eq.1) go to 10880 goto 21322 21441 continue if(nlp .le. 
maxit)goto 21501 jerr=-m return 21501 continue 10360 continue iz=1 continue 21511 continue nlp=nlp+1 dlx=0.0 do 21521 l=1,nin k=ia(l) jb=ix(k) je=ix(k+1)-1 gkn=0.0 do 21531 j=1,nr gj(j)= dot_product(y(jx(jb:je),j)+o(j),w(jx(jb:je))*x(jb:je))/xs( *k) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 21531 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 21551 a(:,k)=0.0 goto 21561 21551 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 21561 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 21521 do 21571 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(jx(jb:je),j)=y(jx(jb:je),j)-del(j)*x(jb:je)/xs(k) o(j)=o(j)+del(j)*xm(k)/xs(k) dlx=max(xv(k)*del(j)**2,dlx) 21571 continue continue 21521 continue continue if(dlx.lt.thr)goto 21512 if(nlp .le. maxit)goto 21591 jerr=-m return 21591 continue goto 21511 21512 continue jz=0 goto 21321 21322 continue if(nin .le. nx)goto 21611 jerr=-10000-m goto 21242 21611 continue if(nin .le. 
0)goto 21631 do 21641 j=1,nr ao(1:nin,j,m)=a(j,ia(1:nin)) 21641 continue continue 21631 continue kin(m)=nin rsqo(m)=1.0-rsq/ys0 almo(m)=alm lmu=m if(m.lt.mnl)goto 21241 if(flmin.ge.1.0)goto 21241 me=0 do 21651 j=1,nin if(ao(j,1,m).ne.0.0) me=me+1 21651 continue continue if(me.gt.ne)goto 21242 if(rsq0-rsq.lt.sml*rsq)goto 21242 if(rsqo(m).gt.rsqmax)goto 21242 21241 continue 21242 continue deallocate(a,mm,g,iy,gj,gk,del,o) return end subroutine multlognetn(parm,no,ni,nc,x,y,g,w,ju,vp,cl,ne,nx,nlam,f *lmin,ulam, shri,intr,maxit,xv,lmu,a0,a,m,kin,dev0,dev,alm,nlp,jer *r) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nc),g(no,nc),w(no),vp(ni),ulam(nlam *),cl(2,ni) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),xv( *ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q,r,b,bs double precision, dimension (:), allocatable :: sxp,sxpl,ga,gk,del integer, dimension (:), allocatable :: mm,is,ixx,isc allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return allocate(r(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nc),stat=jerr) if(jerr.ne.0) return allocate(del(1:nc),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nc),stat=jerr) if(jerr.ne.0) return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 21661 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 21681 jerr =8000+ic return 21681 continue if(q0 .lt. pmax)goto 21701 jerr =9000+ic return 21701 continue if(intr .ne. 
0)goto 21721 q0=1.0/nc b(0,ic)=0.0 goto 21731 21721 continue b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 21731 continue continue b(1:ni,ic)=0.0 21661 continue continue if(intr.eq.0) dev1=log(float(nc)) ixx=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 21751 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 21761 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 21761 continue continue goto 21771 21751 continue do 21781 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 21781 continue continue sxp=0.0 if(intr .ne. 0)goto 21801 b(0,:)=0.0 goto 21811 21801 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 21811 continue continue dev1=0.0 do 21821 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 21821 continue continue sxpl=w*log(sxp) do 21831 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 21831 continue continue 21771 continue continue do 21841 ic=1,nc do 21851 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 21851 continue continue 21841 continue continue dev0=dev0+dev1 alf=1.0 if(flmin .ge. 1.0)goto 21871 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 21871 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 shr=shri*dev0 ga=0.0 do 21881 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) do 21891 j=1,ni if(ju(j).ne.0) ga(j)=ga(j)+dot_product(r(:,ic),x(:,j))**2 21891 continue continue 21881 continue continue ga=sqrt(ga) do 21901 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 21921 al=ulam(ilm) goto 21911 21921 if(ilm .le. 2)goto 21931 al=al*alf goto 21911 21931 if(ilm .ne. 
1)goto 21941 al=big goto 21951 21941 continue al0=0.0 do 21961 j=1,ni if(ju(j).eq.0)goto 21961 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 21961 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 21951 continue 21911 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 21971 k=1,ni if(ixx(k).eq.1)goto 21971 if(ju(k).eq.0)goto 21971 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 21971 continue continue 10880 continue continue 21981 continue ix=0 jx=ix kx=jx t=0.0 if(nlp .le. maxit)goto 22001 jerr=-ilm return 22001 continue do 22011 ic=1,nc t=max(t,maxval(q(:,ic)*(1.0-q(:,ic)/sxp)/sxp)) 22011 continue continue if(t .ge. eps)goto 22031 kx=1 goto 21982 22031 continue t=2.0*t alt=al1/t al2t=al2/t do 22041 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp)/t d=0.0 if(intr.ne.0) d=sum(r(:,ic)) if(d .eq. 0.0)goto 22061 b(0,ic)=b(0,ic)+d r(:,ic)=r(:,ic)-d*w dlx=max(dlx,d**2) 22061 continue 22041 continue continue continue 22071 continue nlp=nlp+nc dlx=0.0 do 22081 k=1,ni if(ixx(k).eq.0)goto 22081 gkn=0.0 do 22091 ic=1,nc gk(ic)=dot_product(r(:,ic),x(:,k))+b(k,ic)*xv(k) gkn=gkn+gk(ic)**2 22091 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn del=b(k,:) if(u .gt. 0.0)goto 22111 b(k,:)=0.0 goto 22121 22111 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k), cl(2,k),vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 22121 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 22081 do 22131 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(:,ic)=r(:,ic)-del(ic)*w*x(:,k) 22131 continue continue if(mm(k) .ne. 0)goto 22151 nin=nin+1 if(nin .le. nx)goto 22171 jx=1 goto 22082 22171 continue mm(k)=nin m(nin)=k 22151 continue 22081 continue 22082 continue if(jx.gt.0)goto 22072 if(dlx.lt.shr)goto 22072 if(nlp .le. 
maxit)goto 22191 jerr=-ilm return 22191 continue continue 22201 continue nlp=nlp+nc dlx=0.0 do 22211 l=1,nin k=m(l) gkn=0.0 do 22221 ic=1,nc gk(ic)=dot_product(r(:,ic),x(:,k))+b(k,ic)*xv(k) gkn=gkn+gk(ic)**2 22221 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn del=b(k,:) if(u .gt. 0.0)goto 22241 b(k,:)=0.0 goto 22251 22241 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k), cl(2,k),vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 22251 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 22211 do 22261 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(:,ic)=r(:,ic)-del(ic)*w*x(:,k) 22261 continue continue 22211 continue continue if(dlx.lt.shr)goto 22202 if(nlp .le. maxit)goto 22281 jerr=-ilm return 22281 continue goto 22201 22202 continue goto 22071 22072 continue if(jx.gt.0)goto 21982 do 22291 ic=1,nc if((b(0,ic)-bs(0,ic))**2.gt.shr) ix=1 if(ix .ne. 0)goto 22311 do 22321 j=1,nin k=m(j) if(xv(k)*(b(k,ic)-bs(k,ic))**2 .le. shr)goto 22341 ix=1 goto 22322 22341 continue 22321 continue 22322 continue 22311 continue do 22351 i=1,no fi=b(0,ic)+g(i,ic) if(nin.gt.0) fi=fi+dot_product(b(m(1:nin),ic),x(i,m(1:nin))) fi=min(max(exmn,fi),exmx) sxp(i)=sxp(i)-q(i,ic) q(i,ic)=min(max(emin*sxp(i),exp(fi)),emax*sxp(i)) sxp(i)=sxp(i)+q(i,ic) 22351 continue continue 22291 continue continue s=-sum(b(0,:))/nc b(0,:)=b(0,:)+s if(jx.gt.0)goto 21982 if(ix .ne. 0)goto 22371 do 22381 k=1,ni if(ixx(k).eq.1)goto 22381 if(ju(k).eq.0)goto 22381 ga(k)=0.0 22381 continue continue do 22391 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) do 22401 k=1,ni if(ixx(k).eq.1)goto 22401 if(ju(k).eq.0)goto 22401 ga(k)=ga(k)+dot_product(r(:,ic),x(:,k))**2 22401 continue continue 22391 continue continue ga=sqrt(ga) do 22411 k=1,ni if(ixx(k).eq.1)goto 22411 if(ju(k).eq.0)goto 22411 if(ga(k) .le. al1*vp(k))goto 22431 ixx(k)=1 ix=1 22431 continue 22411 continue continue if(ix.eq.1) go to 10880 goto 21982 22371 continue goto 21981 21982 continue if(kx .le. 
0)goto 22451 jerr=-20000-ilm goto 21902 22451 continue if(jx .le. 0)goto 22471 jerr=-10000-ilm goto 21902 22471 continue devi=0.0 do 22481 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 22491 i=1,no if(y(i,ic).le.0.0)goto 22491 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 22491 continue continue 22481 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ilm.lt.mnl)goto 21901 if(flmin.ge.1.0)goto 21901 me=0 do 22501 j=1,nin if(a(j,1,ilm).ne.0.0) me=me+1 22501 continue continue if(me.gt.ne)goto 21902 if(dev(ilm).gt.devmax)goto 21902 if(dev(ilm)-dev(ilm-1).lt.sml)goto 21902 21901 continue 21902 continue g=log(q) do 22511 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 22511 continue continue deallocate(sxp,b,bs,r,q,mm,is,ga,ixx,gk,del,sxpl) return end subroutine multsprlognetn(parm,no,ni,nc,x,ix,jx,y,g,w,ju,vp,cl,ne, *nx,nlam, flmin,ulam,shri,intr,maxit,xv,xb,xs,lmu,a0,a,m,kin,dev0, *dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nc),g(no,nc),w(no),vp(ni) double precision ulam(nlam),xb(ni),xs(ni),xv(ni) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl( *2,ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q,r,b,bs double precision, dimension (:), allocatable :: sxp,sxpl,ga,gk double precision, dimension (:), allocatable :: del,sc,svr integer, dimension (:), allocatable :: mm,is,iy,isc allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return allocate(r(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nc),stat=jerr) if(jerr.ne.0) return allocate(del(1:nc),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return 
allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(svr(1:nc),stat=jerr) if(jerr.ne.0) return allocate(sc(1:no),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nc),stat=jerr) if(jerr.ne.0) return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 22521 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 22541 jerr =8000+ic return 22541 continue if(q0 .lt. pmax)goto 22561 jerr =9000+ic return 22561 continue b(1:ni,ic)=0.0 if(intr .ne. 0)goto 22581 q0=1.0/nc b(0,ic)=0.0 goto 22591 22581 continue b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 22591 continue continue 22521 continue continue if(intr.eq.0) dev1=log(float(nc)) iy=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 22611 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 22621 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 22621 continue continue goto 22631 22611 continue do 22641 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 22641 continue continue sxp=0.0 if(intr .ne. 0)goto 22661 b(0,:)=0.0 goto 22671 22661 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 22671 continue continue dev1=0.0 do 22681 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 22681 continue continue sxpl=w*log(sxp) do 22691 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 22691 continue continue 22631 continue continue do 22701 ic=1,nc do 22711 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 22711 continue continue 22701 continue continue dev0=dev0+dev1 alf=1.0 if(flmin .ge. 
1.0)goto 22731 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 22731 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 shr=shri*dev0 ga=0.0 do 22741 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) svr(ic)=sum(r(:,ic)) do 22751 j=1,ni if(ju(j).eq.0)goto 22751 jb=ix(j) je=ix(j+1)-1 gj=dot_product(r(jx(jb:je),ic),x(jb:je)) ga(j)=ga(j)+((gj-svr(ic)*xb(j))/xs(j))**2 22751 continue continue 22741 continue continue ga=sqrt(ga) do 22761 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 22781 al=ulam(ilm) goto 22771 22781 if(ilm .le. 2)goto 22791 al=al*alf goto 22771 22791 if(ilm .ne. 1)goto 22801 al=big goto 22811 22801 continue al0=0.0 do 22821 j=1,ni if(ju(j).eq.0)goto 22821 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 22821 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 22811 continue 22771 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 22831 k=1,ni if(iy(k).eq.1)goto 22831 if(ju(k).eq.0)goto 22831 if(ga(k).gt.tlam*vp(k)) iy(k)=1 22831 continue continue 10880 continue continue 22841 continue ixx=0 jxx=ixx kxx=jxx t=0.0 if(nlp .le. maxit)goto 22861 jerr=-ilm return 22861 continue do 22871 ic=1,nc t=max(t,maxval(q(:,ic)*(1.0-q(:,ic)/sxp)/sxp)) 22871 continue continue if(t .ge. eps)goto 22891 kxx=1 goto 22842 22891 continue t=2.0*t alt=al1/t al2t=al2/t do 22901 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp)/t svr(ic)=sum(r(:,ic)) if(intr .eq. 0)goto 22921 b(0,ic)=b(0,ic)+svr(ic) r(:,ic)=r(:,ic)-svr(ic)*w dlx=max(dlx,svr(ic)**2) 22921 continue 22901 continue continue continue 22931 continue nlp=nlp+nc dlx=0.0 do 22941 k=1,ni if(iy(k).eq.0)goto 22941 jb=ix(k) je=ix(k+1)-1 del=b(k,:) gkn=0.0 do 22951 ic=1,nc u=(dot_product(r(jx(jb:je),ic),x(jb:je))-svr(ic)*xb(k))/xs(k) gk(ic)=u+del(ic)*xv(k) gkn=gkn+gk(ic)**2 22951 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn if(u .gt. 
0.0)goto 22971 b(k,:)=0.0 goto 22981 22971 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k),cl(2,k), vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 22981 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 22941 do 22991 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(jx(jb:je),ic)=r(jx(jb:je),ic) -del(ic)*w(jx(jb:je))*(x(jb:je)-x *b(k))/xs(k) 22991 continue continue if(mm(k) .ne. 0)goto 23011 nin=nin+1 if(nin .le. nx)goto 23031 jxx=1 goto 22942 23031 continue mm(k)=nin m(nin)=k 23011 continue 22941 continue 22942 continue if(jxx.gt.0)goto 22932 if(dlx.lt.shr)goto 22932 if(nlp .le. maxit)goto 23051 jerr=-ilm return 23051 continue continue 23061 continue nlp=nlp+nc dlx=0.0 do 23071 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 del=b(k,:) gkn=0.0 do 23081 ic=1,nc u=(dot_product(r(jx(jb:je),ic),x(jb:je)) -svr(ic)*xb(k))/xs(k) gk(ic)=u+del(ic)*xv(k) gkn=gkn+gk(ic)**2 23081 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn if(u .gt. 0.0)goto 23101 b(k,:)=0.0 goto 23111 23101 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k),cl(2,k), vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 23111 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 23071 do 23121 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(jx(jb:je),ic)=r(jx(jb:je),ic) -del(ic)*w(jx(jb:je))*(x(jb:je)-x *b(k))/xs(k) 23121 continue continue 23071 continue continue if(dlx.lt.shr)goto 23062 if(nlp .le. maxit)goto 23141 jerr=-ilm return 23141 continue goto 23061 23062 continue goto 22931 22932 continue if(jxx.gt.0)goto 22842 do 23151 ic=1,nc if((b(0,ic)-bs(0,ic))**2.gt.shr) ixx=1 if(ixx .ne. 0)goto 23171 do 23181 j=1,nin k=m(j) if(xv(k)*(b(k,ic)-bs(k,ic))**2 .le. 
shr)goto 23201 ixx=1 goto 23182 23201 continue 23181 continue 23182 continue 23171 continue sc=b(0,ic)+g(:,ic) b0=0.0 do 23211 j=1,nin l=m(j) jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))+b(l,ic)*x(jb:je)/xs(l) b0=b0-b(l,ic)*xb(l)/xs(l) 23211 continue continue sc=min(max(exmn,sc+b0),exmx) sxp=sxp-q(:,ic) q(:,ic)=min(max(emin*sxp,exp(sc)),emax*sxp) sxp=sxp+q(:,ic) 23151 continue continue s=sum(b(0,:))/nc b(0,:)=b(0,:)-s if(jxx.gt.0)goto 22842 if(ixx .ne. 0)goto 23231 do 23241 j=1,ni if(iy(j).eq.1)goto 23241 if(ju(j).eq.0)goto 23241 ga(j)=0.0 23241 continue continue do 23251 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) do 23261 j=1,ni if(iy(j).eq.1)goto 23261 if(ju(j).eq.0)goto 23261 jb=ix(j) je=ix(j+1)-1 gj=dot_product(r(jx(jb:je),ic),x(jb:je)) ga(j)=ga(j)+((gj-svr(ic)*xb(j))/xs(j))**2 23261 continue continue 23251 continue continue ga=sqrt(ga) do 23271 k=1,ni if(iy(k).eq.1)goto 23271 if(ju(k).eq.0)goto 23271 if(ga(k) .le. al1*vp(k))goto 23291 iy(k)=1 ixx=1 23291 continue 23271 continue continue if(ixx.eq.1) go to 10880 goto 22842 23231 continue goto 22841 22842 continue if(kxx .le. 0)goto 23311 jerr=-20000-ilm goto 22762 23311 continue if(jxx .le. 
0)goto 23331 jerr=-10000-ilm goto 22762 23331 continue devi=0.0 do 23341 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 23351 i=1,no if(y(i,ic).le.0.0)goto 23351 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 23351 continue continue 23341 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ilm.lt.mnl)goto 22761 if(flmin.ge.1.0)goto 22761 me=0 do 23361 j=1,nin if(a(j,1,ilm).ne.0.0) me=me+1 23361 continue continue if(me.gt.ne)goto 22762 if(dev(ilm).gt.devmax)goto 22762 if(dev(ilm)-dev(ilm-1).lt.sml)goto 22762 22761 continue 22762 continue g=log(q) do 23371 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 23371 continue continue deallocate(sxp,b,bs,r,q,mm,is,sc,ga,iy,gk,del,sxpl) return end subroutine psort7(v,a,ii,jj) implicit double precision(a-h,o-z) c c puts into a the permutation vector which sorts v into c increasing order. the array v is not modified. c only elements from ii to jj are considered. c arrays iu(k) and il(k) permit sorting up to 2**(k+1)-1 elements c c this is a modification of cacm algorithm #347 by r. c. singleton, c which is a modified hoare quicksort. 
c dimension a(jj),v(jj),iu(20),il(20) integer t,tt integer a double precision v m=1 i=ii j=jj 10 if (i.ge.j) go to 80 20 k=i ij=(j+i)/2 t=a(ij) vt=v(t) if (v(a(i)).le.vt) go to 30 a(ij)=a(i) a(i)=t t=a(ij) vt=v(t) 30 l=j if (v(a(j)).ge.vt) go to 50 a(ij)=a(j) a(j)=t t=a(ij) vt=v(t) if (v(a(i)).le.vt) go to 50 a(ij)=a(i) a(i)=t t=a(ij) vt=v(t) go to 50 40 a(l)=a(k) a(k)=tt 50 l=l-1 if (v(a(l)).gt.vt) go to 50 tt=a(l) vtt=v(tt) 60 k=k+1 if (v(a(k)).lt.vt) go to 60 if (k.le.l) go to 40 if (l-i.le.j-k) go to 70 il(m)=i iu(m)=l i=k m=m+1 go to 90 70 il(m)=k iu(m)=j j=l m=m+1 go to 90 80 m=m-1 if (m.eq.0) return i=il(m) j=iu(m) 90 if (j-i.gt.10) go to 20 if (i.eq.ii) go to 10 i=i-1 100 i=i+1 if (i.eq.j) go to 80 t=a(i+1) vt=v(t) if (v(a(i)).le.vt) go to 100 k=i 110 a(k+1)=a(k) k=k-1 if (vt.lt.v(a(k))) go to 110 a(k+1)=t go to 100 end glmnet/src/Makevars0000644000175000017500000000007714140040573014157 0ustar nileshnileshCXX_STD = CXX14 PKG_CXXFLAGS=-Iglmnetpp/include -Iglmnetpp/src glmnet/src/RcppExports.cpp0000644000175000017500000003271414140043554015465 0ustar nileshnilesh// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // elnet_exp List elnet_exp(int ka, double parm, Eigen::MatrixXd x, Eigen::VectorXd y, Eigen::VectorXd w, const Eigen::Map jd, const Eigen::Map vp, Eigen::MatrixXd cl, int ne, int nx, int nlam, double flmin, const Eigen::Map ulam, double thr, int isd, int intr, int maxit, SEXP pb, int lmu, Eigen::Map a0, Eigen::Map ca, Eigen::Map ia, Eigen::Map nin, Eigen::Map rsq, Eigen::Map alm, int nlp, int jerr); RcppExport SEXP _glmnet_elnet_exp(SEXP kaSEXP, SEXP parmSEXP, SEXP xSEXP, SEXP ySEXP, SEXP wSEXP, SEXP jdSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP neSEXP, SEXP nxSEXP, SEXP nlamSEXP, SEXP flminSEXP, 
SEXP ulamSEXP, SEXP thrSEXP, SEXP isdSEXP, SEXP intrSEXP, SEXP maxitSEXP, SEXP pbSEXP, SEXP lmuSEXP, SEXP a0SEXP, SEXP caSEXP, SEXP iaSEXP, SEXP ninSEXP, SEXP rsqSEXP, SEXP almSEXP, SEXP nlpSEXP, SEXP jerrSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type ka(kaSEXP); Rcpp::traits::input_parameter< double >::type parm(parmSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type x(xSEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type y(ySEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type w(wSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type jd(jdSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type vp(vpSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type cl(clSEXP); Rcpp::traits::input_parameter< int >::type ne(neSEXP); Rcpp::traits::input_parameter< int >::type nx(nxSEXP); Rcpp::traits::input_parameter< int >::type nlam(nlamSEXP); Rcpp::traits::input_parameter< double >::type flmin(flminSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type ulam(ulamSEXP); Rcpp::traits::input_parameter< double >::type thr(thrSEXP); Rcpp::traits::input_parameter< int >::type isd(isdSEXP); Rcpp::traits::input_parameter< int >::type intr(intrSEXP); Rcpp::traits::input_parameter< int >::type maxit(maxitSEXP); Rcpp::traits::input_parameter< SEXP >::type pb(pbSEXP); Rcpp::traits::input_parameter< int >::type lmu(lmuSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type a0(a0SEXP); Rcpp::traits::input_parameter< Eigen::Map >::type ca(caSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type ia(iaSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type nin(ninSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type rsq(rsqSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type alm(almSEXP); Rcpp::traits::input_parameter< int >::type nlp(nlpSEXP); Rcpp::traits::input_parameter< int >::type jerr(jerrSEXP); rcpp_result_gen = Rcpp::wrap(elnet_exp(ka, 
parm, x, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, pb, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr)); return rcpp_result_gen; END_RCPP } // spelnet_exp List spelnet_exp(int ka, double parm, const Eigen::Map > x, Eigen::VectorXd y, Eigen::VectorXd w, const Eigen::Map jd, const Eigen::Map vp, Eigen::MatrixXd cl, int ne, int nx, int nlam, double flmin, const Eigen::Map ulam, double thr, int isd, int intr, int maxit, SEXP pb, int lmu, Eigen::Map a0, Eigen::Map ca, Eigen::Map ia, Eigen::Map nin, Eigen::Map rsq, Eigen::Map alm, int nlp, int jerr); RcppExport SEXP _glmnet_spelnet_exp(SEXP kaSEXP, SEXP parmSEXP, SEXP xSEXP, SEXP ySEXP, SEXP wSEXP, SEXP jdSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP neSEXP, SEXP nxSEXP, SEXP nlamSEXP, SEXP flminSEXP, SEXP ulamSEXP, SEXP thrSEXP, SEXP isdSEXP, SEXP intrSEXP, SEXP maxitSEXP, SEXP pbSEXP, SEXP lmuSEXP, SEXP a0SEXP, SEXP caSEXP, SEXP iaSEXP, SEXP ninSEXP, SEXP rsqSEXP, SEXP almSEXP, SEXP nlpSEXP, SEXP jerrSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type ka(kaSEXP); Rcpp::traits::input_parameter< double >::type parm(parmSEXP); Rcpp::traits::input_parameter< const Eigen::Map > >::type x(xSEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type y(ySEXP); Rcpp::traits::input_parameter< Eigen::VectorXd >::type w(wSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type jd(jdSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type vp(vpSEXP); Rcpp::traits::input_parameter< Eigen::MatrixXd >::type cl(clSEXP); Rcpp::traits::input_parameter< int >::type ne(neSEXP); Rcpp::traits::input_parameter< int >::type nx(nxSEXP); Rcpp::traits::input_parameter< int >::type nlam(nlamSEXP); Rcpp::traits::input_parameter< double >::type flmin(flminSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type ulam(ulamSEXP); Rcpp::traits::input_parameter< double >::type thr(thrSEXP); Rcpp::traits::input_parameter< int >::type isd(isdSEXP); 
Rcpp::traits::input_parameter< int >::type intr(intrSEXP); Rcpp::traits::input_parameter< int >::type maxit(maxitSEXP); Rcpp::traits::input_parameter< SEXP >::type pb(pbSEXP); Rcpp::traits::input_parameter< int >::type lmu(lmuSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type a0(a0SEXP); Rcpp::traits::input_parameter< Eigen::Map >::type ca(caSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type ia(iaSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type nin(ninSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type rsq(rsqSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type alm(almSEXP); Rcpp::traits::input_parameter< int >::type nlp(nlpSEXP); Rcpp::traits::input_parameter< int >::type jerr(jerrSEXP); rcpp_result_gen = Rcpp::wrap(spelnet_exp(ka, parm, x, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, pb, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr)); return rcpp_result_gen; END_RCPP } // wls_exp List wls_exp(double alm0, double almc, double alpha, int m, int no, int ni, const Eigen::Map x, Eigen::Map r, Eigen::Map xv, const Eigen::Map v, int intr, const Eigen::Map ju, const Eigen::Map vp, const Eigen::Map cl, int nx, double thr, int maxit, Eigen::Map a, double aint, Eigen::Map g, Eigen::Map ia, Eigen::Map iy, int iz, Eigen::Map mm, int nino, double rsqc, int nlp, int jerr); RcppExport SEXP _glmnet_wls_exp(SEXP alm0SEXP, SEXP almcSEXP, SEXP alphaSEXP, SEXP mSEXP, SEXP noSEXP, SEXP niSEXP, SEXP xSEXP, SEXP rSEXP, SEXP xvSEXP, SEXP vSEXP, SEXP intrSEXP, SEXP juSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP nxSEXP, SEXP thrSEXP, SEXP maxitSEXP, SEXP aSEXP, SEXP aintSEXP, SEXP gSEXP, SEXP iaSEXP, SEXP iySEXP, SEXP izSEXP, SEXP mmSEXP, SEXP ninoSEXP, SEXP rsqcSEXP, SEXP nlpSEXP, SEXP jerrSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type alm0(alm0SEXP); Rcpp::traits::input_parameter< double >::type almc(almcSEXP); Rcpp::traits::input_parameter< double >::type 
alpha(alphaSEXP); Rcpp::traits::input_parameter< int >::type m(mSEXP); Rcpp::traits::input_parameter< int >::type no(noSEXP); Rcpp::traits::input_parameter< int >::type ni(niSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type x(xSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type r(rSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type xv(xvSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type v(vSEXP); Rcpp::traits::input_parameter< int >::type intr(intrSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type ju(juSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type vp(vpSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type cl(clSEXP); Rcpp::traits::input_parameter< int >::type nx(nxSEXP); Rcpp::traits::input_parameter< double >::type thr(thrSEXP); Rcpp::traits::input_parameter< int >::type maxit(maxitSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type a(aSEXP); Rcpp::traits::input_parameter< double >::type aint(aintSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type g(gSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type ia(iaSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type iy(iySEXP); Rcpp::traits::input_parameter< int >::type iz(izSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type mm(mmSEXP); Rcpp::traits::input_parameter< int >::type nino(ninoSEXP); Rcpp::traits::input_parameter< double >::type rsqc(rsqcSEXP); Rcpp::traits::input_parameter< int >::type nlp(nlpSEXP); Rcpp::traits::input_parameter< int >::type jerr(jerrSEXP); rcpp_result_gen = Rcpp::wrap(wls_exp(alm0, almc, alpha, m, no, ni, x, r, xv, v, intr, ju, vp, cl, nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp, jerr)); return rcpp_result_gen; END_RCPP } // spwls_exp List spwls_exp(double alm0, double almc, double alpha, int m, int no, int ni, const Eigen::Map> x, const Eigen::Map xm, const Eigen::Map xs, Eigen::Map r, Eigen::Map xv, const Eigen::Map v, int intr, const Eigen::Map ju, const Eigen::Map vp, const 
Eigen::Map cl, int nx, double thr, int maxit, Eigen::Map a, double aint, Eigen::Map g, Eigen::Map ia, Eigen::Map iy, int iz, Eigen::Map mm, int nino, double rsqc, int nlp, int jerr); RcppExport SEXP _glmnet_spwls_exp(SEXP alm0SEXP, SEXP almcSEXP, SEXP alphaSEXP, SEXP mSEXP, SEXP noSEXP, SEXP niSEXP, SEXP xSEXP, SEXP xmSEXP, SEXP xsSEXP, SEXP rSEXP, SEXP xvSEXP, SEXP vSEXP, SEXP intrSEXP, SEXP juSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP nxSEXP, SEXP thrSEXP, SEXP maxitSEXP, SEXP aSEXP, SEXP aintSEXP, SEXP gSEXP, SEXP iaSEXP, SEXP iySEXP, SEXP izSEXP, SEXP mmSEXP, SEXP ninoSEXP, SEXP rsqcSEXP, SEXP nlpSEXP, SEXP jerrSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type alm0(alm0SEXP); Rcpp::traits::input_parameter< double >::type almc(almcSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< int >::type m(mSEXP); Rcpp::traits::input_parameter< int >::type no(noSEXP); Rcpp::traits::input_parameter< int >::type ni(niSEXP); Rcpp::traits::input_parameter< const Eigen::Map> >::type x(xSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type xm(xmSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type xs(xsSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type r(rSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type xv(xvSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type v(vSEXP); Rcpp::traits::input_parameter< int >::type intr(intrSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type ju(juSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type vp(vpSEXP); Rcpp::traits::input_parameter< const Eigen::Map >::type cl(clSEXP); Rcpp::traits::input_parameter< int >::type nx(nxSEXP); Rcpp::traits::input_parameter< double >::type thr(thrSEXP); Rcpp::traits::input_parameter< int >::type maxit(maxitSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type a(aSEXP); Rcpp::traits::input_parameter< double >::type 
aint(aintSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type g(gSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type ia(iaSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type iy(iySEXP); Rcpp::traits::input_parameter< int >::type iz(izSEXP); Rcpp::traits::input_parameter< Eigen::Map >::type mm(mmSEXP); Rcpp::traits::input_parameter< int >::type nino(ninoSEXP); Rcpp::traits::input_parameter< double >::type rsqc(rsqcSEXP); Rcpp::traits::input_parameter< int >::type nlp(nlpSEXP); Rcpp::traits::input_parameter< int >::type jerr(jerrSEXP); rcpp_result_gen = Rcpp::wrap(spwls_exp(alm0, almc, alpha, m, no, ni, x, xm, xs, r, xv, v, intr, ju, vp, cl, nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp, jerr)); return rcpp_result_gen; END_RCPP } glmnet/src/glmnet_init.c0000644000175000017500000003527114140040573015144 0ustar nileshnilesh// Automatically generated, editing not advised. #ifndef R_GLMNET_H #define R_GLMNET_H #include #include #include #ifdef ENABLE_NLS #include #define _(String) dgettext ("glmnet", String) #else #define _(String) (String) #endif /* New addition */ extern SEXP storePB(SEXP tpb); extern SEXP _glmnet_wls_exp(SEXP alm0SEXP, SEXP almcSEXP, SEXP alphaSEXP, SEXP mSEXP, SEXP noSEXP, SEXP niSEXP, SEXP xSEXP, SEXP rSEXP, SEXP xvSEXP, SEXP vSEXP, SEXP intrSEXP, SEXP juSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP nxSEXP, SEXP thrSEXP, SEXP maxitSEXP, SEXP aSEXP, SEXP aintSEXP, SEXP gSEXP, SEXP iaSEXP, SEXP iySEXP, SEXP izSEXP, SEXP mmSEXP, SEXP ninoSEXP, SEXP rsqcSEXP, SEXP nlpSEXP, SEXP jerrSEXP); extern SEXP _glmnet_spwls_exp(SEXP alm0SEXP, SEXP almcSEXP, SEXP alphaSEXP, SEXP mSEXP, SEXP noSEXP, SEXP niSEXP, SEXP xSEXP, SEXP xmSEXP, SEXP xsSEXP, SEXP rSEXP, SEXP xvSEXP, SEXP vSEXP, SEXP intrSEXP, SEXP juSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP nxSEXP, SEXP thrSEXP, SEXP maxitSEXP, SEXP aSEXP, SEXP aintSEXP, SEXP gSEXP, SEXP iaSEXP, SEXP iySEXP, SEXP izSEXP, SEXP mmSEXP, SEXP ninoSEXP, SEXP rsqcSEXP, SEXP nlpSEXP, SEXP jerrSEXP); extern SEXP 
_glmnet_elnet_exp(SEXP kaSEXP, SEXP parmSEXP, SEXP xSEXP, SEXP ySEXP, SEXP wSEXP, SEXP jdSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP neSEXP, SEXP nxSEXP, SEXP nlamSEXP, SEXP flminSEXP, SEXP ulamSEXP, SEXP thrSEXP, SEXP isdSEXP, SEXP intrSEXP, SEXP maxitSEXP, SEXP pbSEXP, SEXP lmuSEXP, SEXP a0SEXP, SEXP caSEXP, SEXP iaSEXP, SEXP ninSEXP, SEXP rsqSEXP, SEXP almSEXP, SEXP nlpSEXP, SEXP jerrSEXP); extern SEXP _glmnet_spelnet_exp(SEXP kaSEXP, SEXP parmSEXP, SEXP xSEXP, SEXP ySEXP, SEXP wSEXP, SEXP jdSEXP, SEXP vpSEXP, SEXP clSEXP, SEXP neSEXP, SEXP nxSEXP, SEXP nlamSEXP, SEXP flminSEXP, SEXP ulamSEXP, SEXP thrSEXP, SEXP isdSEXP, SEXP intrSEXP, SEXP maxitSEXP, SEXP pbSEXP, SEXP lmuSEXP, SEXP a0SEXP, SEXP caSEXP, SEXP iaSEXP, SEXP ninSEXP, SEXP rsqSEXP, SEXP almSEXP, SEXP nlpSEXP, SEXP jerrSEXP); static const R_CallMethodDef CallEntries[] = { {"storePB", (DL_FUNC) &storePB, 1}, {"_glmnet_wls_exp", (DL_FUNC) &_glmnet_wls_exp, 28}, {"_glmnet_spwls_exp", (DL_FUNC) &_glmnet_spwls_exp, 30}, {"_glmnet_elnet_exp", (DL_FUNC) &_glmnet_elnet_exp, 27}, {"_glmnet_spelnet_exp", (DL_FUNC) &_glmnet_spelnet_exp, 27}, {NULL, NULL, 0} }; /* End of new addition */ #define FDEF(name) {#name, (DL_FUNC) &F77_SUB(name), sizeof(name ## _t)/sizeof(name ## _t[0]), name ##_t} void F77_SUB(coxnet)( double *parm, int *no, int *ni, double *x, double *y, double *d, double *o, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *maxit, int *isd, int *lmu, double *ca, int *ia, int *nin, double *dev0, double *fdev, double *alm, int *nlp, int *jerrc ); static R_NativePrimitiveArgType coxnet_t[] = { REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(spelnet)( int *ka, double *parm, int *no, int *ni, double *x, int *ix, int *jx, double *y, double *w, 
int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *rsq, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType spelnet_t[] = { INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(elnet)( int *ka, double *parm, int *no, int *ni, double *x, double *y, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *rsq, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType elnet_t[] = { INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(fishnet)( double *parm, int *no, int *ni, double *x, double *y, double *g, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *dev0, double *dev, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType fishnet_t[] = { REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(spfishnet)( double *parm, int *no, int *ni, double *x, int *ix, int *jx, double *y, double *g, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double 
*flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *dev0, double *dev, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType spfishnet_t[] = { REALSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(splognet)( double *parm, int *no, int *ni, int *nc, double *x, int *ix, int *jx, double *y, double *g, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *kopt, int *lmu, double *a0, double *ca, int *ia, int *nin, double *dev0, double *dev, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType splognet_t[] = { REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(lognet)( double *parm, int *no, int *ni, int *nc, double *x, double *y, double *g, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *kopt, int *lmu, double *a0, double *ca, int *ia, int *nin, double *dev0, double *dev, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType lognet_t[] = { REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(multspelnet)( double *parm, int *no, int *ni, int *nr, double *x, int *ix, int *jx, double *y, double *w, int *jd, double 
*vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *jsd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *rsq, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType multspelnet_t[] = { REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(multelnet)( double *parm, int *no, int *ni, int *nr, double *x, double *y, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *jsd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *rsq, double *alm, int *nlp, int *jerr ); static R_NativePrimitiveArgType multelnet_t[] = { REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(loglike)( int *no, int *ni, double *x, double *y, double *d, double *g, double *w, int *nlam, double *a, double *flog, int *jerr ); static R_NativePrimitiveArgType loglike_t[] = { INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP }; void F77_SUB(get_int_parms)( double *sml, double *eps, double *big, int *mnlam, double *rsqmax, double *pmin, double *exmx, int *itrace ); static R_NativePrimitiveArgType get_int_parms_t[] = { REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP }; void F77_SUB(chg_fract_dev)( double *fdev ); static R_NativePrimitiveArgType chg_fract_dev_t[] = { REALSXP }; void F77_SUB(chg_dev_max)( double *devmax ); static R_NativePrimitiveArgType chg_dev_max_t[] = { REALSXP }; void F77_SUB(chg_min_flmin)( 
double *eps ); static R_NativePrimitiveArgType chg_min_flmin_t[] = { REALSXP }; void F77_SUB(chg_big)( double *big ); static R_NativePrimitiveArgType chg_big_t[] = { REALSXP }; void F77_SUB(chg_min_lambdas)( int *mnlam ); static R_NativePrimitiveArgType chg_min_lambdas_t[] = { INTSXP }; void F77_SUB(chg_min_null_prob)( double *pmin ); static R_NativePrimitiveArgType chg_min_null_prob_t[] = { REALSXP }; void F77_SUB(chg_max_exp)( double *exmx ); static R_NativePrimitiveArgType chg_max_exp_t[] = { REALSXP }; void F77_SUB(chg_itrace)( int *itrace ); static R_NativePrimitiveArgType chg_itrace_t[] = { INTSXP }; void F77_SUB(chg_bnorm)( double *prec, int *mxit ); static R_NativePrimitiveArgType chg_bnorm_t[] = { REALSXP, INTSXP }; void F77_SUB(get_bnorm)( double *arg, int *irg ); static R_NativePrimitiveArgType get_bnorm_t[] = { REALSXP, INTSXP }; void F77_SUB(wls)( double *alm0, double *almc, double *alpha, int *m, int *no, int *ni, double *x, double *r, double *v, int *intr, int *ju, double *vp, double *cl, int *nx, double *thr, int *maxit, double *a, double *aint, double *g, int *ia, int *iy, int *iz, int *mm, int *nino, double *rsqc, int *nlp, int *jerr ); static R_NativePrimitiveArgType wls_t[] = { REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, REALSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(spwls)( double *alm0, double *almc, double *alpha, int *m, int *no, int *ni, double *x, int *ix, int *jx, double *xm, double *xs, double *r, double *v, int *intr, int *ju, double *vp, double *cl, int *nx, double *thr, int *maxit, double *a, double *aint, double *g, int *ia, int *iy, int *iz, int *mm, int *nino, double *rsqc, int *nlp, int *jerr ); static R_NativePrimitiveArgType spwls_t[] = { REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, REALSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, 
REALSXP, INTSXP, REALSXP, INTSXP, REALSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP }; void F77_SUB(get_int_parms2)( double *espnr, int *mxitnr ); static R_NativePrimitiveArgType get_int_parms2_t[] = { REALSXP, INTSXP }; void F77_SUB(chg_epsnr)( double *epsnr ); static R_NativePrimitiveArgType chg_epsnr_t[] = { REALSXP }; void F77_SUB(chg_mxitnr)( int *mxitnr ); static R_NativePrimitiveArgType chg_mxitnr_t[] = { INTSXP }; static R_FortranMethodDef fMethods[] = { FDEF(coxnet) , FDEF(spelnet) , FDEF(elnet) , FDEF(fishnet) , FDEF(spfishnet) , FDEF(splognet) , FDEF(lognet) , FDEF(multspelnet) , FDEF(multelnet) , FDEF(loglike) , FDEF(get_int_parms) , FDEF(chg_fract_dev) , FDEF(chg_dev_max) , FDEF(chg_min_flmin) , FDEF(chg_big) , FDEF(chg_min_lambdas) , FDEF(chg_min_null_prob) , FDEF(chg_max_exp) , FDEF(chg_itrace) , FDEF(chg_bnorm) , FDEF(get_bnorm) , FDEF(wls) , FDEF(spwls) , FDEF(get_int_parms2) , FDEF(chg_epsnr) , FDEF(chg_mxitnr) , {NULL, NULL, 0} }; void R_init_glmnet(DllInfo *dll){ R_registerRoutines(dll, NULL, CallEntries, fMethods, NULL); R_useDynamicSymbols(dll, FALSE); } #endif glmnet/src/wls_exp.cpp0000644000175000017500000000671014140271007014646 0ustar nileshnilesh#include #include #include #include using namespace Rcpp; using namespace glmnetpp; // WLS for dense X. 
// [[Rcpp::export]] List wls_exp( double alm0, double almc, double alpha, int m, int no, int ni, const Eigen::Map x, Eigen::Map r, Eigen::Map xv, const Eigen::Map v, int intr, const Eigen::Map ju, const Eigen::Map vp, const Eigen::Map cl, int nx, double thr, int maxit, Eigen::Map a, double aint, Eigen::Map g, Eigen::Map ia, Eigen::Map iy, int iz, Eigen::Map mm, int nino, double rsqc, int nlp, int jerr ) { try { wls( alm0, almc, alpha, m, no, ni, x, r, xv, v, intr, ju, vp, cl, nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp, jerr); } catch (const std::bad_alloc&) { jerr = util::bad_alloc_error().err_code(); } return List::create( Named("almc")=almc, Named("r")=r, Named("xv")=xv, Named("ju")=ju, Named("vp")=vp, Named("cl")=cl, Named("nx")=nx, Named("a")=a, Named("aint")=aint, Named("g")=g, Named("ia")=ia, Named("iy")=iy, Named("iz")=iz, Named("mm")=mm, Named("nino")=nino, Named("rsqc")=rsqc, Named("nlp")=nlp, Named("jerr")=jerr); } // WLS for sparse X. // [[Rcpp::export]] List spwls_exp( double alm0, double almc, double alpha, int m, int no, int ni, const Eigen::Map> x, const Eigen::Map xm, const Eigen::Map xs, Eigen::Map r, Eigen::Map xv, const Eigen::Map v, int intr, const Eigen::Map ju, const Eigen::Map vp, const Eigen::Map cl, int nx, double thr, int maxit, Eigen::Map a, double aint, Eigen::Map g, Eigen::Map ia, Eigen::Map iy, int iz, Eigen::Map mm, int nino, double rsqc, int nlp, int jerr ) { auto x_wrap = make_mapped_sparse_matrix_wrapper(x, xm, xs); try { wls( alm0, almc, alpha, m, no, ni, x_wrap, r, xv, v, intr, ju, vp, cl, nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp, jerr); } catch (const std::bad_alloc&) { jerr = util::bad_alloc_error().err_code(); } return List::create( Named("almc")=almc, Named("r")=r, Named("xv")=xv, Named("ju")=ju, Named("vp")=vp, Named("cl")=cl, Named("nx")=nx, Named("a")=a, Named("aint")=aint, Named("g")=g, Named("ia")=ia, Named("iy")=iy, Named("iz")=iz, Named("mm")=mm, Named("nino")=nino, 
Named("rsqc")=rsqc, Named("nlp")=nlp, Named("jerr")=jerr); } glmnet/src/glmnetpp/0000755000175000017500000000000014140271174014310 5ustar nileshnileshglmnet/src/glmnetpp/CMakeLists.txt0000644000175000017500000001002314140040573017041 0ustar nileshnileshcmake_minimum_required(VERSION 3.7) project("glmnetpp" VERSION 4.1.3 DESCRIPTION "A C++ implementation of elastic net solver.") option(GLMNETPP_ENABLE_TEST "Enable unit tests to be built." ON) option(GLMNETPP_ENABLE_BENCHMARK "Enable benchmarks to be built." OFF) option(GLMNETPP_ENABLE_COVERAGE "Build glmnetpp with coverage" OFF) option(GLMNETPP_MOCK_LEGACY "Build glmnetpp with mock versions of the fortran." OFF) # Add global flags if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always") endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "CLANG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcolor-diagnostics") endif() # Stoopid hack on windows if (WIN32) SET(CMAKE_FIND_LIBRARY_PREFIXES "") SET(CMAKE_FIND_LIBRARY_SUFFIXES ".lib" ".dll") endif() # Dependency on Eigen find_package(Eigen3 3.3 CONFIG REQUIRED NO_MODULE) message(STATUS "Eigen3 found at ${EIGEN3_INCLUDE_DIR}") # Set include dirs set(GLMNETPP_INCLUDEDIR "${${PROJECT_NAME}_SOURCE_DIR}/include") set(GLMNETPP_SOURCEDIR "${${PROJECT_NAME}_SOURCE_DIR}/src") # Add this library as shared library. add_library(${PROJECT_NAME} SHARED) target_include_directories(${PROJECT_NAME} SYSTEM INTERFACE $ $) # Set C++14 standard for project target target_compile_features(${PROJECT_NAME} INTERFACE cxx_std_14) # Set install destinations install(TARGETS ${PROJECT_NAME} EXPORT ${PROJECT_NAME}_Targets ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) # Create GlmnetppConfigVersion.cmake which contains current project version # This is supposed to help with (major) version compatibility. 
include(CMakePackageConfigHelpers) write_basic_package_version_file("${PROJECT_NAME}ConfigVersion.cmake" VERSION ${PROJECT_VERSION} COMPATIBILITY SameMajorVersion) configure_package_config_file( "${PROJECT_SOURCE_DIR}/cmake/${PROJECT_NAME}Config.cmake.in" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(EXPORT ${PROJECT_NAME}_Targets FILE ${PROJECT_NAME}Targets.cmake NAMESPACE ${PROJECT_NAME}:: DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}/cmake) install(DIRECTORY ${PROJECT_SOURCE_DIR}/include DESTINATION ${CMAKE_INSTALL_PREFIX}) # Build source library if (GLMNETPP_ENABLE_TEST OR GLMNETPP_ENABLE_BENCHMARK) if (NOT GLMNETPP_MOCK_LEGACY) # MUST be called at the top-level CMakeLists.txt enable_language(Fortran) endif() endif() add_subdirectory(${PROJECT_SOURCE_DIR}/src) # Automate the choosing of config # if CMAKE_BUILD_TYPE not defined if (NOT CMAKE_BUILD_TYPE) # if binary directory ends with "release", use release mode if (${PROJECT_BINARY_DIR} MATCHES "release$") set(CMAKE_BUILD_TYPE RELEASE) # otherwise, use debug mode else() set(CMAKE_BUILD_TYPE DEBUG) endif() endif() message(STATUS "Compiling in ${CMAKE_BUILD_TYPE} mode") if (GLMNETPP_ENABLE_TEST) # This will perform memcheck include(CTest) enable_testing() # Find googletest find_package(GTest 1.11 CONFIG REQUIRED) # add test subdirectory add_subdirectory(${PROJECT_SOURCE_DIR}/test ${PROJECT_BINARY_DIR}/test) endif() if (GLMNETPP_ENABLE_BENCHMARK) find_package(benchmark REQUIRED HINTS ${CMAKE_CURRENT_SOURCE_DIR}/../../benchmark/build) # add benchmark subdirectory add_subdirectory(${PROJECT_SOURCE_DIR}/benchmark ${PROJECT_BINARY_DIR}/benchmark) endif() 
glmnet/src/glmnetpp/src/0000755000175000017500000000000014140040573015074 5ustar nileshnileshglmnet/src/glmnetpp/src/CMakeLists.txt0000644000175000017500000000304714140040573017640 0ustar nileshnilesh######################################################################## # Legacy Library for compatability tests/benchmarks ######################################################################## if (NOT GLMNETPP_MOCK_LEGACY) add_library(legacy ${CMAKE_CURRENT_SOURCE_DIR}/legacy/wls.f ${CMAKE_CURRENT_SOURCE_DIR}/legacy/glmnet5dpclean.f ${CMAKE_CURRENT_SOURCE_DIR}/legacy/pb.c ) if (NOT R_INCLUDE_DIR) message(FATAL_ERROR "R include directory must be provided by the CMake variable R_INCLUDE_DIR. " "Run Sys.getenv('R_INCLUDE_DIR') in R interpreter and provide this path. ") endif() if (NOT R_LIB_DIR) message(FATAL_ERROR "R lib directory must be provided by the CMake variable R_LIB_DIR. " "Run Sys.getenv('R_HOME') in R interpreter, append /lib, " "and provide the resulting path. ") endif() target_compile_options(legacy PRIVATE -Wall -Wextra) target_include_directories(legacy PRIVATE ${R_INCLUDE_DIR}) find_library(RLIB R REQUIRED HINTS ${R_LIB_DIR}) target_link_libraries(legacy PRIVATE ${RLIB}) endif() ######################################################################## # Add source files to project ######################################################################## target_sources(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/internal.cpp) target_include_directories(${PROJECT_NAME} PRIVATE ${GLMNETPP_INCLUDEDIR}) set_target_properties(${PROJECT_NAME} PROPERTIES VERSION ${PROJECT_VERSION}) glmnet/src/glmnetpp/src/legacy/0000755000175000017500000000000014140040573016340 5ustar nileshnileshglmnet/src/glmnetpp/src/legacy/legacy.h0000644000175000017500000001120714140040573017756 0ustar nileshnilesh#pragma once #ifndef GLMNETPP_MOCK_LEGACY extern "C" { void wls_( double *alm0, double *almc, double *alpha, int *m, int *no, int *ni, double *x, double *r, double *v, 
int *intr, int *ju, double *vp, double *cl, int *nx, double *thr, int *maxit, double *a, double *aint, double *g, int *ia, int *iy, int *iz, int *mm, int *nino, double *rsqc, int *nlp, int *jerr ); void elnet_( int *ka, double *parm, int *no, int *ni, double *x, double *y, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *rsq, double *alm, int *nlp, int *jerr ); void spelnet_( int *ka, double *parm, int *no, int *ni, double *x, int *ix, int *jx, double *y, double *w, int *jd, double *vp, double *cl, int *ne, int *nx, int *nlam, double *flmin, double *ulam, double *thr, int *isd, int *intr, int *maxit, int *lmu, double *a0, double *ca, int *ia, int *nin, double *rsq, double *alm, int *nlp, int *jerr ); void elnet1_( double *beta, int *ni, int *ju, double *vp, double *cl, double *g, int *no, int *ne, int *nx, double *x, int *nlam, double *flmin, double *ulam, double *thr, int *maxit, double *xv, int *lmu, double *ao, int *ia, int *kin, double *rsqo, double *almo, int *nlp, int *jerr ); void elnet2_( double *beta, int *ni, int *ju, double *vp, double *cl, double *y, int *no, int *ne, int *nx, double *x, int *nlam, double *flmin, double *ulam, double *thr, int *maxit, double *xv, int *lmu, double *ao, int *ia, int *kin, double *rsqo, double *almo, int *nlp, int *jerr ); void spelnet1_( double *beta, int *ni, double *g, int *no, double *w, int *ne, int *nx, double *x, int *ix, int *jx, int *ju, double *vp, double *cl, int *nlam, double *flmin, double *ulam, double *thr, int *maxit, double *xm, double *xs, double *xv, int *lmu, double *ao, int *ia, int *kin, double *rsqo, double *almo, int *nlp, int *jerr ); void spelnet2_( double *beta, int *ni, double *gy, double *w, int *no, int *ne, int *nx, double *x, int *ix, int *jx, int *ju, double *vp, double *cl, int *nlam, double *flmin, double *ulam, double *thr, int 
*maxit, double *xm, double *xs, double *xv, int *lmu, double *ao, int *ia, int *kin, double *rsqo, double *almo, int *nlp, int *jerr ); void standard_( int *no, int *ni, double *x, double *y, double *w, int *isd, int *intr, int *ju, double *g, double *xm, double *xs, double *ym, double *ys, double *xv, int *jerr ); void standard1_( int *no, int *ni, double *x, double *y, double *w, int *isd, int *intr, int *ju, double *xm, double *xs, double *ym, double *ys, double *xv, int *jerr ); void spstandard_( int *no, int *ni, double *x, int *ix, int *jx, double *y, double *w, int *ju, int *isd, int *intr, double *g, double *xm, double *xs, double *ym, double *ys, double *xv, int *jerr ); void spstandard1_( int *no, int *ni, double *x, int *ix, int *jx, double *y, double *w, int *ju, int *isd, int *intr, double *xm, double *xs, double *ym, double *ys, double *xv, int *jerr ); void chkvars_( int *no, int *ni, double *x, int *ju ); void spchkvars_( int *no, int *ni, double *x, int *ix, int *ju ); void get_int_parms_( double *sml, double *eps, double *big, int *mnlam, double *rsqmax, double *pmin, double *exmx, int *itrace ); void chg_fract_dev_( double *fdev ); void chg_dev_max_( double *devmax ); void chg_min_flmin_( double *eps ); void chg_big_( double *big ); void chg_min_lambdas_( int *mnlam ); void chg_min_null_prob_( double *pmin ); void chg_max_exp_( double *exmx ); void chg_itrace_( int *itrace ); void chg_bnorm_( double *prec, int *mxit ); void chg_epsnr_( double *epsnr ); void chg_mxitnr_( int *mxitnr ); void setpb_( int *val ); void get_int_parms_( double* sml, double* eps, double* big, int* mnlam, double* rsqmax, double* pmin, double* exmx, int* itrace ); } // end extern "C" #else inline void setpb_(int *m) {} inline void elnet1_( double* beta, int* ni, int* ju, double* vp, double* cl, double* g, int* no, int* ne, int* nx, double* x, int* nlam, double* flmin, double* ulam, double* thr, int* maxit, double* xv, int* lmu, double* ao, int* ia, int* kin, double* rsqo, 
double* almo, int* nlp, int* jerr ) {} inline void elnet2_( double* beta, int* ni, int* ju, double* vp, double* cl, double* y, int* no, int* ne, int* nx, double* x, int* nlam, double* flmin, double* ulam, double* thr, int* maxit, double* xv, int* lmu, double* ao, int* ia, int* kin, double* rsqo, double* almo, int* nlp, int* jerr ) {} #endif glmnet/src/glmnetpp/src/legacy/glmnet5dpclean.f0000644000175000017500000173723414065203754021442 0ustar nileshnileshc mortran 2.0 (version of 7/04/75 mod 7/4/87 (ajc)) subroutine get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace *) implicit double precision(a-h,o-z) data sml0,eps0,big0,mnlam0,rsqmax0,pmin0,exmx0,itrace0 /1.0d-5,1. *0d-6,9.9d35,5,0.999,1.0d-9,250.0,0/ sml=sml0 eps=eps0 big=big0 mnlam=mnlam0 rsqmax=rsqmax0 pmin=pmin0 exmx=exmx0 itrace=itrace0 return entry chg_fract_dev(arg) sml0=arg return entry chg_dev_max(arg) rsqmax0=arg return entry chg_min_flmin(arg) eps0=arg return entry chg_big(arg) big0=arg return entry chg_min_lambdas(irg) mnlam0=irg return entry chg_min_null_prob(arg) pmin0=arg return entry chg_max_exp(arg) exmx0=arg return entry chg_itrace(irg) itrace0=irg return end subroutine elnet(ka,parm,no,ni,x,y,w,jd,vp,cl,ne,nx,nlam, flmin,u *lam,thr,isd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),vp(ni),ca(nx,nlam),cl(2,ni) double precision ulam(nlam),a0(nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 0.0)goto 10021 jerr=10000 return 10021 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) if(ka .ne. 
1)goto 10041 call elnetu (parm,no,ni,x,y,w,jd,vq,cl,ne,nx,nlam,flmin,ulam,thr, *isd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) goto 10051 10041 continue call elnetn (parm,no,ni,x,y,w,jd,vq,cl,ne,nx,nlam,flmin,ulam,thr,i *sd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) 10051 continue continue deallocate(vq) return end subroutine elnetu(parm,no,ni,x,y,w,jd,vp,cl,ne,nx,nlam, flmin,ula *m,thr,isd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,g,xv,vlam integer, dimension (:), allocatable :: ju allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 10071 jerr=7777 return 10071 continue call standard(no,ni,x,y,w,isd,intr,ju,g,xm,xs,ym,ys,xv,jerr) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 10091 do 10101 j=1,ni cl(:,j)=cl(:,j)*xs(j) 10101 continue continue 10091 continue if(flmin.ge.1.0) vlam=ulam/ys call elnet1(parm,ni,ju,vp,cl,g,no,ne,nx,x,nlam,flmin,vlam,thr,maxi *t,xv, lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 10111 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 10121 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 10121 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 10111 continue continue deallocate(xm,xs,g,ju,xv,vlam) return end subroutine standard(no,ni,x,y,w,isd,intr,ju,g,xm,xs,ym,ys,xv,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),g(ni),xm(ni),xs(ni),xv(ni) integer ju(ni) double precision, dimension (:), allocatable :: v allocate(v(1:no),stat=jerr) if(jerr.ne.0) return w=w/sum(w) v=sqrt(w) if(intr .ne. 0)goto 10141 ym=0.0 y=v*y ys=sqrt(dot_product(y,y)) y=y/ys do 10151 j=1,ni if(ju(j).eq.0)goto 10151 xm(j)=0.0 x(:,j)=v*x(:,j) xv(j)=dot_product(x(:,j),x(:,j)) if(isd .eq. 0)goto 10171 xbq=dot_product(v,x(:,j))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc goto 10181 10171 continue xs(j)=1.0 10181 continue continue 10151 continue continue goto 10191 10141 continue do 10201 j=1,ni if(ju(j).eq.0)goto 10201 xm(j)=dot_product(w,x(:,j)) x(:,j)=v*(x(:,j)-xm(j)) xv(j)=dot_product(x(:,j),x(:,j)) if(isd.gt.0) xs(j)=sqrt(xv(j)) 10201 continue continue if(isd .ne. 
0)goto 10221 xs=1.0 goto 10231 10221 continue do 10241 j=1,ni if(ju(j).eq.0)goto 10241 x(:,j)=x(:,j)/xs(j) 10241 continue continue xv=1.0 10231 continue continue ym=dot_product(w,y) y=v*(y-ym) ys=sqrt(dot_product(y,y)) y=y/ys 10191 continue continue g=0.0 do 10251 j=1,ni if(ju(j).ne.0) g(j)=dot_product(y,x(:,j)) 10251 continue continue deallocate(v) return end subroutine elnet1(beta,ni,ju,vp,cl,g,no,ne,nx,x,nlam,flmin,ulam,th *r,maxit,xv, lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),g(ni),x(no,ni),ulam(nlam),ao(nx,nlam) double precision rsqo(nlam),almo(nlam),xv(ni) double precision cl(2,ni) integer ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,da integer, dimension (:), allocatable :: mm double precision, dimension (:,:), allocatable :: c allocate(c(1:ni,1:nx),stat=jerr) if(jerr.ne.0) return; call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(da(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 alf=1.0 if(flmin .ge. 1.0)goto 10271 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 10271 continue rsq=0.0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 10281 m=1,nlam if(itrace.ne.0) call setpb(m-1) if(flmin .lt. 1.0)goto 10301 alm=ulam(m) goto 10291 10301 if(m .le. 2)goto 10311 alm=alm*alf goto 10291 10311 if(m .ne. 1)goto 10321 alm=big goto 10331 10321 continue alm=0.0 do 10341 j=1,ni if(ju(j).eq.0)goto 10341 if(vp(j).le.0.0)goto 10341 alm=max(alm,abs(g(j))/vp(j)) 10341 continue continue alm=alf*alm/max(bta,1.0d-3) 10331 continue 10291 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 continue 10351 continue if(iz*jz.ne.0) go to 10360 nlp=nlp+1 dlx=0.0 do 10371 k=1,ni if(ju(k).eq.0)goto 10371 ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 10371 if(mm(k) .ne. 
0)goto 10391 nin=nin+1 if(nin.gt.nx)goto 10372 do 10401 j=1,ni if(ju(j).eq.0)goto 10401 if(mm(j) .eq. 0)goto 10421 c(j,nin)=c(k,mm(j)) goto 10401 10421 continue if(j .ne. k)goto 10441 c(j,nin)=xv(j) goto 10401 10441 continue c(j,nin)=dot_product(x(:,j),x(:,k)) 10401 continue continue mm(k)=nin ia(nin)=k 10391 continue del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 10451 j=1,ni if(ju(j).ne.0) g(j)=g(j)-c(j,mm(k))*del 10451 continue continue 10371 continue 10372 continue if(dlx.lt.thr)goto 10352 if(nin.gt.nx)goto 10352 if(nlp .le. maxit)goto 10471 jerr=-m return 10471 continue 10360 continue iz=1 da(1:nin)=a(ia(1:nin)) continue 10481 continue nlp=nlp+1 dlx=0.0 do 10491 l=1,nin k=ia(l) ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 10491 del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 10501 j=1,nin g(ia(j))=g(ia(j))-c(ia(j),mm(k))*del 10501 continue continue 10491 continue continue if(dlx.lt.thr)goto 10482 if(nlp .le. maxit)goto 10521 jerr=-m return 10521 continue goto 10481 10482 continue da(1:nin)=a(ia(1:nin))-da(1:nin) do 10531 j=1,ni if(mm(j).ne.0)goto 10531 if(ju(j).ne.0) g(j)=g(j)-dot_product(da(1:nin),c(j,1:nin)) 10531 continue continue jz=0 goto 10351 10352 continue if(nin .le. 
nx)goto 10551 jerr=-10000-m goto 10282 10551 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 10281 if(flmin.ge.1.0)goto 10281 me=0 do 10561 j=1,nin if(ao(j,m).ne.0.0) me=me+1 10561 continue continue if(me.gt.ne)goto 10282 if(rsq-rsq0.lt.sml*rsq)goto 10282 if(rsq.gt.rsqmax)goto 10282 10281 continue 10282 continue deallocate(a,mm,c,da) return end subroutine elnetn(parm,no,ni,x,y,w,jd,vp,cl,ne,nx,nlam,flmin,ulam, *thr,isd, intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),x(no,ni),y(no),w(no),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,vlam integer, dimension (:), allocatable :: ju allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 10581 jerr=7777 return 10581 continue call standard1(no,ni,x,y,w,isd,intr,ju,xm,xs,ym,ys,xv,jerr) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 10601 do 10611 j=1,ni cl(:,j)=cl(:,j)*xs(j) 10611 continue continue 10601 continue if(flmin.ge.1.0) vlam=ulam/ys call elnet2(parm,ni,ju,vp,cl,y,no,ne,nx,x,nlam,flmin,vlam,thr,maxi *t,xv, lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 10621 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 10631 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 10631 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 10621 continue continue deallocate(xm,xs,ju,xv,vlam) return end subroutine standard1(no,ni,x,y,w,isd,intr,ju,xm,xs,ym,ys,xv,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),w(no),xm(ni),xs(ni),xv(ni) integer ju(ni) double precision, dimension (:), allocatable :: v allocate(v(1:no),stat=jerr) if(jerr.ne.0) return w=w/sum(w) v=sqrt(w) if(intr .ne. 0)goto 10651 ym=0.0 y=v*y ys=sqrt(dot_product(y,y)) y=y/ys do 10661 j=1,ni if(ju(j).eq.0)goto 10661 xm(j)=0.0 x(:,j)=v*x(:,j) xv(j)=dot_product(x(:,j),x(:,j)) if(isd .eq. 0)goto 10681 xbq=dot_product(v,x(:,j))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc goto 10691 10681 continue xs(j)=1.0 10691 continue continue 10661 continue continue go to 10700 10651 continue do 10711 j=1,ni if(ju(j).eq.0)goto 10711 xm(j)=dot_product(w,x(:,j)) x(:,j)=v*(x(:,j)-xm(j)) xv(j)=dot_product(x(:,j),x(:,j)) if(isd.gt.0) xs(j)=sqrt(xv(j)) 10711 continue continue if(isd .ne. 
0)goto 10731 xs=1.0 goto 10741 10731 continue do 10751 j=1,ni if(ju(j).eq.0)goto 10751 x(:,j)=x(:,j)/xs(j) 10751 continue continue xv=1.0 10741 continue continue ym=dot_product(w,y) y=v*(y-ym) ys=sqrt(dot_product(y,y)) y=y/ys 10700 continue deallocate(v) return end subroutine elnet2(beta,ni,ju,vp,cl,y,no,ne,nx,x,nlam,flmin,ulam,th *r,maxit,xv, lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),y(no),x(no,ni),ulam(nlam),ao(nx,nlam) double precision rsqo(nlam),almo(nlam),xv(ni) double precision cl(2,ni) integer ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,g integer, dimension (:), allocatable :: mm,ix call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ix(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta ix=0 alf=1.0 if(flmin .ge. 1.0)goto 10771 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 10771 continue rsq=0.0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) alm=0.0 do 10781 j=1,ni if(ju(j).eq.0)goto 10781 g(j)=abs(dot_product(y,x(:,j))) 10781 continue continue do 10791 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 10811 alm=ulam(m) goto 10801 10811 if(m .le. 2)goto 10821 alm=alm*alf goto 10801 10821 if(m .ne. 1)goto 10831 alm=big goto 10841 10831 continue alm0=0.0 do 10851 j=1,ni if(ju(j).eq.0)goto 10851 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 10851 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 10841 continue 10801 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 10861 k=1,ni if(ix(k).eq.1)goto 10861 if(ju(k).eq.0)goto 10861 if(g(k).gt.tlam*vp(k)) ix(k)=1 10861 continue continue continue 10871 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. 
maxit)goto 10901 jerr=-m return 10901 continue nlp=nlp+1 dlx=0.0 do 10911 k=1,ni if(ix(k).eq.0)goto 10911 gk=dot_product(y,x(:,k)) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 10911 if(mm(k) .ne. 0)goto 10931 nin=nin+1 if(nin.gt.nx)goto 10912 mm(k)=nin ia(nin)=k 10931 continue del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y=y-del*x(:,k) dlx=max(xv(k)*del**2,dlx) 10911 continue 10912 continue if(nin.gt.nx)goto 10872 if(dlx .ge. thr)goto 10951 ixx=0 do 10961 k=1,ni if(ix(k).eq.1)goto 10961 if(ju(k).eq.0)goto 10961 g(k)=abs(dot_product(y,x(:,k))) if(g(k) .le. ab*vp(k))goto 10981 ix(k)=1 ixx=1 10981 continue 10961 continue continue if(ixx.eq.1) go to 10880 goto 10872 10951 continue if(nlp .le. maxit)goto 11001 jerr=-m return 11001 continue 10360 continue iz=1 continue 11011 continue nlp=nlp+1 dlx=0.0 do 11021 l=1,nin k=ia(l) gk=dot_product(y,x(:,k)) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11021 del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y=y-del*x(:,k) dlx=max(xv(k)*del**2,dlx) 11021 continue continue if(dlx.lt.thr)goto 11012 if(nlp .le. maxit)goto 11041 jerr=-m return 11041 continue goto 11011 11012 continue jz=0 goto 10871 10872 continue if(nin .le. 
nx)goto 11061 jerr=-10000-m goto 10792 11061 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 10791 if(flmin.ge.1.0)goto 10791 me=0 do 11071 j=1,nin if(ao(j,m).ne.0.0) me=me+1 11071 continue continue if(me.gt.ne)goto 10792 if(rsq-rsq0.lt.sml*rsq)goto 10792 if(rsq.gt.rsqmax)goto 10792 10791 continue 10792 continue deallocate(a,mm,g,ix) return end subroutine chkvars(no,ni,x,ju) implicit double precision(a-h,o-z) double precision x(no,ni) integer ju(ni) do 11081 j=1,ni ju(j)=0 t=x(1,j) do 11091 i=2,no if(x(i,j).eq.t)goto 11091 ju(j)=1 goto 11092 11091 continue 11092 continue 11081 continue continue return end subroutine uncomp(ni,ca,ia,nin,a) implicit double precision(a-h,o-z) double precision ca(*),a(ni) integer ia(*) a=0.0 if(nin.gt.0) a(ia(1:nin))=ca(1:nin) return end subroutine modval(a0,ca,ia,nin,n,x,f) implicit double precision(a-h,o-z) double precision ca(nin),x(n,*),f(n) integer ia(nin) f=a0 if(nin.le.0) return do 11101 i=1,n f(i)=f(i)+dot_product(ca(1:nin),x(i,ia(1:nin))) 11101 continue continue return end subroutine spelnet(ka,parm,no,ni,x,ix,jx,y,w,jd,vp,cl,ne,nx,nlam, * flmin,ulam,thr,isd,intr, maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr *) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 0.0)goto 11121 jerr=10000 return 11121 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) if(ka .ne. 
1)goto 11141 call spelnetu (parm,no,ni,x,ix,jx,y,w,jd,vq,cl,ne,nx,nlam,flmin,u *lam,thr,isd, intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) goto 11151 11141 continue call spelnetn (parm,no,ni,x,ix,jx,y,w,jd,vq,cl,ne,nx,nlam,flmin,ul *am,thr,isd,intr, maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) 11151 continue continue deallocate(vq) return end subroutine spelnetu(parm,no,ni,x,ix,jx,y,w,jd,vp,cl,ne,nx,nlam, f *lmin,ulam,thr,isd,intr, maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,g,xv,vlam integer, dimension (:), allocatable :: ju allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 11171 jerr=7777 return 11171 continue call spstandard(no,ni,x,ix,jx,y,w,ju,isd,intr,g,xm,xs,ym,ys,xv,jer *r) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 11191 do 11201 j=1,ni cl(:,j)=cl(:,j)*xs(j) 11201 continue continue 11191 continue if(flmin.ge.1.0) vlam=ulam/ys call spelnet1(parm,ni,g,no,w,ne,nx,x,ix,jx,ju,vp,cl,nlam,flmin,vla *m,thr,maxit, xm,xs,xv,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 11211 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 11221 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 11221 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 11211 continue continue deallocate(xm,xs,g,ju,xv,vlam) return end subroutine spstandard(no,ni,x,ix,jx,y,w,ju,isd,intr,g,xm,xs,ym,ys, *xv,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),g(ni),xm(ni),xs(ni),xv(ni) integer ix(*),jx(*),ju(ni) jerr = jerr*1 w=w/sum(w) if(intr .ne. 0)goto 11241 ym=0.0 ys=sqrt(dot_product(w,y**2)) y=y/ys do 11251 j=1,ni if(ju(j).eq.0)goto 11251 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .eq. 0)goto 11271 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 11281 11271 continue xs(j)=1.0 11281 continue continue 11251 continue continue goto 11291 11241 continue do 11301 j=1,ni if(ju(j).eq.0)goto 11301 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd.gt.0) xs(j)=sqrt(xv(j)) 11301 continue continue if(isd .ne. 
0)goto 11321 xs=1.0 goto 11331 11321 continue xv=1.0 11331 continue continue ym=dot_product(w,y) y=y-ym ys=sqrt(dot_product(w,y**2)) y=y/ys 11291 continue continue g=0.0 do 11341 j=1,ni if(ju(j).eq.0)goto 11341 jb=ix(j) je=ix(j+1)-1 g(j)=dot_product(w(jx(jb:je))*y(jx(jb:je)),x(jb:je))/xs(j) 11341 continue continue return end subroutine spelnet1(beta,ni,g,no,w,ne,nx,x,ix,jx,ju,vp,cl,nlam,flm *in,ulam, thr,maxit,xm,xs,xv,lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision g(ni),vp(ni),x(*),ulam(nlam),w(no) double precision ao(nx,nlam),rsqo(nlam),almo(nlam) double precision xm(ni),xs(ni),xv(ni),cl(2,ni) integer ix(*),jx(*),ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,da integer, dimension (:), allocatable :: mm double precision, dimension (:,:), allocatable :: c allocate(c(1:ni,1:nx),stat=jerr) if(jerr.ne.0) return; call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(da(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 alf=1.0 if(flmin .ge. 1.0)goto 11361 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 11361 continue rsq=0.0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 11371 m=1,nlam if(itrace.ne.0) call setpb(m-1) if(flmin .lt. 1.0)goto 11391 alm=ulam(m) goto 11381 11391 if(m .le. 2)goto 11401 alm=alm*alf goto 11381 11401 if(m .ne. 1)goto 11411 alm=big goto 11421 11411 continue alm=0.0 do 11431 j=1,ni if(ju(j).eq.0)goto 11431 if(vp(j).le.0.0)goto 11431 alm=max(alm,abs(g(j))/vp(j)) 11431 continue continue alm=alf*alm/max(bta,1.0d-3) 11421 continue 11381 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 continue 11441 continue if(iz*jz.ne.0) go to 10360 nlp=nlp+1 dlx=0.0 do 11451 k=1,ni if(ju(k).eq.0)goto 11451 ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11451 if(mm(k) .ne. 
0)goto 11471 nin=nin+1 if(nin.gt.nx)goto 11452 do 11481 j=1,ni if(ju(j).eq.0)goto 11481 if(mm(j) .eq. 0)goto 11501 c(j,nin)=c(k,mm(j)) goto 11481 11501 continue if(j .ne. k)goto 11521 c(j,nin)=xv(j) goto 11481 11521 continue c(j,nin)= (row_prod(j,k,ix,jx,x,w)-xm(j)*xm(k))/(xs(j)*xs(k)) 11481 continue continue mm(k)=nin ia(nin)=k 11471 continue del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 11531 j=1,ni if(ju(j).ne.0) g(j)=g(j)-c(j,mm(k))*del 11531 continue continue 11451 continue 11452 continue if(dlx.lt.thr)goto 11442 if(nin.gt.nx)goto 11442 if(nlp .le. maxit)goto 11551 jerr=-m return 11551 continue 10360 continue iz=1 da(1:nin)=a(ia(1:nin)) continue 11561 continue nlp=nlp+1 dlx=0.0 do 11571 l=1,nin k=ia(l) ak=a(k) u=g(k)+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11571 del=a(k)-ak rsq=rsq+del*(2.0*g(k)-del*xv(k)) dlx=max(xv(k)*del**2,dlx) do 11581 j=1,nin g(ia(j))=g(ia(j))-c(ia(j),mm(k))*del 11581 continue continue 11571 continue continue if(dlx.lt.thr)goto 11562 if(nlp .le. maxit)goto 11601 jerr=-m return 11601 continue goto 11561 11562 continue da(1:nin)=a(ia(1:nin))-da(1:nin) do 11611 j=1,ni if(mm(j).ne.0)goto 11611 if(ju(j).ne.0) g(j)=g(j)-dot_product(da(1:nin),c(j,1:nin)) 11611 continue continue jz=0 goto 11441 11442 continue if(nin .le. 
nx)goto 11631 jerr=-10000-m goto 11372 11631 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 11371 if(flmin.ge.1.0)goto 11371 me=0 do 11641 j=1,nin if(ao(j,m).ne.0.0) me=me+1 11641 continue continue if(me.gt.ne)goto 11372 if(rsq-rsq0.lt.sml*rsq)goto 11372 if(rsq.gt.rsqmax)goto 11372 11371 continue 11372 continue deallocate(a,mm,c,da) return end subroutine spelnetn(parm,no,ni,x,ix,jx,y,w,jd,vp,cl,ne,nx,nlam,flm *in,ulam, thr,isd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),vp(ni),y(no),w(no),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,vlam integer, dimension (:), allocatable :: ju allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vlam(1:nlam),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 11661 jerr=7777 return 11661 continue call spstandard1(no,ni,x,ix,jx,y,w,ju,isd,intr,xm,xs,ym,ys,xv,jerr *) if(jerr.ne.0) return cl=cl/ys if(isd .le. 
0)goto 11681 do 11691 j=1,ni cl(:,j)=cl(:,j)*xs(j) 11691 continue continue 11681 continue if(flmin.ge.1.0) vlam=ulam/ys call spelnet2(parm,ni,y,w,no,ne,nx,x,ix,jx,ju,vp,cl,nlam,flmin,vla *m,thr,maxit, xm,xs,xv,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 11701 k=1,lmu alm(k)=ys*alm(k) nk=nin(k) do 11711 l=1,nk ca(l,k)=ys*ca(l,k)/xs(ia(l)) 11711 continue continue a0(k)=0.0 if(intr.ne.0) a0(k)=ym-dot_product(ca(1:nk,k),xm(ia(1:nk))) 11701 continue continue deallocate(xm,xs,ju,xv,vlam) return end subroutine spstandard1(no,ni,x,ix,jx,y,w,ju,isd,intr,xm,xs,ym,ys,x *v,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),w(no),xm(ni),xs(ni),xv(ni) integer ix(*),jx(*),ju(ni) jerr = jerr*1 w=w/sum(w) if(intr .ne. 0)goto 11731 ym=0.0 ys=sqrt(dot_product(w,y**2)) y=y/ys do 11741 j=1,ni if(ju(j).eq.0)goto 11741 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .eq. 0)goto 11761 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 11771 11761 continue xs(j)=1.0 11771 continue continue 11741 continue continue return 11731 continue do 11781 j=1,ni if(ju(j).eq.0)goto 11781 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd.gt.0) xs(j)=sqrt(xv(j)) 11781 continue continue if(isd .ne. 
0)goto 11801 xs=1.0 goto 11811 11801 continue xv=1.0 11811 continue continue ym=dot_product(w,y) y=y-ym ys=sqrt(dot_product(w,y**2)) y=y/ys return end subroutine spelnet2(beta,ni,y,w,no,ne,nx,x,ix,jx,ju,vp,cl,nlam,flm *in,ulam, thr,maxit,xm,xs,xv,lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision y(no),w(no),x(*),vp(ni),ulam(nlam),cl(2,ni) double precision ao(nx,nlam),rsqo(nlam),almo(nlam),xm(ni),xs(ni),x *v(ni) integer ix(*),jx(*),ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: a,g integer, dimension (:), allocatable :: mm,iy call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 iy=0 alf=1.0 if(flmin .ge. 1.0)goto 11831 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 11831 continue rsq=0.0 a=0.0 mm=0 o=0.0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 11841 j=1,ni if(ju(j).eq.0)goto 11841 jb=ix(j) je=ix(j+1)-1 g(j)=abs(dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(j)) 11841 continue continue do 11851 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 11871 alm=ulam(m) goto 11861 11871 if(m .le. 2)goto 11881 alm=alm*alf goto 11861 11881 if(m .ne. 1)goto 11891 alm=big goto 11901 11891 continue alm0=0.0 do 11911 j=1,ni if(ju(j).eq.0)goto 11911 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 11911 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 11901 continue 11861 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 11921 k=1,ni if(iy(k).eq.1)goto 11921 if(ju(k).eq.0)goto 11921 if(g(k).gt.tlam*vp(k)) iy(k)=1 11921 continue continue continue 11931 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. 
maxit)goto 11951 jerr=-m return 11951 continue nlp=nlp+1 dlx=0.0 do 11961 k=1,ni if(iy(k).eq.0)goto 11961 jb=ix(k) je=ix(k+1)-1 gk=dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(k) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 11961 if(mm(k) .ne. 0)goto 11981 nin=nin+1 if(nin.gt.nx)goto 11962 mm(k)=nin ia(nin)=k 11981 continue del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y(jx(jb:je))=y(jx(jb:je))-del*x(jb:je)/xs(k) o=o+del*xm(k)/xs(k) dlx=max(xv(k)*del**2,dlx) 11961 continue 11962 continue if(nin.gt.nx)goto 11932 if(dlx .ge. thr)goto 12001 ixx=0 do 12011 j=1,ni if(iy(j).eq.1)goto 12011 if(ju(j).eq.0)goto 12011 jb=ix(j) je=ix(j+1)-1 g(j)=abs(dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(j)) if(g(j) .le. ab*vp(j))goto 12031 iy(j)=1 ixx=1 12031 continue 12011 continue continue if(ixx.eq.1) go to 10880 goto 11932 12001 continue if(nlp .le. maxit)goto 12051 jerr=-m return 12051 continue 10360 continue iz=1 continue 12061 continue nlp=nlp+1 dlx=0.0 do 12071 l=1,nin k=ia(l) jb=ix(k) je=ix(k+1)-1 gk=dot_product(y(jx(jb:je))+o,w(jx(jb:je))*x(jb:je))/xs(k) ak=a(k) u=gk+ak*xv(k) v=abs(u)-vp(k)*ab a(k)=0.0 if(v.gt.0.0) a(k)=max(cl(1,k),min(cl(2,k),sign(v,u)/(xv(k)+vp(k)*d *em))) if(a(k).eq.ak)goto 12071 del=a(k)-ak rsq=rsq+del*(2.0*gk-del*xv(k)) y(jx(jb:je))=y(jx(jb:je))-del*x(jb:je)/xs(k) o=o+del*xm(k)/xs(k) dlx=max(xv(k)*del**2,dlx) 12071 continue continue if(dlx.lt.thr)goto 12062 if(nlp .le. maxit)goto 12091 jerr=-m return 12091 continue goto 12061 12062 continue jz=0 goto 11931 11932 continue if(nin .le. 
nx)goto 12111 jerr=-10000-m goto 11852 12111 continue if(nin.gt.0) ao(1:nin,m)=a(ia(1:nin)) kin(m)=nin rsqo(m)=rsq almo(m)=alm lmu=m if(m.lt.mnl)goto 11851 if(flmin.ge.1.0)goto 11851 me=0 do 12121 j=1,nin if(ao(j,m).ne.0.0) me=me+1 12121 continue continue if(me.gt.ne)goto 11852 if(rsq-rsq0.lt.sml*rsq)goto 11852 if(rsq.gt.rsqmax)goto 11852 11851 continue 11852 continue deallocate(a,mm,g,iy) return end subroutine spchkvars(no,ni,x,ix,ju) implicit double precision(a-h,o-z) double precision x(*) integer ix(*),ju(ni) do 12131 j=1,ni ju(j)=0 jb=ix(j) nj=ix(j+1)-jb if(nj.eq.0)goto 12131 je=ix(j+1)-1 if(nj .ge. no)goto 12151 do 12161 i=jb,je if(x(i).eq.0.0)goto 12161 ju(j)=1 goto 12162 12161 continue 12162 continue goto 12171 12151 continue t=x(jb) do 12181 i=jb+1,je if(x(i).eq.t)goto 12181 ju(j)=1 goto 12182 12181 continue 12182 continue 12171 continue continue 12131 continue continue return end subroutine cmodval(a0,ca,ia,nin,x,ix,jx,n,f) implicit double precision(a-h,o-z) double precision ca(*),x(*),f(n) integer ia(*),ix(*),jx(*) f=a0 do 12191 j=1,nin k=ia(j) kb=ix(k) ke=ix(k+1)-1 f(jx(kb:ke))=f(jx(kb:ke))+ca(j)*x(kb:ke) 12191 continue continue return end function row_prod(i,j,ia,ja,ra,w) implicit double precision(a-h,o-z) integer ia(*),ja(*) double precision ra(*),w(*) row_prod=dot(ra(ia(i)),ra(ia(j)),ja(ia(i)),ja(ia(j)), ia(i+1)-ia( *i),ia(j+1)-ia(j),w) return end function dot(x,y,mx,my,nx,ny,w) implicit double precision(a-h,o-z) double precision x(*),y(*),w(*) integer mx(*),my(*) i=1 j=i s=0.0 continue 12201 continue continue 12211 if(mx(i).ge.my(j))goto 12212 i=i+1 if(i.gt.nx) go to 12220 goto 12211 12212 continue if(mx(i).eq.my(j)) go to 12230 continue 12241 if(my(j).ge.mx(i))goto 12242 j=j+1 if(j.gt.ny) go to 12220 goto 12241 12242 continue if(mx(i).eq.my(j)) go to 12230 goto 12201 12230 continue s=s+w(mx(i))*x(i)*y(j) i=i+1 if(i.gt.nx)goto 12202 j=j+1 if(j.gt.ny)goto 12202 goto 12201 12202 continue 12220 continue dot=s return end subroutine 
lognet(parm,no,ni,nc,x,y,g,jd,vp,cl,ne,nx,nlam,flmin,ul *am,thr, isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jer *r) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,max(2,nc)),g(no,nc),vp(ni),ulam(nla *m) double precision ca(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl *(2,ni) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq,xv integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 12261 jerr=10000 return 12261 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return if(kopt .ne. 2)goto 12281 allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return 12281 continue if(isd .le. 0)goto 12301 allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return 12301 continue call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 12321 jerr=7777 return 12321 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) do 12331 i=1,no ww(i)=sum(y(i,:)) if(ww(i).gt.0.0) y(i,:)=y(i,:)/ww(i) 12331 continue continue sw=sum(ww) ww=ww/sw if(nc .ne. 1)goto 12351 call lstandard1(no,ni,x,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 12371 do 12381 j=1,ni cl(:,j)=cl(:,j)*xs(j) 12381 continue continue 12371 continue call lognet2n(parm,no,ni,x,y(:,1),g(:,1),ww,ju,vq,cl,ne,nx,nlam,fl *min,ulam, thr,isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm,n *lp,jerr) goto 12341 12351 if(kopt .ne. 2)goto 12391 call multlstandard1(no,ni,x,ww,ju,isd,intr,xm,xs,xv) if(isd .le. 0)goto 12411 do 12421 j=1,ni cl(:,j)=cl(:,j)*xs(j) 12421 continue continue 12411 continue call multlognetn(parm,no,ni,nc,x,y,g,ww,ju,vq,cl,ne,nx,nlam,flmin, *ulam,thr, intr,maxit,xv,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) goto 12431 12391 continue call lstandard1(no,ni,x,ww,ju,isd,intr,xm,xs) if(isd .le. 
0)goto 12451 do 12461 j=1,ni cl(:,j)=cl(:,j)*xs(j) 12461 continue continue 12451 continue call lognetn(parm,no,ni,nc,x,y,g,ww,ju,vq,cl,ne,nx,nlam,flmin,ulam *,thr, isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) 12431 continue 12341 continue if(jerr.gt.0) return dev0=2.0*sw*dev0 do 12471 k=1,lmu nk=nin(k) do 12481 ic=1,nc if(isd .le. 0)goto 12501 do 12511 l=1,nk ca(l,ic,k)=ca(l,ic,k)/xs(ia(l)) 12511 continue continue 12501 continue if(intr .ne. 0)goto 12531 a0(ic,k)=0.0 goto 12541 12531 continue a0(ic,k)=a0(ic,k)-dot_product(ca(1:nk,ic,k),xm(ia(1:nk))) 12541 continue continue 12481 continue continue 12471 continue continue deallocate(ww,ju,vq,xm) if(isd.gt.0) deallocate(xs) if(kopt.eq.2) deallocate(xv) return end subroutine lstandard1(no,ni,x,w,ju,isd,intr,xm,xs) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),xm(ni),xs(ni) integer ju(ni) if(intr .ne. 0)goto 12561 do 12571 j=1,ni if(ju(j).eq.0)goto 12571 xm(j)=0.0 if(isd .eq. 0)goto 12591 vc=dot_product(w,x(:,j)**2)-dot_product(w,x(:,j))**2 xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) 12591 continue 12571 continue continue return 12561 continue do 12601 j=1,ni if(ju(j).eq.0)goto 12601 xm(j)=dot_product(w,x(:,j)) x(:,j)=x(:,j)-xm(j) if(isd .le. 0)goto 12621 xs(j)=sqrt(dot_product(w,x(:,j)**2)) x(:,j)=x(:,j)/xs(j) 12621 continue 12601 continue continue return end subroutine multlstandard1(no,ni,x,w,ju,isd,intr,xm,xs,xv) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),xm(ni),xs(ni),xv(ni) integer ju(ni) if(intr .ne. 0)goto 12641 do 12651 j=1,ni if(ju(j).eq.0)goto 12651 xm(j)=0.0 xv(j)=dot_product(w,x(:,j)**2) if(isd .eq. 0)goto 12671 xbq=dot_product(w,x(:,j))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc 12671 continue 12651 continue continue return 12641 continue do 12681 j=1,ni if(ju(j).eq.0)goto 12681 xm(j)=dot_product(w,x(:,j)) x(:,j)=x(:,j)-xm(j) xv(j)=dot_product(w,x(:,j)**2) if(isd .le. 
0)goto 12701 xs(j)=sqrt(xv(j)) x(:,j)=x(:,j)/xs(j) xv(j)=1.0 12701 continue 12681 continue continue return end subroutine lognet2n(parm,no,ni,x,y,g,w,ju,vp,cl,ne,nx,nlam,flmin,u *lam,shri, isd,intr,maxit,kopt,lmu,a0,a,m,kin,dev0,dev,alm,nlp,jer *r) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),w(no),vp(ni),ulam(nlam),cl(2 *,ni) double precision a(nx,nlam),a0(nlam),dev(nlam),alm(nlam) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: b,bs,v,r,xv,q,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) allocate(b(0:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(q(1:no),stat=jerr) if(jerr.ne.0) return fmax=log(1.0/pmin-1.0) fmin=-fmax vmin=(1.0+pmin)*pmin*(1.0-pmin) bta=parm omb=1.0-bta q0=dot_product(w,y) if(q0 .gt. pmin)goto 12721 jerr=8001 return 12721 continue if(q0 .lt. 1.0-pmin)goto 12741 jerr=9001 return 12741 continue if(intr.eq.0.0) q0=0.5 ixx=0 al=0.0 bz=0.0 if(intr.ne.0) bz=log(q0/(1.0-q0)) if(nonzero(no,g) .ne. 0)goto 12761 vi=q0*(1.0-q0) b(0)=bz v=vi*w r=w*(y-q0) q=q0 xmz=vi dev1=-(bz*q0+log(1.0-q0)) goto 12771 12761 continue b(0)=0.0 if(intr .eq. 0)goto 12791 b(0)=azero(no,y,g,w,jerr) if(jerr.ne.0) return 12791 continue q=1.0/(1.0+exp(-b(0)-g)) v=w*q*(1.0-q) r=w*(y-q) xmz=sum(v) dev1=-(b(0)*q0+dot_product(w,y*g+log(1.0-q))) 12771 continue continue if(kopt .le. 0)goto 12811 if(isd .le. 0 .or. intr .eq. 
0)goto 12831 xv=0.25 goto 12841 12831 continue do 12851 j=1,ni if(ju(j).ne.0) xv(j)=0.25*dot_product(w,x(:,j)**2) 12851 continue continue 12841 continue continue 12811 continue dev0=dev1 do 12861 i=1,no if(y(i).gt.0.0) dev0=dev0+w(i)*y(i)*log(y(i)) if(y(i).lt.1.0) dev0=dev0+w(i)*(1.0-y(i))*log(1.0-y(i)) 12861 continue continue alf=1.0 if(flmin .ge. 1.0)goto 12881 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 12881 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) bs=0.0 b(1:ni)=0.0 shr=shri*dev0 do 12891 j=1,ni if(ju(j).eq.0)goto 12891 ga(j)=abs(dot_product(r,x(:,j))) 12891 continue continue do 12901 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 12921 al=ulam(ilm) goto 12911 12921 if(ilm .le. 2)goto 12931 al=al*alf goto 12911 12931 if(ilm .ne. 1)goto 12941 al=big goto 12951 12941 continue al0=0.0 do 12961 j=1,ni if(ju(j).eq.0)goto 12961 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 12961 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 12951 continue 12911 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 12971 k=1,ni if(ixx(k).eq.1)goto 12971 if(ju(k).eq.0)goto 12971 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 12971 continue continue 10880 continue continue 12981 continue if(nlp .le. maxit)goto 13001 jerr=-ilm return 13001 continue bs(0)=b(0) if(nin.gt.0) bs(m(1:nin))=b(m(1:nin)) if(kopt .ne. 0)goto 13021 do 13031 j=1,ni if(ixx(j).gt.0) xv(j)=dot_product(v,x(:,j)**2) 13031 continue continue 13021 continue continue 13041 continue nlp=nlp+1 dlx=0.0 do 13051 k=1,ni if(ixx(k).eq.0)goto 13051 bk=b(k) gk=dot_product(r,x(:,k)) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 13071 b(k)=0.0 goto 13081 13071 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 13081 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 13051 dlx=max(dlx,xv(k)*d**2) r=r-d*v*x(:,k) if(mm(k) .ne. 
0)goto 13101 nin=nin+1 if(nin.gt.nx)goto 13052 mm(k)=nin m(nin)=k 13101 continue 13051 continue 13052 continue if(nin.gt.nx)goto 13042 d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 13121 b(0)=b(0)+d dlx=max(dlx,xmz*d**2) r=r-d*v 13121 continue if(dlx.lt.shr)goto 13042 if(nlp .le. maxit)goto 13141 jerr=-ilm return 13141 continue continue 13151 continue nlp=nlp+1 dlx=0.0 do 13161 l=1,nin k=m(l) bk=b(k) gk=dot_product(r,x(:,k)) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 13181 b(k)=0.0 goto 13191 13181 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 13191 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 13161 dlx=max(dlx,xv(k)*d**2) r=r-d*v*x(:,k) 13161 continue continue d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 13211 b(0)=b(0)+d dlx=max(dlx,xmz*d**2) r=r-d*v 13211 continue if(dlx.lt.shr)goto 13152 if(nlp .le. maxit)goto 13231 jerr=-ilm return 13231 continue goto 13151 13152 continue goto 13041 13042 continue if(nin.gt.nx)goto 12982 do 13241 i=1,no fi=b(0)+g(i) if(nin.gt.0) fi=fi+dot_product(b(m(1:nin)),x(i,m(1:nin))) if(fi .ge. fmin)goto 13261 q(i)=0.0 goto 13251 13261 if(fi .le. fmax)goto 13271 q(i)=1.0 goto 13281 13271 continue q(i)=1.0/(1.0+exp(-fi)) 13281 continue 13251 continue 13241 continue continue v=w*q*(1.0-q) xmz=sum(v) if(xmz.le.vmin)goto 12982 r=w*(y-q) if(xmz*(b(0)-bs(0))**2 .ge. shr)goto 13301 ix=0 do 13311 j=1,nin k=m(j) if(xv(k)*(b(k)-bs(k))**2.lt.shr)goto 13311 ix=1 goto 13312 13311 continue 13312 continue if(ix .ne. 0)goto 13331 do 13341 k=1,ni if(ixx(k).eq.1)goto 13341 if(ju(k).eq.0)goto 13341 ga(k)=abs(dot_product(r,x(:,k))) if(ga(k) .le. al1*vp(k))goto 13361 ixx(k)=1 ix=1 13361 continue 13341 continue continue if(ix.eq.1) go to 10880 goto 12982 13331 continue 13301 continue goto 12981 12982 continue if(nin .le. 
nx)goto 13381 jerr=-10000-ilm goto 12902 13381 continue if(nin.gt.0) a(1:nin,ilm)=b(m(1:nin)) kin(ilm)=nin a0(ilm)=b(0) alm(ilm)=al lmu=ilm devi=dev2(no,w,y,q,pmin) dev(ilm)=(dev1-devi)/dev0 if(xmz.le.vmin)goto 12902 if(ilm.lt.mnl)goto 12901 if(flmin.ge.1.0)goto 12901 me=0 do 13391 j=1,nin if(a(j,ilm).ne.0.0) me=me+1 13391 continue continue if(me.gt.ne)goto 12902 if(dev(ilm).gt.devmax)goto 12902 if(dev(ilm)-dev(ilm-1).lt.sml)goto 12902 12901 continue 12902 continue g=log(q/(1.0-q)) deallocate(b,bs,v,r,xv,q,mm,ga,ixx) return end function dev2(n,w,y,p,pmin) implicit double precision(a-h,o-z) double precision w(n),y(n),p(n) pmax=1.0-pmin s=0.0 do 13401 i=1,n pi=min(max(pmin,p(i)),pmax) s=s-w(i)*(y(i)*log(pi)+(1.0-y(i))*log(1.0-pi)) 13401 continue continue dev2=s return end function azero(n,y,g,q,jerr) implicit double precision(a-h,o-z) parameter(eps=1.0d-7) double precision y(n),g(n),q(n) double precision, dimension (:), allocatable :: e,p,w azero = 0.0 allocate(e(1:n),stat=jerr) if(jerr.ne.0) return allocate(p(1:n),stat=jerr) if(jerr.ne.0) return allocate(w(1:n),stat=jerr) if(jerr.ne.0) return az=0.0 e=exp(-g) qy=dot_product(q,y) p=1.0/(1.0+e) continue 13411 continue w=q*p*(1.0-p) d=(qy-dot_product(q,p))/sum(w) az=az+d if(abs(d).lt.eps)goto 13412 ea0=exp(-az) p=1.0/(1.0+ea0*e) goto 13411 13412 continue azero=az deallocate(e,p,w) return end subroutine lognetn(parm,no,ni,nc,x,y,g,w,ju,vp,cl,ne,nx,nlam,flmin *,ulam,shri, isd,intr,maxit,kopt,lmu,a0,a,m,kin,dev0,dev,alm,nlp,j *err) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nc),g(no,nc),w(no),vp(ni),ulam(nlam *) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl( *2,ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q double precision, dimension (:), allocatable :: sxp,sxpl double precision, dimension (:), allocatable :: di,v,r,ga double precision, dimension (:,:), allocatable :: b,bs,xv integer, dimension (:), allocatable :: mm,is,ixx 
allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(di(1:no),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin pfm=(1.0+pmin)*pmin pfx=(1.0-pmin)*pmax vmin=pfm*pmax bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 13421 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 13441 jerr =8000+ic return 13441 continue if(q0 .lt. 1.0-pmin)goto 13461 jerr =9000+ic return 13461 continue if(intr .ne. 0)goto 13481 q0=1.0/nc b(0,ic)=0.0 goto 13491 13481 continue b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 13491 continue continue b(1:ni,ic)=0.0 13421 continue continue if(intr.eq.0) dev1=log(float(nc)) ixx=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 13511 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 13521 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 13521 continue continue goto 13531 13511 continue do 13541 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 13541 continue continue sxp=0.0 if(intr .ne. 
0)goto 13561 b(0,:)=0.0 goto 13571 13561 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 13571 continue continue dev1=0.0 do 13581 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 13581 continue continue sxpl=w*log(sxp) do 13591 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 13591 continue continue 13531 continue continue do 13601 ic=1,nc do 13611 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 13611 continue continue 13601 continue continue dev0=dev0+dev1 if(kopt .le. 0)goto 13631 if(isd .le. 0 .or. intr .eq. 0)goto 13651 xv=0.25 goto 13661 13651 continue do 13671 j=1,ni if(ju(j).ne.0) xv(j,:)=0.25*dot_product(w,x(:,j)**2) 13671 continue continue 13661 continue continue 13631 continue alf=1.0 if(flmin .ge. 1.0)goto 13691 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 13691 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 shr=shri*dev0 ga=0.0 do 13701 ic=1,nc r=w*(y(:,ic)-q(:,ic)/sxp) do 13711 j=1,ni if(ju(j).ne.0) ga(j)=max(ga(j),abs(dot_product(r,x(:,j)))) 13711 continue continue 13701 continue continue do 13721 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 13741 al=ulam(ilm) goto 13731 13741 if(ilm .le. 2)goto 13751 al=al*alf goto 13731 13751 if(ilm .ne. 1)goto 13761 al=big goto 13771 13761 continue al0=0.0 do 13781 j=1,ni if(ju(j).eq.0)goto 13781 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 13781 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 13771 continue 13731 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 13791 k=1,ni if(ixx(k).eq.1)goto 13791 if(ju(k).eq.0)goto 13791 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 13791 continue continue 10880 continue continue 13801 continue ix=0 jx=ix ig=0 if(nlp .le. maxit)goto 13821 jerr=-ilm return 13821 continue do 13831 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) xmz=0.0 do 13841 i=1,no pic=q(i,ic)/sxp(i) if(pic .ge. 
pfm)goto 13861 pic=0.0 v(i)=0.0 goto 13851 13861 if(pic .le. pfx)goto 13871 pic=1.0 v(i)=0.0 goto 13881 13871 continue v(i)=w(i)*pic*(1.0-pic) xmz=xmz+v(i) 13881 continue 13851 continue r(i)=w(i)*(y(i,ic)-pic) 13841 continue continue if(xmz.le.vmin)goto 13831 ig=1 if(kopt .ne. 0)goto 13901 do 13911 j=1,ni if(ixx(j).gt.0) xv(j,ic)=dot_product(v,x(:,j)**2) 13911 continue continue 13901 continue continue 13921 continue nlp=nlp+1 dlx=0.0 do 13931 k=1,ni if(ixx(k).eq.0)goto 13931 bk=b(k,ic) gk=dot_product(r,x(:,k)) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 13951 b(k,ic)=0.0 goto 13961 13951 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 13961 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 13931 dlx=max(dlx,xv(k,ic)*d**2) r=r-d*v*x(:,k) if(mm(k) .ne. 0)goto 13981 nin=nin+1 if(nin .le. nx)goto 14001 jx=1 goto 13932 14001 continue mm(k)=nin m(nin)=k 13981 continue 13931 continue 13932 continue if(jx.gt.0)goto 13922 d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 14021 b(0,ic)=b(0,ic)+d dlx=max(dlx,xmz*d**2) r=r-d*v 14021 continue if(dlx.lt.shr)goto 13922 if(nlp .le. maxit)goto 14041 jerr=-ilm return 14041 continue continue 14051 continue nlp=nlp+1 dlx=0.0 do 14061 l=1,nin k=m(l) bk=b(k,ic) gk=dot_product(r,x(:,k)) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 14081 b(k,ic)=0.0 goto 14091 14081 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 14091 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 14061 dlx=max(dlx,xv(k,ic)*d**2) r=r-d*v*x(:,k) 14061 continue continue d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 14111 b(0,ic)=b(0,ic)+d dlx=max(dlx,xmz*d**2) r=r-d*v 14111 continue if(dlx.lt.shr)goto 14052 if(nlp .le. maxit)goto 14131 jerr=-ilm return 14131 continue goto 14051 14052 continue goto 13921 13922 continue if(jx.gt.0)goto 13832 if(xmz*(b(0,ic)-bs(0,ic))**2.gt.shr) ix=1 if(ix .ne. 
0)goto 14151 do 14161 j=1,nin k=m(j) if(xv(k,ic)*(b(k,ic)-bs(k,ic))**2 .le. shr)goto 14181 ix=1 goto 14162 14181 continue 14161 continue 14162 continue 14151 continue do 14191 i=1,no fi=b(0,ic)+g(i,ic) if(nin.gt.0) fi=fi+dot_product(b(m(1:nin),ic),x(i,m(1:nin))) fi=min(max(exmn,fi),exmx) sxp(i)=sxp(i)-q(i,ic) q(i,ic)=min(max(emin*sxp(i),exp(fi)),emax*sxp(i)) sxp(i)=sxp(i)+q(i,ic) 14191 continue continue 13831 continue 13832 continue s=-sum(b(0,:))/nc b(0,:)=b(0,:)+s di=s do 14201 j=1,nin l=m(j) if(vp(l) .gt. 0.0)goto 14221 s=sum(b(l,:))/nc goto 14231 14221 continue s=elc(parm,nc,cl(:,l),b(l,:),is) 14231 continue continue b(l,:)=b(l,:)-s di=di-s*x(:,l) 14201 continue continue di=exp(di) sxp=sxp*di do 14241 ic=1,nc q(:,ic)=q(:,ic)*di 14241 continue continue if(jx.gt.0)goto 13802 if(ig.eq.0)goto 13802 if(ix .ne. 0)goto 14261 do 14271 k=1,ni if(ixx(k).eq.1)goto 14271 if(ju(k).eq.0)goto 14271 ga(k)=0.0 14271 continue continue do 14281 ic=1,nc r=w*(y(:,ic)-q(:,ic)/sxp) do 14291 k=1,ni if(ixx(k).eq.1)goto 14291 if(ju(k).eq.0)goto 14291 ga(k)=max(ga(k),abs(dot_product(r,x(:,k)))) 14291 continue continue 14281 continue continue do 14301 k=1,ni if(ixx(k).eq.1)goto 14301 if(ju(k).eq.0)goto 14301 if(ga(k) .le. al1*vp(k))goto 14321 ixx(k)=1 ix=1 14321 continue 14301 continue continue if(ix.eq.1) go to 10880 goto 13802 14261 continue goto 13801 13802 continue if(jx .le. 
0)goto 14341 jerr=-10000-ilm goto 13722 14341 continue devi=0.0 do 14351 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 14361 i=1,no if(y(i,ic).le.0.0)goto 14361 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 14361 continue continue 14351 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ig.eq.0)goto 13722 if(ilm.lt.mnl)goto 13721 if(flmin.ge.1.0)goto 13721 if(nintot(ni,nx,nc,a(1,1,ilm),m,nin,is).gt.ne)goto 13722 if(dev(ilm).gt.devmax)goto 13722 if(dev(ilm)-dev(ilm-1).lt.sml)goto 13722 13721 continue 13722 continue g=log(q) do 14371 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 14371 continue continue deallocate(sxp,b,bs,v,r,xv,q,mm,is,ga,ixx) return end subroutine kazero(kk,n,y,g,q,az,jerr) implicit double precision(a-h,o-z) parameter(eps=1.0d-7) double precision y(n,kk),g(n,kk),q(n),az(kk) double precision, dimension (:), allocatable :: s double precision, dimension (:,:), allocatable :: e allocate(e(1:n,1:kk),stat=jerr) if(jerr.ne.0) return allocate(s(1:n),stat=jerr) if(jerr.ne.0) return az=0.0 e=exp(g) do 14381 i=1,n s(i)=sum(e(i,:)) 14381 continue continue continue 14391 continue dm=0.0 do 14401 k=1,kk t=0.0 u=t do 14411 i=1,n pik=e(i,k)/s(i) t=t+q(i)*(y(i,k)-pik) u=u+q(i)*pik*(1.0-pik) 14411 continue continue d=t/u az(k)=az(k)+d ed=exp(d) dm=max(dm,abs(d)) do 14421 i=1,n z=e(i,k) e(i,k)=z*ed s(i)=s(i)-z+e(i,k) 14421 continue continue 14401 continue continue if(dm.lt.eps)goto 14392 goto 14391 14392 continue az=az-sum(az)/kk deallocate(e,s) return end function elc(parm,n,cl,a,m) implicit double precision(a-h,o-z) double precision a(n),cl(2) integer m(n) fn=n am=sum(a)/fn if((parm .ne. 0.0) .and. (n .ne. 2))goto 14441 elc=am go to 14450 14441 continue do 14461 i=1,n m(i)=i 14461 continue continue call psort7(a,m,1,n) if(a(m(1)) .ne. a(m(n)))goto 14481 elc=a(1) go to 14450 14481 continue if(mod(n,2) .ne. 
1)goto 14501 ad=a(m(n/2+1)) goto 14511 14501 continue ad=0.5*(a(m(n/2+1))+a(m(n/2))) 14511 continue continue if(parm .ne. 1.0)goto 14531 elc=ad go to 14450 14531 continue b1=min(am,ad) b2=max(am,ad) k2=1 continue 14541 if(a(m(k2)).gt.b1)goto 14542 k2=k2+1 goto 14541 14542 continue k1=k2-1 continue 14551 if(a(m(k2)).ge.b2)goto 14552 k2=k2+1 goto 14551 14552 continue r=parm/((1.0-parm)*fn) is=0 sm=n-2*(k1-1) do 14561 k=k1,k2-1 sm=sm-2.0 s=r*sm+am if(s .le. a(m(k)) .or. s .gt. a(m(k+1)))goto 14581 is=k goto 14562 14581 continue 14561 continue 14562 continue if(is .eq. 0)goto 14601 elc=s go to 14450 14601 continue r2=2.0*r s1=a(m(k1)) am2=2.0*am cri=r2*sum(abs(a-s1))+s1*(s1-am2) elc=s1 do 14611 k=k1+1,k2 s=a(m(k)) if(s.eq.s1)goto 14611 c=r2*sum(abs(a-s))+s*(s-am2) if(c .ge. cri)goto 14631 cri=c elc=s 14631 continue s1=s 14611 continue continue 14450 continue elc=max(maxval(a-cl(2)),min(minval(a-cl(1)),elc)) return end function nintot(ni,nx,nc,a,m,nin,is) implicit double precision(a-h,o-z) double precision a(nx,nc) integer m(nx),is(ni) is=0 nintot=0 do 14641 ic=1,nc do 14651 j=1,nin k=m(j) if(is(k).ne.0)goto 14651 if(a(j,ic).eq.0.0)goto 14651 is(k)=k nintot=nintot+1 14651 continue continue 14641 continue continue return end subroutine luncomp(ni,nx,nc,ca,ia,nin,a) implicit double precision(a-h,o-z) double precision ca(nx,nc),a(ni,nc) integer ia(nx) a=0.0 do 14661 ic=1,nc if(nin.gt.0) a(ia(1:nin),ic)=ca(1:nin,ic) 14661 continue continue return end subroutine lmodval(nt,x,nc,nx,a0,ca,ia,nin,ans) implicit double precision(a-h,o-z) double precision a0(nc),ca(nx,nc),x(nt,*),ans(nc,nt) integer ia(nx) do 14671 i=1,nt do 14681 ic=1,nc ans(ic,i)=a0(ic) if(nin.gt.0) ans(ic,i)=ans(ic,i)+dot_product(ca(1:nin,ic),x(i,ia(1 *:nin))) 14681 continue continue 14671 continue continue return end subroutine splognet(parm,no,ni,nc,x,ix,jx,y,g,jd,vp,cl,ne,nx,nlam, *flmin, ulam,thr,isd,intr,maxit,kopt,lmu,a0,ca,ia,nin,dev0,dev,alm *,nlp,jerr) implicit double precision(a-h,o-z) double precision 
x(*),y(no,max(2,nc)),g(no,nc),vp(ni),ulam(nlam) double precision ca(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl *(2,ni) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq,xv integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 14701 jerr=10000 return 14701 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return if(kopt .ne. 2)goto 14721 allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return 14721 continue call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 14741 jerr=7777 return 14741 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) do 14751 i=1,no ww(i)=sum(y(i,:)) if(ww(i).gt.0.0) y(i,:)=y(i,:)/ww(i) 14751 continue continue sw=sum(ww) ww=ww/sw if(nc .ne. 1)goto 14771 call splstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 14791 do 14801 j=1,ni cl(:,j)=cl(:,j)*xs(j) 14801 continue continue 14791 continue call sprlognet2n(parm,no,ni,x,ix,jx,y(:,1),g(:,1),ww,ju,vq,cl,ne,n *x,nlam, flmin,ulam,thr,isd,intr,maxit,kopt,xm,xs,lmu,a0,ca,ia,nin *,dev0,dev, alm,nlp,jerr) goto 14761 14771 if(kopt .ne. 2)goto 14811 call multsplstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs,xv) if(isd .le. 0)goto 14831 do 14841 j=1,ni cl(:,j)=cl(:,j)*xs(j) 14841 continue continue 14831 continue call multsprlognetn(parm,no,ni,nc,x,ix,jx,y,g,ww,ju,vq,cl,ne,nx,nl *am,flmin, ulam,thr,intr,maxit,xv,xm,xs,lmu,a0,ca,ia,nin,dev0,dev, *alm,nlp,jerr) goto 14851 14811 continue call splstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs) if(isd .le. 
0)goto 14871 do 14881 j=1,ni cl(:,j)=cl(:,j)*xs(j) 14881 continue continue 14871 continue call sprlognetn(parm,no,ni,nc,x,ix,jx,y,g,ww,ju,vq,cl,ne,nx,nlam,f *lmin, ulam,thr,isd,intr,maxit,kopt,xm,xs,lmu,a0,ca, ia,nin,dev0, *dev,alm,nlp,jerr) 14851 continue 14761 continue if(jerr.gt.0) return dev0=2.0*sw*dev0 do 14891 k=1,lmu nk=nin(k) do 14901 ic=1,nc if(isd .le. 0)goto 14921 do 14931 l=1,nk ca(l,ic,k)=ca(l,ic,k)/xs(ia(l)) 14931 continue continue 14921 continue if(intr .ne. 0)goto 14951 a0(ic,k)=0.0 goto 14961 14951 continue a0(ic,k)=a0(ic,k)-dot_product(ca(1:nk,ic,k),xm(ia(1:nk))) 14961 continue continue 14901 continue continue 14891 continue continue deallocate(ww,ju,vq,xm,xs) if(kopt.eq.2) deallocate(xv) return end subroutine multsplstandard2(no,ni,x,ix,jx,w,ju,isd,intr,xm,xs,xv) implicit double precision(a-h,o-z) double precision x(*),w(no),xm(ni),xs(ni),xv(ni) integer ix(*),jx(*),ju(ni) if(intr .ne. 0)goto 14981 do 14991 j=1,ni if(ju(j).eq.0)goto 14991 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .eq. 0)goto 15011 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=xv(j)-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 15021 15011 continue xs(j)=1.0 15021 continue continue 14991 continue continue return 14981 continue do 15031 j=1,ni if(ju(j).eq.0)goto 15031 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd .le. 0)goto 15051 xs(j)=sqrt(xv(j)) xv(j)=1.0 15051 continue 15031 continue continue if(isd.eq.0) xs=1.0 return end subroutine splstandard2(no,ni,x,ix,jx,w,ju,isd,intr,xm,xs) implicit double precision(a-h,o-z) double precision x(*),w(no),xm(ni),xs(ni) integer ix(*),jx(*),ju(ni) if(intr .ne. 0)goto 15071 do 15081 j=1,ni if(ju(j).eq.0)goto 15081 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 if(isd .eq. 
0)goto 15101 vc=dot_product(w(jx(jb:je)),x(jb:je)**2) -dot_product(w(jx(jb:je) *),x(jb:je))**2 xs(j)=sqrt(vc) goto 15111 15101 continue xs(j)=1.0 15111 continue continue 15081 continue continue return 15071 continue do 15121 j=1,ni if(ju(j).eq.0)goto 15121 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) if(isd.ne.0) xs(j)=sqrt(dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j *)**2) 15121 continue continue if(isd.eq.0) xs=1.0 return end subroutine sprlognet2n(parm,no,ni,x,ix,jx,y,g,w,ju,vp,cl,ne,nx,nla *m, flmin,ulam,shri,isd,intr,maxit,kopt,xb,xs, lmu,a0,a,m,kin,dev *0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision a(nx,nlam),a0(nlam),dev(nlam),alm(nlam) double precision xb(ni),xs(ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: xm,b,bs,v,r double precision, dimension (:), allocatable :: sc,xv,q,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) allocate(b(0:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(0:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(q(1:no),stat=jerr) if(jerr.ne.0) return allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(sc(1:no),stat=jerr) if(jerr.ne.0) return fmax=log(1.0/pmin-1.0) fmin=-fmax vmin=(1.0+pmin)*pmin*(1.0-pmin) bta=parm omb=1.0-bta q0=dot_product(w,y) if(q0 .gt. pmin)goto 15141 jerr=8001 return 15141 continue if(q0 .lt. 1.0-pmin)goto 15161 jerr=9001 return 15161 continue if(intr.eq.0) q0=0.5 bz=0.0 if(intr.ne.0) bz=log(q0/(1.0-q0)) if(nonzero(no,g) .ne. 
0)goto 15181 vi=q0*(1.0-q0) b(0)=bz v=vi*w r=w*(y-q0) q=q0 xm(0)=vi dev1=-(bz*q0+log(1.0-q0)) goto 15191 15181 continue b(0)=0.0 if(intr .eq. 0)goto 15211 b(0)=azero(no,y,g,w,jerr) if(jerr.ne.0) return 15211 continue q=1.0/(1.0+exp(-b(0)-g)) v=w*q*(1.0-q) r=w*(y-q) xm(0)=sum(v) dev1=-(b(0)*q0+dot_product(w,y*g+log(1.0-q))) 15191 continue continue if(kopt .le. 0)goto 15231 if(isd .le. 0 .or. intr .eq. 0)goto 15251 xv=0.25 goto 15261 15251 continue do 15271 j=1,ni if(ju(j).eq.0)goto 15271 jb=ix(j) je=ix(j+1)-1 xv(j)=0.25*(dot_product(w(jx(jb:je)),x(jb:je)**2)-xb(j)**2) 15271 continue continue 15261 continue continue 15231 continue b(1:ni)=0.0 dev0=dev1 do 15281 i=1,no if(y(i).gt.0.0) dev0=dev0+w(i)*y(i)*log(y(i)) if(y(i).lt.1.0) dev0=dev0+w(i)*(1.0-y(i))*log(1.0-y(i)) 15281 continue continue alf=1.0 if(flmin .ge. 1.0)goto 15301 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 15301 continue m=0 mm=0 nin=0 o=0.0 svr=o mnl=min(mnlam,nlam) bs=0.0 nlp=0 nin=nlp shr=shri*dev0 al=0.0 ixx=0 do 15311 j=1,ni if(ju(j).eq.0)goto 15311 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=abs((gj-svr*xb(j))/xs(j)) 15311 continue continue do 15321 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 15341 al=ulam(ilm) goto 15331 15341 if(ilm .le. 2)goto 15351 al=al*alf goto 15331 15351 if(ilm .ne. 1)goto 15361 al=big goto 15371 15361 continue al0=0.0 do 15381 j=1,ni if(ju(j).eq.0)goto 15381 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 15381 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 15371 continue 15331 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 15391 k=1,ni if(ixx(k).eq.1)goto 15391 if(ju(k).eq.0)goto 15391 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 15391 continue continue 10880 continue continue 15401 continue if(nlp .le. 
maxit)goto 15421 jerr=-ilm return 15421 continue bs(0)=b(0) if(nin.gt.0) bs(m(1:nin))=b(m(1:nin)) do 15431 j=1,ni if(ixx(j).eq.0)goto 15431 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=v(jx(jb:je)) xm(j)=dot_product(sc(1:jn),x(jb:je)) if(kopt .ne. 0)goto 15451 xv(j)=dot_product(sc(1:jn),x(jb:je)**2) xv(j)=(xv(j)-2.0*xb(j)*xm(j)+xm(0)*xb(j)**2)/xs(j)**2 15451 continue 15431 continue continue continue 15461 continue nlp=nlp+1 dlx=0.0 do 15471 k=1,ni if(ixx(k).eq.0)goto 15471 jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 15491 b(k)=0.0 goto 15501 15491 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 15501 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 15471 dlx=max(dlx,xv(k)*d**2) if(mm(k) .ne. 0)goto 15521 nin=nin+1 if(nin.gt.nx)goto 15472 mm(k)=nin m(nin)=k sc(1:jn)=v(jx(jb:je)) xm(k)=dot_product(sc(1:jn),x(jb:je)) 15521 continue r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 15471 continue 15472 continue if(nin.gt.nx)goto 15462 d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 0.0)goto 15541 b(0)=b(0)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 15541 continue if(dlx.lt.shr)goto 15462 if(nlp .le. maxit)goto 15561 jerr=-ilm return 15561 continue continue 15571 continue nlp=nlp+1 dlx=0.0 do 15581 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k)*b(k) au=abs(u)-vp(k)*al1 if(au .gt. 
0.0)goto 15601 b(k)=0.0 goto 15611 15601 continue b(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*al2))) 15611 continue continue d=b(k)-bk if(abs(d).le.0.0)goto 15581 dlx=max(dlx,xv(k)*d**2) r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 15581 continue continue d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 0.0)goto 15631 b(0)=b(0)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 15631 continue if(dlx.lt.shr)goto 15572 if(nlp .le. maxit)goto 15651 jerr=-ilm return 15651 continue goto 15571 15572 continue goto 15461 15462 continue if(nin.gt.nx)goto 15402 sc=b(0) b0=0.0 do 15661 j=1,nin l=m(j) jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))+b(l)*x(jb:je)/xs(l) b0=b0-b(l)*xb(l)/xs(l) 15661 continue continue sc=sc+b0 do 15671 i=1,no fi=sc(i)+g(i) if(fi .ge. fmin)goto 15691 q(i)=0.0 goto 15681 15691 if(fi .le. fmax)goto 15701 q(i)=1.0 goto 15711 15701 continue q(i)=1.0/(1.0+exp(-fi)) 15711 continue 15681 continue 15671 continue continue v=w*q*(1.0-q) xm(0)=sum(v) if(xm(0).lt.vmin)goto 15402 r=w*(y-q) svr=sum(r) o=0.0 if(xm(0)*(b(0)-bs(0))**2 .ge. shr)goto 15731 kx=0 do 15741 j=1,nin k=m(j) if(xv(k)*(b(k)-bs(k))**2.lt.shr)goto 15741 kx=1 goto 15742 15741 continue 15742 continue if(kx .ne. 0)goto 15761 do 15771 j=1,ni if(ixx(j).eq.1)goto 15771 if(ju(j).eq.0)goto 15771 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+v(jx(jb:je))*o gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=abs((gj-svr*xb(j))/xs(j)) if(ga(j) .le. al1*vp(j))goto 15791 ixx(j)=1 kx=1 15791 continue 15771 continue continue if(kx.eq.1) go to 10880 goto 15402 15761 continue 15731 continue goto 15401 15402 continue if(nin .le. 
nx)goto 15811 jerr=-10000-ilm goto 15322 15811 continue if(nin.gt.0) a(1:nin,ilm)=b(m(1:nin)) kin(ilm)=nin a0(ilm)=b(0) alm(ilm)=al lmu=ilm devi=dev2(no,w,y,q,pmin) dev(ilm)=(dev1-devi)/dev0 if(ilm.lt.mnl)goto 15321 if(flmin.ge.1.0)goto 15321 me=0 do 15821 j=1,nin if(a(j,ilm).ne.0.0) me=me+1 15821 continue continue if(me.gt.ne)goto 15322 if(dev(ilm).gt.devmax)goto 15322 if(dev(ilm)-dev(ilm-1).lt.sml)goto 15322 if(xm(0).lt.vmin)goto 15322 15321 continue 15322 continue g=log(q/(1.0-q)) deallocate(xm,b,bs,v,r,sc,xv,q,mm,ga,ixx) return end subroutine sprlognetn(parm,no,ni,nc,x,ix,jx,y,g,w,ju,vp,cl,ne,nx,n *lam,flmin, ulam,shri,isd,intr,maxit,kopt,xb,xs,lmu,a0,a,m,kin,dev *0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nc),g(no,nc),w(no),vp(ni),ulam(nlam),xb *(ni),xs(ni) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl( *2,ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q double precision, dimension (:), allocatable :: sxp,sxpl double precision, dimension (:), allocatable :: sc,xm,v,r,ga double precision, dimension (:,:), allocatable :: b,bs,xv integer, dimension (:), allocatable :: mm,is,iy allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(xm(0:ni),stat=jerr) if(jerr.ne.0) return allocate(r(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(sc(1:no),stat=jerr) if(jerr.ne.0) 
return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin pfm=(1.0+pmin)*pmin pfx=(1.0-pmin)*pmax vmin=pfm*pmax bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 15831 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 15851 jerr =8000+ic return 15851 continue if(q0 .lt. 1.0-pmin)goto 15871 jerr =9000+ic return 15871 continue if(intr.eq.0) q0=1.0/nc b(1:ni,ic)=0.0 b(0,ic)=0.0 if(intr .eq. 0)goto 15891 b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 15891 continue 15831 continue continue if(intr.eq.0) dev1=log(float(nc)) iy=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 15911 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 15921 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 15921 continue continue goto 15931 15911 continue do 15941 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 15941 continue continue sxp=0.0 if(intr .ne. 0)goto 15961 b(0,:)=0.0 goto 15971 15961 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 15971 continue continue dev1=0.0 do 15981 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 15981 continue continue sxpl=w*log(sxp) do 15991 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 15991 continue continue 15931 continue continue do 16001 ic=1,nc do 16011 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 16011 continue continue 16001 continue continue dev0=dev0+dev1 if(kopt .le. 0)goto 16031 if(isd .le. 0 .or. intr .eq. 0)goto 16051 xv=0.25 goto 16061 16051 continue do 16071 j=1,ni if(ju(j).eq.0)goto 16071 jb=ix(j) je=ix(j+1)-1 xv(j,:)=0.25*(dot_product(w(jx(jb:je)),x(jb:je)**2)-xb(j)**2) 16071 continue continue 16061 continue continue 16031 continue alf=1.0 if(flmin .ge. 
1.0)goto 16091 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 16091 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 svr=0.0 o=0.0 shr=shri*dev0 ga=0.0 do 16101 ic=1,nc v=q(:,ic)/sxp r=w*(y(:,ic)-v) v=w*v*(1.0-v) do 16111 j=1,ni if(ju(j).eq.0)goto 16111 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=max(ga(j),abs(gj-svr*xb(j))/xs(j)) 16111 continue continue 16101 continue continue do 16121 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 16141 al=ulam(ilm) goto 16131 16141 if(ilm .le. 2)goto 16151 al=al*alf goto 16131 16151 if(ilm .ne. 1)goto 16161 al=big goto 16171 16161 continue al0=0.0 do 16181 j=1,ni if(ju(j).eq.0)goto 16181 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 16181 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 16171 continue 16131 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 16191 k=1,ni if(iy(k).eq.1)goto 16191 if(ju(k).eq.0)goto 16191 if(ga(k).gt.tlam*vp(k)) iy(k)=1 16191 continue continue 10880 continue continue 16201 continue ixx=0 jxx=ixx ig=0 if(nlp .le. maxit)goto 16221 jerr=-ilm return 16221 continue do 16231 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) xm(0)=0.0 svr=0.0 o=0.0 do 16241 i=1,no pic=q(i,ic)/sxp(i) if(pic .ge. pfm)goto 16261 pic=0.0 v(i)=0.0 goto 16251 16261 if(pic .le. pfx)goto 16271 pic=1.0 v(i)=0.0 goto 16281 16271 continue v(i)=w(i)*pic*(1.0-pic) xm(0)=xm(0)+v(i) 16281 continue 16251 continue r(i)=w(i)*(y(i,ic)-pic) svr=svr+r(i) 16241 continue continue if(xm(0).le.vmin)goto 16231 ig=1 do 16291 j=1,ni if(iy(j).eq.0)goto 16291 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(v(jx(jb:je)),x(jb:je)) if(kopt .ne. 
0)goto 16311 xv(j,ic)=dot_product(v(jx(jb:je)),x(jb:je)**2) xv(j,ic)=(xv(j,ic)-2.0*xb(j)*xm(j)+xm(0)*xb(j)**2)/xs(j)**2 16311 continue 16291 continue continue continue 16321 continue nlp=nlp+1 dlx=0.0 do 16331 k=1,ni if(iy(k).eq.0)goto 16331 jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k,ic) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 16351 b(k,ic)=0.0 goto 16361 16351 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 16361 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 16331 dlx=max(dlx,xv(k,ic)*d**2) if(mm(k) .ne. 0)goto 16381 nin=nin+1 if(nin .le. nx)goto 16401 jxx=1 goto 16332 16401 continue mm(k)=nin m(nin)=k xm(k)=dot_product(v(jx(jb:je)),x(jb:je)) 16381 continue r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 16331 continue 16332 continue if(jxx.gt.0)goto 16322 d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 0.0)goto 16421 b(0,ic)=b(0,ic)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 16421 continue if(dlx.lt.shr)goto 16322 if(nlp .le. maxit)goto 16441 jerr=-ilm return 16441 continue continue 16451 continue nlp=nlp+1 dlx=0.0 do 16461 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 jn=ix(k+1)-ix(k) bk=b(k,ic) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gk=dot_product(sc(1:jn),x(jb:je)) gk=(gk-svr*xb(k))/xs(k) u=gk+xv(k,ic)*b(k,ic) au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 16481 b(k,ic)=0.0 goto 16491 16481 continue b(k,ic)=max(cl(1,k),min(cl(2,k),sign(au,u)/ (xv(k,ic)+vp(k)*al2)) *) 16491 continue continue d=b(k,ic)-bk if(abs(d).le.0.0)goto 16461 dlx=max(dlx,xv(k,ic)*d**2) r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) o=o+d*(xb(k)/xs(k)) svr=svr-d*(xm(k)-xb(k)*xm(0))/xs(k) 16461 continue continue d=0.0 if(intr.ne.0) d=svr/xm(0) if(d .eq. 
0.0)goto 16511 b(0,ic)=b(0,ic)+d dlx=max(dlx,xm(0)*d**2) r=r-d*v svr=svr-d*xm(0) 16511 continue if(dlx.lt.shr)goto 16452 if(nlp .le. maxit)goto 16531 jerr=-ilm return 16531 continue goto 16451 16452 continue goto 16321 16322 continue if(jxx.gt.0)goto 16232 if(xm(0)*(b(0,ic)-bs(0,ic))**2.gt.shr) ixx=1 if(ixx .ne. 0)goto 16551 do 16561 j=1,nin k=m(j) if(xv(k,ic)*(b(k,ic)-bs(k,ic))**2 .le. shr)goto 16581 ixx=1 goto 16562 16581 continue 16561 continue 16562 continue 16551 continue sc=b(0,ic)+g(:,ic) b0=0.0 do 16591 j=1,nin l=m(j) jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))+b(l,ic)*x(jb:je)/xs(l) b0=b0-b(l,ic)*xb(l)/xs(l) 16591 continue continue sc=min(max(exmn,sc+b0),exmx) sxp=sxp-q(:,ic) q(:,ic)=min(max(emin*sxp,exp(sc)),emax*sxp) sxp=sxp+q(:,ic) 16231 continue 16232 continue s=-sum(b(0,:))/nc b(0,:)=b(0,:)+s sc=s b0=0.0 do 16601 j=1,nin l=m(j) if(vp(l) .gt. 0.0)goto 16621 s=sum(b(l,:))/nc goto 16631 16621 continue s=elc(parm,nc,cl(:,l),b(l,:),is) 16631 continue continue b(l,:)=b(l,:)-s jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))-s*x(jb:je)/xs(l) b0=b0+s*xb(l)/xs(l) 16601 continue continue sc=sc+b0 sc=exp(sc) sxp=sxp*sc do 16641 ic=1,nc q(:,ic)=q(:,ic)*sc 16641 continue continue if(jxx.gt.0)goto 16202 if(ig.eq.0)goto 16202 if(ixx .ne. 0)goto 16661 do 16671 j=1,ni if(iy(j).eq.1)goto 16671 if(ju(j).eq.0)goto 16671 ga(j)=0.0 16671 continue continue do 16681 ic=1,nc v=q(:,ic)/sxp r=w*(y(:,ic)-v) v=w*v*(1.0-v) do 16691 j=1,ni if(iy(j).eq.1)goto 16691 if(ju(j).eq.0)goto 16691 jb=ix(j) je=ix(j+1)-1 jn=ix(j+1)-ix(j) sc(1:jn)=r(jx(jb:je))+o*v(jx(jb:je)) gj=dot_product(sc(1:jn),x(jb:je)) ga(j)=max(ga(j),abs(gj-svr*xb(j))/xs(j)) 16691 continue continue 16681 continue continue do 16701 k=1,ni if(iy(k).eq.1)goto 16701 if(ju(k).eq.0)goto 16701 if(ga(k) .le. al1*vp(k))goto 16721 iy(k)=1 ixx=1 16721 continue 16701 continue continue if(ixx.eq.1) go to 10880 goto 16202 16661 continue goto 16201 16202 continue if(jxx .le. 
0)goto 16741 jerr=-10000-ilm goto 16122 16741 continue devi=0.0 do 16751 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 16761 i=1,no if(y(i,ic).le.0.0)goto 16761 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 16761 continue continue 16751 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ig.eq.0)goto 16122 if(ilm.lt.mnl)goto 16121 if(flmin.ge.1.0)goto 16121 if(nintot(ni,nx,nc,a(1,1,ilm),m,nin,is).gt.ne)goto 16122 if(dev(ilm).gt.devmax)goto 16122 if(dev(ilm)-dev(ilm-1).lt.sml)goto 16122 16121 continue 16122 continue g=log(q) do 16771 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 16771 continue continue deallocate(sxp,b,bs,v,r,xv,q,mm,is,xm,sc,ga,iy) return end subroutine lcmodval(nc,nx,a0,ca,ia,nin,x,ix,jx,n,f) implicit double precision(a-h,o-z) double precision a0(nc),ca(nx,nc),x(*),f(nc,n) integer ia(*),ix(*),jx(*) do 16781 ic=1,nc f(ic,:)=a0(ic) 16781 continue continue do 16791 j=1,nin k=ia(j) kb=ix(k) ke=ix(k+1)-1 do 16801 ic=1,nc f(ic,jx(kb:ke))=f(ic,jx(kb:ke))+ca(j,ic)*x(kb:ke) 16801 continue continue 16791 continue continue return end subroutine coxnet(parm,no,ni,x,y,d,g,w,jd,vp,cl,ne,nx,nlam,flmin,u *lam,thr, maxit,isd,lmu,ca,ia,nin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),d(no),g(no),w(no),vp(ni),ulam(nlam *) double precision ca(nx,nlam),dev(nlam),alm(nlam),cl(2,ni) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xs,ww,vq integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 16821 jerr=10000 return 16821 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return if(isd .le. 0)goto 16841 allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return 16841 continue call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 
0)goto 16861 jerr=7777 return 16861 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) ww=max(0d0,w) sw=sum(ww) if(sw .gt. 0.0)goto 16881 jerr=9999 return 16881 continue ww=ww/sw call cstandard(no,ni,x,ww,ju,isd,xs) if(isd .le. 0)goto 16901 do 16911 j=1,ni cl(:,j)=cl(:,j)*xs(j) 16911 continue continue 16901 continue call coxnet1(parm,no,ni,x,y,d,g,ww,ju,vq,cl,ne,nx,nlam,flmin,ulam, *thr, isd,maxit,lmu,ca,ia,nin,dev0,dev,alm,nlp,jerr) if(jerr.gt.0) return dev0=2.0*sw*dev0 if(isd .le. 0)goto 16931 do 16941 k=1,lmu nk=nin(k) ca(1:nk,k)=ca(1:nk,k)/xs(ia(1:nk)) 16941 continue continue 16931 continue deallocate(ww,ju,vq) if(isd.gt.0) deallocate(xs) return end subroutine cstandard(no,ni,x,w,ju,isd,xs) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),xs(ni) integer ju(ni) do 16951 j=1,ni if(ju(j).eq.0)goto 16951 xm=dot_product(w,x(:,j)) x(:,j)=x(:,j)-xm if(isd .le. 0)goto 16971 xs(j)=sqrt(dot_product(w,x(:,j)**2)) x(:,j)=x(:,j)/xs(j) 16971 continue 16951 continue continue return end subroutine coxnet1(parm,no,ni,x,y,d,g,q,ju,vp,cl,ne,nx,nlam,flmin, *ulam,cthri, isd,maxit,lmu,ao,m,kin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),q(no),d(no),g(no),vp(ni),ulam(nlam *) double precision ao(nx,nlam),dev(nlam),alm(nlam),cl(2,ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: w,dk,v,xs,wr double precision, dimension (:), allocatable :: a,as,f,dq double precision, dimension (:), allocatable :: e,uu,ga integer, dimension (:), allocatable :: jp,kp,mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) isd = isd*1 sml=sml*100.0 devmax=devmax*0.99/0.999 allocate(e(1:no),stat=jerr) if(jerr.ne.0) return allocate(uu(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:ni),stat=jerr) if(jerr.ne.0) return allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(as(1:ni),stat=jerr) if(jerr.ne.0) 
return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(jp(1:no),stat=jerr) if(jerr.ne.0) return allocate(kp(1:no),stat=jerr) if(jerr.ne.0) return allocate(dk(1:no),stat=jerr) if(jerr.ne.0) return allocate(wr(1:no),stat=jerr) if(jerr.ne.0) return allocate(dq(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return call groups(no,y,d,q,nk,kp,jp,t0,jerr) if(jerr.ne.0) go to 12220 alpha=parm oma=1.0-alpha nlm=0 ixx=0 al=0.0 dq=d*q call died(no,nk,dq,kp,jp,dk) a=0.0 f(1)=0.0 fmax=log(huge(f(1))*0.1) if(nonzero(no,g) .eq. 0)goto 16991 f=g-dot_product(q,g) e=q*exp(sign(min(abs(f),fmax),f)) goto 17001 16991 continue f=0.0 e=q 17001 continue continue r0=risk(no,ni,nk,dq,dk,f,e,kp,jp,uu) rr=-(dot_product(dk(1:nk),log(dk(1:nk)))+r0) dev0=rr do 17011 i=1,no if((y(i) .ge. t0) .and. (q(i) .gt. 0.0))goto 17031 w(i)=0.0 wr(i)=w(i) 17031 continue 17011 continue continue call outer(no,nk,dq,dk,kp,jp,e,wr,w,jerr,uu) if(jerr.ne.0) go to 12220 alf=1.0 if(flmin .ge. 1.0)goto 17051 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 17051 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) as=0.0 cthr=cthri*dev0 do 17061 j=1,ni if(ju(j).eq.0)goto 17061 ga(j)=abs(dot_product(wr,x(:,j))) 17061 continue continue do 17071 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 17091 al=ulam(ilm) goto 17081 17091 if(ilm .le. 2)goto 17101 al=al*alf goto 17081 17101 if(ilm .ne. 1)goto 17111 al=big goto 17121 17111 continue al0=0.0 do 17131 j=1,ni if(ju(j).eq.0)goto 17131 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 17131 continue continue al0=al0/max(parm,1.0d-3) al=alf*al0 17121 continue 17081 continue sa=alpha*al omal=oma*al tlam=alpha*(2.0*al-al0) do 17141 k=1,ni if(ixx(k).eq.1)goto 17141 if(ju(k).eq.0)goto 17141 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 17141 continue continue 10880 continue continue 17151 continue if(nlp .le. 
maxit)goto 17171 jerr=-ilm return 17171 continue if(nin.gt.0) as(m(1:nin))=a(m(1:nin)) call vars(no,ni,x,w,ixx,v) continue 17181 continue nlp=nlp+1 dli=0.0 do 17191 j=1,ni if(ixx(j).eq.0)goto 17191 u=a(j)*v(j)+dot_product(wr,x(:,j)) if(abs(u) .gt. vp(j)*sa)goto 17211 at=0.0 goto 17221 17211 continue at=max(cl(1,j),min(cl(2,j),sign(abs(u)-vp(j)*sa,u)/ (v(j)+vp(j)*o *mal))) 17221 continue continue if(at .eq. a(j))goto 17241 del=at-a(j) a(j)=at dli=max(dli,v(j)*del**2) wr=wr-del*w*x(:,j) f=f+del*x(:,j) if(mm(j) .ne. 0)goto 17261 nin=nin+1 if(nin.gt.nx)goto 17192 mm(j)=nin m(nin)=j 17261 continue 17241 continue 17191 continue 17192 continue if(nin.gt.nx)goto 17182 if(dli.lt.cthr)goto 17182 if(nlp .le. maxit)goto 17281 jerr=-ilm return 17281 continue continue 17291 continue nlp=nlp+1 dli=0.0 do 17301 l=1,nin j=m(l) u=a(j)*v(j)+dot_product(wr,x(:,j)) if(abs(u) .gt. vp(j)*sa)goto 17321 at=0.0 goto 17331 17321 continue at=max(cl(1,j),min(cl(2,j),sign(abs(u)-vp(j)*sa,u)/ (v(j)+vp(j)*o *mal))) 17331 continue continue if(at .eq. a(j))goto 17351 del=at-a(j) a(j)=at dli=max(dli,v(j)*del**2) wr=wr-del*w*x(:,j) f=f+del*x(:,j) 17351 continue 17301 continue continue if(dli.lt.cthr)goto 17292 if(nlp .le. maxit)goto 17371 jerr=-ilm return 17371 continue goto 17291 17292 continue goto 17181 17182 continue if(nin.gt.nx)goto 17152 e=q*exp(sign(min(abs(f),fmax),f)) call outer(no,nk,dq,dk,kp,jp,e,wr,w,jerr,uu) if(jerr .eq. 0)goto 17391 jerr=jerr-ilm go to 12220 17391 continue ix=0 do 17401 j=1,nin k=m(j) if(v(k)*(a(k)-as(k))**2.lt.cthr)goto 17401 ix=1 goto 17402 17401 continue 17402 continue if(ix .ne. 0)goto 17421 do 17431 k=1,ni if(ixx(k).eq.1)goto 17431 if(ju(k).eq.0)goto 17431 ga(k)=abs(dot_product(wr,x(:,k))) if(ga(k) .le. sa*vp(k))goto 17451 ixx(k)=1 ix=1 17451 continue 17431 continue continue if(ix.eq.1) go to 10880 goto 17152 17421 continue goto 17151 17152 continue if(nin .le. 
nx)goto 17471 jerr=-10000-ilm goto 17072 17471 continue if(nin.gt.0) ao(1:nin,ilm)=a(m(1:nin)) kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(risk(no,ni,nk,dq,dk,f,e,kp,jp,uu)-r0)/rr if(ilm.lt.mnl)goto 17071 if(flmin.ge.1.0)goto 17071 me=0 do 17481 j=1,nin if(ao(j,ilm).ne.0.0) me=me+1 17481 continue continue if(me.gt.ne)goto 17072 if((dev(ilm)-dev(ilm-mnl+1))/dev(ilm).lt.sml)goto 17072 if(dev(ilm).gt.devmax)goto 17072 17071 continue 17072 continue g=f 12220 continue deallocate(e,uu,w,dk,v,xs,f,wr,a,as,jp,kp,dq,mm,ga,ixx) return end subroutine cxmodval(ca,ia,nin,n,x,f) implicit double precision(a-h,o-z) double precision ca(nin),x(n,*),f(n) integer ia(nin) f=0.0 if(nin.le.0) return do 17491 i=1,n f(i)=f(i)+dot_product(ca(1:nin),x(i,ia(1:nin))) 17491 continue continue return end subroutine groups(no,y,d,q,nk,kp,jp,t0,jerr) implicit double precision(a-h,o-z) double precision y(no),d(no),q(no) integer jp(no),kp(*) do 17501 j=1,no jp(j)=j 17501 continue continue call psort7(y,jp,1,no) nj=0 do 17511 j=1,no if(q(jp(j)).le.0.0)goto 17511 nj=nj+1 jp(nj)=jp(j) 17511 continue continue if(nj .ne. 0)goto 17531 jerr=20000 return 17531 continue j=1 continue 17541 if(d(jp(j)).gt.0.0)goto 17542 j=j+1 if(j.gt.nj)goto 17542 goto 17541 17542 continue if(j .lt. nj-1)goto 17561 jerr=30000 return 17561 continue t0=y(jp(j)) j0=j-1 if(j0 .le. 0)goto 17581 continue 17591 if(y(jp(j0)).lt.t0)goto 17592 j0=j0-1 if(j0.eq.0)goto 17592 goto 17591 17592 continue if(j0 .le. 0)goto 17611 nj=nj-j0 do 17621 j=1,nj jp(j)=jp(j+j0) 17621 continue continue 17611 continue 17581 continue jerr=0 nk=0 yk=t0 j=2 continue 17631 continue continue 17641 if(d(jp(j)).gt.0.0.and.y(jp(j)).gt.yk)goto 17642 j=j+1 if(j.gt.nj)goto 17642 goto 17641 17642 continue nk=nk+1 kp(nk)=j-1 if(j.gt.nj)goto 17632 if(j .ne. 
nj)goto 17661 nk=nk+1 kp(nk)=nj goto 17632 17661 continue yk=y(jp(j)) j=j+1 goto 17631 17632 continue return end subroutine outer(no,nk,d,dk,kp,jp,e,wr,w,jerr,u) implicit double precision(a-h,o-z) double precision d(no),dk(nk),wr(no),w(no) double precision e(no),u(no),b,c integer kp(nk),jp(no) call usk(no,nk,kp,jp,e,u) b=dk(1)/u(1) c=dk(1)/u(1)**2 jerr=0 do 17671 j=1,kp(1) i=jp(j) w(i)=e(i)*(b-e(i)*c) if(w(i) .gt. 0.0)goto 17691 jerr=-30000 return 17691 continue wr(i)=d(i)-e(i)*b 17671 continue continue do 17701 k=2,nk j1=kp(k-1)+1 j2=kp(k) b=b+dk(k)/u(k) c=c+dk(k)/u(k)**2 do 17711 j=j1,j2 i=jp(j) w(i)=e(i)*(b-e(i)*c) if(w(i) .gt. 0.0)goto 17731 jerr=-30000 return 17731 continue wr(i)=d(i)-e(i)*b 17711 continue continue 17701 continue continue return end subroutine vars(no,ni,x,w,ixx,v) implicit double precision(a-h,o-z) double precision x(no,ni),w(no),v(ni) integer ixx(ni) do 17741 j=1,ni if(ixx(j).gt.0) v(j)=dot_product(w,x(:,j)**2) 17741 continue continue return end subroutine died(no,nk,d,kp,jp,dk) implicit double precision(a-h,o-z) double precision d(no),dk(nk) integer kp(nk),jp(no) dk(1)=sum(d(jp(1:kp(1)))) do 17751 k=2,nk dk(k)=sum(d(jp((kp(k-1)+1):kp(k)))) 17751 continue continue return end subroutine usk(no,nk,kp,jp,e,u) implicit double precision(a-h,o-z) double precision e(no),u(nk),h integer kp(nk),jp(no) h=0.0 do 17761 k=nk,1,-1 j2=kp(k) j1=1 if(k.gt.1) j1=kp(k-1)+1 do 17771 j=j2,j1,-1 h=h+e(jp(j)) 17771 continue continue u(k)=h 17761 continue continue return end function risk(no,ni,nk,d,dk,f,e,kp,jp,u) implicit double precision(a-h,o-z) double precision d(no),dk(nk),f(no) integer kp(nk),jp(no) double precision e(no),u(nk) ni = ni*1 call usk(no,nk,kp,jp,e,u) u=log(u) risk=dot_product(d,f)-dot_product(dk,u) return end subroutine loglike(no,ni,x,y,d,g,w,nlam,a,flog,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),d(no),g(no),w(no),a(ni,nlam),flog( *nlam) double precision, dimension (:), allocatable :: dk,f,xm,dq,q double precision, 
dimension (:), allocatable :: e,uu integer, dimension (:), allocatable :: jp,kp allocate(e(1:no),stat=jerr) if(jerr.ne.0) return allocate(q(1:no),stat=jerr) if(jerr.ne.0) return allocate(uu(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return allocate(dk(1:no),stat=jerr) if(jerr.ne.0) return allocate(jp(1:no),stat=jerr) if(jerr.ne.0) return allocate(kp(1:no),stat=jerr) if(jerr.ne.0) return allocate(dq(1:no),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return q=max(0d0,w) sw=sum(q) if(sw .gt. 0.0)goto 17791 jerr=9999 go to 12220 17791 continue call groups(no,y,d,q,nk,kp,jp,t0,jerr) if(jerr.ne.0) go to 12220 fmax=log(huge(e(1))*0.1) dq=d*q call died(no,nk,dq,kp,jp,dk) gm=dot_product(q,g)/sw do 17801 j=1,ni xm(j)=dot_product(q,x(:,j))/sw 17801 continue continue do 17811 lam=1,nlam do 17821 i=1,no f(i)=g(i)-gm+dot_product(a(:,lam),(x(i,:)-xm)) e(i)=q(i)*exp(sign(min(abs(f(i)),fmax),f(i))) 17821 continue continue flog(lam)=risk(no,ni,nk,dq,dk,f,e,kp,jp,uu) 17811 continue continue 12220 continue deallocate(e,uu,dk,f,jp,kp,dq) return end subroutine fishnet(parm,no,ni,x,y,g,w,jd,vp,cl,ne,nx,nlam,flmin,ul *am,thr, isd,intr,maxit,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),w(no),vp(ni),ulam(nlam) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam),cl(2,ni) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 17841 jerr=10000 return 17841 continue if(minval(y) .ge. 0.0)goto 17861 jerr=8888 return 17861 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return if(isd .le. 
0)goto 17881 allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return 17881 continue call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 17901 jerr=7777 go to 12220 17901 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) ww=max(0d0,w) sw=sum(ww) if(sw .gt. 0.0)goto 17921 jerr=9999 go to 12220 17921 continue ww=ww/sw call lstandard1(no,ni,x,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 17941 do 17951 j=1,ni cl(:,j)=cl(:,j)*xs(j) 17951 continue continue 17941 continue call fishnet1(parm,no,ni,x,y,g,ww,ju,vq,cl,ne,nx,nlam,flmin,ulam,t *hr, isd,intr,maxit,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp,jerr) if(jerr.gt.0) go to 12220 dev0=2.0*sw*dev0 do 17961 k=1,lmu nk=nin(k) if(isd.gt.0) ca(1:nk,k)=ca(1:nk,k)/xs(ia(1:nk)) if(intr .ne. 0)goto 17981 a0(k)=0.0 goto 17991 17981 continue a0(k)=a0(k)-dot_product(ca(1:nk,k),xm(ia(1:nk))) 17991 continue continue 17961 continue continue 12220 continue deallocate(ww,ju,vq,xm) if(isd.gt.0) deallocate(xs) return end subroutine fishnet1(parm,no,ni,x,y,g,q,ju,vp,cl,ne,nx,nlam,flmin,u *lam,shri, isd,intr,maxit,lmu,a0,ca,m,kin,dev0,dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),q(no),vp(ni),ulam(nlam) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam),cl(2,ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: t,w,wr,v,a,f,as,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) sml=sml*10.0 isd = isd*1 allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(as(1:ni),stat=jerr) if(jerr.ne.0) return allocate(t(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(wr(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:ni),stat=jerr) if(jerr.ne.0) return allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return bta=parm 
omb=1.0-bta t=q*y yb=sum(t) fmax=log(huge(bta)*0.1) if(nonzero(no,g) .ne. 0)goto 18011 if(intr .eq. 0)goto 18031 w=q*yb az=log(yb) f=az dv0=yb*(az-1.0) goto 18041 18031 continue w=q az=0.0 f=az dv0=-1.0 18041 continue continue goto 18051 18011 continue w=q*exp(sign(min(abs(g),fmax),g)) v0=sum(w) if(intr .eq. 0)goto 18071 eaz=yb/v0 w=eaz*w az=log(eaz) f=az+g dv0=dot_product(t,g)-yb*(1.0-az) goto 18081 18071 continue az=0.0 f=g dv0=dot_product(t,g)-v0 18081 continue continue 18051 continue continue a=0.0 as=0.0 wr=t-w v0=1.0 if(intr.ne.0) v0=yb dvr=-yb do 18091 i=1,no if(t(i).gt.0.0) dvr=dvr+t(i)*log(y(i)) 18091 continue continue dvr=dvr-dv0 dev0=dvr alf=1.0 if(flmin .ge. 1.0)goto 18111 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 18111 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) shr=shri*dev0 ixx=0 al=0.0 do 18121 j=1,ni if(ju(j).eq.0)goto 18121 ga(j)=abs(dot_product(wr,x(:,j))) 18121 continue continue do 18131 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 18151 al=ulam(ilm) goto 18141 18151 if(ilm .le. 2)goto 18161 al=al*alf goto 18141 18161 if(ilm .ne. 1)goto 18171 al=big goto 18181 18171 continue al0=0.0 do 18191 j=1,ni if(ju(j).eq.0)goto 18191 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 18191 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 18181 continue 18141 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 18201 k=1,ni if(ixx(k).eq.1)goto 18201 if(ju(k).eq.0)goto 18201 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 18201 continue continue 10880 continue continue 18211 continue if(nlp .le. maxit)goto 18231 jerr=-ilm return 18231 continue az0=az if(nin.gt.0) as(m(1:nin))=a(m(1:nin)) do 18241 j=1,ni if(ixx(j).ne.0) v(j)=dot_product(w,x(:,j)**2) 18241 continue continue continue 18251 continue nlp=nlp+1 dlx=0.0 do 18261 k=1,ni if(ixx(k).eq.0)goto 18261 ak=a(k) u=dot_product(wr,x(:,k))+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 
0.0)goto 18281 a(k)=0.0 goto 18291 18281 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 18291 continue continue if(a(k).eq.ak)goto 18261 d=a(k)-ak dlx=max(dlx,v(k)*d**2) wr=wr-d*w*x(:,k) f=f+d*x(:,k) if(mm(k) .ne. 0)goto 18311 nin=nin+1 if(nin.gt.nx)goto 18262 mm(k)=nin m(nin)=k 18311 continue 18261 continue 18262 continue if(nin.gt.nx)goto 18252 if(intr .eq. 0)goto 18331 d=sum(wr)/v0 az=az+d dlx=max(dlx,v0*d**2) wr=wr-d*w f=f+d 18331 continue if(dlx.lt.shr)goto 18252 if(nlp .le. maxit)goto 18351 jerr=-ilm return 18351 continue continue 18361 continue nlp=nlp+1 dlx=0.0 do 18371 l=1,nin k=m(l) ak=a(k) u=dot_product(wr,x(:,k))+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 18391 a(k)=0.0 goto 18401 18391 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 18401 continue continue if(a(k).eq.ak)goto 18371 d=a(k)-ak dlx=max(dlx,v(k)*d**2) wr=wr-d*w*x(:,k) f=f+d*x(:,k) 18371 continue continue if(intr .eq. 0)goto 18421 d=sum(wr)/v0 az=az+d dlx=max(dlx,v0*d**2) wr=wr-d*w f=f+d 18421 continue if(dlx.lt.shr)goto 18362 if(nlp .le. maxit)goto 18441 jerr=-ilm return 18441 continue goto 18361 18362 continue goto 18251 18252 continue if(nin.gt.nx)goto 18212 w=q*exp(sign(min(abs(f),fmax),f)) v0=sum(w) wr=t-w if(v0*(az-az0)**2 .ge. shr)goto 18461 ix=0 do 18471 j=1,nin k=m(j) if(v(k)*(a(k)-as(k))**2.lt.shr)goto 18471 ix=1 goto 18472 18471 continue 18472 continue if(ix .ne. 0)goto 18491 do 18501 k=1,ni if(ixx(k).eq.1)goto 18501 if(ju(k).eq.0)goto 18501 ga(k)=abs(dot_product(wr,x(:,k))) if(ga(k) .le. al1*vp(k))goto 18521 ixx(k)=1 ix=1 18521 continue 18501 continue continue if(ix.eq.1) go to 10880 goto 18212 18491 continue 18461 continue goto 18211 18212 continue if(nin .le. 
nx)goto 18541 jerr=-10000-ilm goto 18132 18541 continue if(nin.gt.0) ca(1:nin,ilm)=a(m(1:nin)) kin(ilm)=nin a0(ilm)=az alm(ilm)=al lmu=ilm dev(ilm)=(dot_product(t,f)-v0-dv0)/dvr if(ilm.lt.mnl)goto 18131 if(flmin.ge.1.0)goto 18131 me=0 do 18551 j=1,nin if(ca(j,ilm).ne.0.0) me=me+1 18551 continue continue if(me.gt.ne)goto 18132 if((dev(ilm)-dev(ilm-mnl+1))/dev(ilm).lt.sml)goto 18132 if(dev(ilm).gt.devmax)goto 18132 18131 continue 18132 continue g=f continue deallocate(t,w,wr,v,a,f,as,mm,ga,ixx) return end function nonzero(n,v) implicit double precision(a-h,o-z) double precision v(n) nonzero=0 do 18561 i=1,n if(v(i) .eq. 0.0)goto 18581 nonzero=1 return 18581 continue 18561 continue continue return end subroutine solns(ni,nx,lmu,a,ia,nin,b) implicit double precision(a-h,o-z) double precision a(nx,lmu),b(ni,lmu) integer ia(nx),nin(lmu) do 18591 lam=1,lmu call uncomp(ni,a(:,lam),ia,nin(lam),b(:,lam)) 18591 continue continue return end subroutine lsolns(ni,nx,nc,lmu,a,ia,nin,b) implicit double precision(a-h,o-z) double precision a(nx,nc,lmu),b(ni,nc,lmu) integer ia(nx),nin(lmu) do 18601 lam=1,lmu call luncomp(ni,nx,nc,a(1,1,lam),ia,nin(lam),b(1,1,lam)) 18601 continue continue return end subroutine deviance(no,ni,x,y,g,q,nlam,a0,a,flog,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no),g(no),q(no),a(ni,nlam),a0(nlam),fl *og(nlam) double precision, dimension (:), allocatable :: w if(minval(y) .ge. 0.0)goto 18621 jerr=8888 return 18621 continue allocate(w(1:no),stat=jerr) if(jerr.ne.0) return w=max(0d0,q) sw=sum(w) if(sw .gt. 
0.0)goto 18641 jerr=9999 go to 12220 18641 continue yb=dot_product(w,y)/sw fmax=log(huge(y(1))*0.1) do 18651 lam=1,nlam s=0.0 do 18661 i=1,no if(w(i).le.0.0)goto 18661 f=g(i)+a0(lam)+dot_product(a(:,lam),x(i,:)) s=s+w(i)*(y(i)*f-exp(sign(min(abs(f),fmax),f))) 18661 continue continue flog(lam)=2.0*(sw*yb*(log(yb)-1.0)-s) 18651 continue continue 12220 continue deallocate(w) return end subroutine spfishnet(parm,no,ni,x,ix,jx,y,g,w,jd,vp,cl,ne,nx,nlam, *flmin, ulam,thr,isd,intr,maxit,lmu,a0,ca,ia,nin,dev0,dev,alm,nlp, *jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,ww,vq integer, dimension (:), allocatable :: ju if(maxval(vp) .gt. 0.0)goto 18681 jerr=10000 return 18681 continue if(minval(y) .ge. 0.0)goto 18701 jerr=8888 return 18701 continue allocate(ww(1:no),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 18721 jerr=7777 go to 12220 18721 continue vq=max(0d0,vp) vq=vq*ni/sum(vq) ww=max(0d0,w) sw=sum(ww) if(sw .gt. 0.0)goto 18741 jerr=9999 go to 12220 18741 continue ww=ww/sw call splstandard2(no,ni,x,ix,jx,ww,ju,isd,intr,xm,xs) if(isd .le. 0)goto 18761 do 18771 j=1,ni cl(:,j)=cl(:,j)*xs(j) 18771 continue continue 18761 continue call spfishnet1(parm,no,ni,x,ix,jx,y,g,ww,ju,vq,cl,ne,nx,nlam,flmi *n,ulam,thr, isd,intr,maxit,xm,xs,lmu,a0,ca,ia,nin,dev0,dev,alm,nl *p,jerr) if(jerr.gt.0) go to 12220 dev0=2.0*sw*dev0 do 18781 k=1,lmu nk=nin(k) if(isd.gt.0) ca(1:nk,k)=ca(1:nk,k)/xs(ia(1:nk)) if(intr .ne. 
0)goto 18801 a0(k)=0.0 goto 18811 18801 continue a0(k)=a0(k)-dot_product(ca(1:nk,k),xm(ia(1:nk))) 18811 continue continue 18781 continue continue 12220 continue deallocate(ww,ju,vq,xm,xs) return end subroutine spfishnet1(parm,no,ni,x,ix,jx,y,g,q,ju,vp,cl,ne,nx,nlam *,flmin,ulam, shri,isd,intr,maxit,xb,xs,lmu,a0,ca,m,kin,dev0,dev,a *lm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),q(no),vp(ni),ulam(nlam),xb(ni),x *s(ni) double precision ca(nx,nlam),a0(nlam),dev(nlam),alm(nlam),cl(2,ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:), allocatable :: qy,t,w,wr,v double precision, dimension (:), allocatable :: a,as,xm,ga integer, dimension (:), allocatable :: mm,ixx call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) sml=sml*10.0 isd = isd*1 allocate(a(1:ni),stat=jerr) if(jerr.ne.0) return allocate(as(1:ni),stat=jerr) if(jerr.ne.0) return allocate(t(1:no),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(wr(1:no),stat=jerr) if(jerr.ne.0) return allocate(v(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(qy(1:no),stat=jerr) if(jerr.ne.0) return bta=parm omb=1.0-bta fmax=log(huge(bta)*0.1) qy=q*y yb=sum(qy) if(nonzero(no,g) .ne. 0)goto 18831 t=0.0 if(intr .eq. 0)goto 18851 w=q*yb az=log(yb) uu=az xm=yb*xb dv0=yb*(az-1.0) goto 18861 18851 continue w=q xm=0.0 uu=0.0 az=uu dv0=-1.0 18861 continue continue goto 18871 18831 continue w=q*exp(sign(min(abs(g),fmax),g)) ww=sum(w) t=g if(intr .eq. 
0)goto 18891 eaz=yb/ww w=eaz*w az=log(eaz) uu=az dv0=dot_product(qy,g)-yb*(1.0-az) goto 18901 18891 continue uu=0.0 az=uu dv0=dot_product(qy,g)-ww 18901 continue continue do 18911 j=1,ni if(ju(j).eq.0)goto 18911 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) 18911 continue continue 18871 continue continue tt=yb*uu ww=1.0 if(intr.ne.0) ww=yb wr=qy-q*(yb*(1.0-uu)) a=0.0 as=0.0 dvr=-yb do 18921 i=1,no if(qy(i).gt.0.0) dvr=dvr+qy(i)*log(y(i)) 18921 continue continue dvr=dvr-dv0 dev0=dvr alf=1.0 if(flmin .ge. 1.0)goto 18941 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 18941 continue m=0 mm=0 nlp=0 nin=nlp mnl=min(mnlam,nlam) shr=shri*dev0 al=0.0 ixx=0 do 18951 j=1,ni if(ju(j).eq.0)goto 18951 jb=ix(j) je=ix(j+1)-1 ga(j)=abs(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(j)-ww*xb(j) *)-xb(j)*tt)/xs(j) 18951 continue continue do 18961 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 18981 al=ulam(ilm) goto 18971 18981 if(ilm .le. 2)goto 18991 al=al*alf goto 18971 18991 if(ilm .ne. 1)goto 19001 al=big goto 19011 19001 continue al0=0.0 do 19021 j=1,ni if(ju(j).eq.0)goto 19021 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 19021 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 19011 continue 18971 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 19031 k=1,ni if(ixx(k).eq.1)goto 19031 if(ju(k).eq.0)goto 19031 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 19031 continue continue 10880 continue continue 19041 continue if(nlp .le. 
maxit)goto 19061 jerr=-ilm return 19061 continue az0=az if(nin.gt.0) as(m(1:nin))=a(m(1:nin)) do 19071 j=1,ni if(ixx(j).eq.0)goto 19071 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) v(j)=(dot_product(w(jx(jb:je)),x(jb:je)**2) -2.0*xb(j)*xm(j)+ww*x *b(j)**2)/xs(j)**2 19071 continue continue continue 19081 continue nlp=nlp+1 dlx=0.0 do 19091 k=1,ni if(ixx(k).eq.0)goto 19091 jb=ix(k) je=ix(k+1)-1 ak=a(k) u=(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(k)-ww*xb(k))-xb(k) **tt)/xs(k)+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 19111 a(k)=0.0 goto 19121 19111 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 19121 continue continue if(a(k).eq.ak)goto 19091 if(mm(k) .ne. 0)goto 19141 nin=nin+1 if(nin.gt.nx)goto 19092 mm(k)=nin m(nin)=k 19141 continue d=a(k)-ak dlx=max(dlx,v(k)*d**2) dv=d/xs(k) wr(jx(jb:je))=wr(jx(jb:je))-dv*w(jx(jb:je))*x(jb:je) t(jx(jb:je))=t(jx(jb:je))+dv*x(jb:je) uu=uu-dv*xb(k) tt=tt-dv*xm(k) 19091 continue 19092 continue if(nin.gt.nx)goto 19082 if(intr .eq. 0)goto 19161 d=tt/ww-uu az=az+d dlx=max(dlx,ww*d**2) uu=uu+d 19161 continue if(dlx.lt.shr)goto 19082 if(nlp .le. maxit)goto 19181 jerr=-ilm return 19181 continue continue 19191 continue nlp=nlp+1 dlx=0.0 do 19201 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 ak=a(k) u=(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(k)-ww*xb(k))-xb(k) **tt)/xs(k)+v(k)*ak au=abs(u)-vp(k)*al1 if(au .gt. 0.0)goto 19221 a(k)=0.0 goto 19231 19221 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(v(k)+vp(k)*al2))) 19231 continue continue if(a(k).eq.ak)goto 19201 d=a(k)-ak dlx=max(dlx,v(k)*d**2) dv=d/xs(k) wr(jx(jb:je))=wr(jx(jb:je))-dv*w(jx(jb:je))*x(jb:je) t(jx(jb:je))=t(jx(jb:je))+dv*x(jb:je) uu=uu-dv*xb(k) tt=tt-dv*xm(k) 19201 continue continue if(intr .eq. 0)goto 19251 d=tt/ww-uu az=az+d dlx=max(dlx,ww*d**2) uu=uu+d 19251 continue if(dlx.lt.shr)goto 19192 if(nlp .le. 
maxit)goto 19271 jerr=-ilm return 19271 continue goto 19191 19192 continue goto 19081 19082 continue if(nin.gt.nx)goto 19042 euu=exp(sign(min(abs(uu),fmax),uu)) w=euu*q*exp(sign(min(abs(t),fmax),t)) ww=sum(w) wr=qy-w*(1.0-uu) tt=sum(wr) if(ww*(az-az0)**2 .ge. shr)goto 19291 kx=0 do 19301 j=1,nin k=m(j) if(v(k)*(a(k)-as(k))**2.lt.shr)goto 19301 kx=1 goto 19302 19301 continue 19302 continue if(kx .ne. 0)goto 19321 do 19331 j=1,ni if(ixx(j).eq.1)goto 19331 if(ju(j).eq.0)goto 19331 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) ga(j)=abs(dot_product(wr(jx(jb:je)),x(jb:je)) -uu*(xm(j)-ww*xb(j) *)-xb(j)*tt)/xs(j) if(ga(j) .le. al1*vp(j))goto 19351 ixx(j)=1 kx=1 19351 continue 19331 continue continue if(kx.eq.1) go to 10880 goto 19042 19321 continue 19291 continue goto 19041 19042 continue if(nin .le. nx)goto 19371 jerr=-10000-ilm goto 18962 19371 continue if(nin.gt.0) ca(1:nin,ilm)=a(m(1:nin)) kin(ilm)=nin a0(ilm)=az alm(ilm)=al lmu=ilm dev(ilm)=(dot_product(qy,t)+yb*uu-ww-dv0)/dvr if(ilm.lt.mnl)goto 18961 if(flmin.ge.1.0)goto 18961 me=0 do 19381 j=1,nin if(ca(j,ilm).ne.0.0) me=me+1 19381 continue continue if(me.gt.ne)goto 18962 if((dev(ilm)-dev(ilm-mnl+1))/dev(ilm).lt.sml)goto 18962 if(dev(ilm).gt.devmax)goto 18962 18961 continue 18962 continue g=t+uu continue deallocate(t,w,wr,v,a,qy,xm,as,mm,ga,ixx) return end subroutine spdeviance(no,ni,x,ix,jx,y,g,q,nlam,a0,a,flog,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),q(no),a(ni,nlam),a0(nlam),flog(n *lam) integer ix(*),jx(*) double precision, dimension (:), allocatable :: w,f if(minval(y) .ge. 0.0)goto 19401 jerr=8888 return 19401 continue allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return w=max(0d0,q) sw=sum(w) if(sw .gt. 
0.0)goto 19421 jerr=9999 go to 12220 19421 continue yb=dot_product(w,y)/sw fmax=log(huge(y(1))*0.1) do 19431 lam=1,nlam f=a0(lam) do 19441 j=1,ni if(a(j,lam).eq.0.0)goto 19441 jb=ix(j) je=ix(j+1)-1 f(jx(jb:je))=f(jx(jb:je))+a(j,lam)*x(jb:je) 19441 continue continue f=f+g s=dot_product(w,y*f-exp(sign(min(abs(f),fmax),f))) flog(lam)=2.0*(sw*yb*(log(yb)-1.0)-s) 19431 continue continue 12220 continue deallocate(w,f) return end subroutine cspdeviance(no,x,ix,jx,y,g,q,nx,nlam,a0,ca,ia,nin,flog, *jerr) implicit double precision(a-h,o-z) double precision x(*),y(no),g(no),q(no),ca(nx,nlam),a0(nlam),flog( *nlam) integer ix(*),jx(*),nin(nlam),ia(nx) double precision, dimension (:), allocatable :: w,f if(minval(y) .ge. 0.0)goto 19461 jerr=8888 return 19461 continue allocate(w(1:no),stat=jerr) if(jerr.ne.0) return allocate(f(1:no),stat=jerr) if(jerr.ne.0) return w=max(0d0,q) sw=sum(w) if(sw .gt. 0.0)goto 19481 jerr=9999 go to 12220 19481 continue yb=dot_product(w,y)/sw fmax=log(huge(y(1))*0.1) do 19491 lam=1,nlam f=a0(lam) do 19501 k=1,nin(lam) j=ia(k) jb=ix(j) je=ix(j+1)-1 f(jx(jb:je))=f(jx(jb:je))+ca(k,lam)*x(jb:je) 19501 continue continue f=f+g s=dot_product(w,y*f-exp(sign(min(abs(f),fmax),f))) flog(lam)=2.0*(sw*yb*(log(yb)-1.0)-s) 19491 continue continue 12220 continue deallocate(w,f) return end subroutine multelnet(parm,no,ni,nr,x,y,w,jd,vp,cl,ne,nx,nlam, flm *in,ulam,thr,isd,jsd,intr,maxit, lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr *) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nr),w(no),vp(ni),ca(nx,nr,nlam) double precision ulam(nlam),a0(nr,nlam),rsq(nlam),alm(nlam),cl(2,n *i) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 
0.0)goto 19521 jerr=10000 return 19521 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) call multelnetn(parm,no,ni,nr,x,y,w,jd,vq,cl,ne,nx,nlam,flmin,ulam *,thr,isd, jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) deallocate(vq) return end subroutine multelnetn(parm,no,ni,nr,x,y,w,jd,vp,cl,ne,nx,nlam,flmi *n,ulam,thr, isd,jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),x(no,ni),y(no,nr),w(no),ulam(nlam),cl(2,ni *) double precision ca(nx,nr,nlam),a0(nr,nlam),rsq(nlam),alm(nlam) integer jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,ym,ys integer, dimension (:), allocatable :: ju double precision, dimension (:,:,:), allocatable :: clt allocate(clt(1:2,1:nr,1:ni),stat=jerr); if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ym(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ys(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return call chkvars(no,ni,x,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 19541 jerr=7777 return 19541 continue call multstandard1(no,ni,nr,x,y,w,isd,jsd,intr,ju,xm,xs,ym,ys,xv,y *s0,jerr) if(jerr.ne.0) return do 19551 j=1,ni do 19561 k=1,nr do 19571 i=1,2 clt(i,k,j)=cl(i,j) 19571 continue continue 19561 continue continue 19551 continue continue if(isd .le. 0)goto 19591 do 19601 j=1,ni do 19611 k=1,nr do 19621 i=1,2 clt(i,k,j)=clt(i,k,j)*xs(j) 19621 continue continue 19611 continue continue 19601 continue continue 19591 continue if(jsd .le. 
0)goto 19641 do 19651 j=1,ni do 19661 k=1,nr do 19671 i=1,2 clt(i,k,j)=clt(i,k,j)/ys(k) 19671 continue continue 19661 continue continue 19651 continue continue 19641 continue call multelnet2(parm,ni,nr,ju,vp,clt,y,no,ne,nx,x,nlam,flmin,ulam, *thr,maxit,xv, ys0,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 19681 k=1,lmu nk=nin(k) do 19691 j=1,nr do 19701 l=1,nk ca(l,j,k)=ys(j)*ca(l,j,k)/xs(ia(l)) 19701 continue continue if(intr .ne. 0)goto 19721 a0(j,k)=0.0 goto 19731 19721 continue a0(j,k)=ym(j)-dot_product(ca(1:nk,j,k),xm(ia(1:nk))) 19731 continue continue 19691 continue continue 19681 continue continue deallocate(xm,xs,ym,ys,ju,xv,clt) return end subroutine multstandard1(no,ni,nr,x,y,w,isd,jsd,intr,ju, xm,xs,ym *,ys,xv,ys0,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nr),w(no),xm(ni),xs(ni),xv(ni),ym(n *r),ys(nr) integer ju(ni) double precision, dimension (:), allocatable :: v allocate(v(1:no),stat=jerr) if(jerr.ne.0) return w=w/sum(w) v=sqrt(w) if(intr .ne. 0)goto 19751 do 19761 j=1,ni if(ju(j).eq.0)goto 19761 xm(j)=0.0 x(:,j)=v*x(:,j) z=dot_product(x(:,j),x(:,j)) if(isd .le. 0)goto 19781 xbq=dot_product(v,x(:,j))**2 vc=z-xbq xs(j)=sqrt(vc) x(:,j)=x(:,j)/xs(j) xv(j)=1.0+xbq/vc goto 19791 19781 continue xs(j)=1.0 xv(j)=z 19791 continue continue 19761 continue continue ys0=0.0 do 19801 j=1,nr ym(j)=0.0 y(:,j)=v*y(:,j) z=dot_product(y(:,j),y(:,j)) if(jsd .le. 0)goto 19821 u=z-dot_product(v,y(:,j))**2 ys0=ys0+z/u ys(j)=sqrt(u) y(:,j)=y(:,j)/ys(j) goto 19831 19821 continue ys(j)=1.0 ys0=ys0+z 19831 continue continue 19801 continue continue go to 10700 19751 continue do 19841 j=1,ni if(ju(j).eq.0)goto 19841 xm(j)=dot_product(w,x(:,j)) x(:,j)=v*(x(:,j)-xm(j)) xv(j)=dot_product(x(:,j),x(:,j)) if(isd.gt.0) xs(j)=sqrt(xv(j)) 19841 continue continue if(isd .ne. 
0)goto 19861 xs=1.0 goto 19871 19861 continue do 19881 j=1,ni if(ju(j).eq.0)goto 19881 x(:,j)=x(:,j)/xs(j) 19881 continue continue xv=1.0 19871 continue continue ys0=0.0 do 19891 j=1,nr ym(j)=dot_product(w,y(:,j)) y(:,j)=v*(y(:,j)-ym(j)) z=dot_product(y(:,j),y(:,j)) if(jsd .le. 0)goto 19911 ys(j)=sqrt(z) y(:,j)=y(:,j)/ys(j) goto 19921 19911 continue ys0=ys0+z 19921 continue continue 19891 continue continue if(jsd .ne. 0)goto 19941 ys=1.0 goto 19951 19941 continue ys0=nr 19951 continue continue 10700 continue deallocate(v) return end subroutine multelnet2(beta,ni,nr,ju,vp,cl,y,no,ne,nx,x,nlam,flmin, *ulam,thri, maxit,xv,ys0,lmu,ao,ia,kin,rsqo,almo,nlp,jerr) implicit double precision(a-h,o-z) double precision vp(ni),y(no,nr),x(no,ni),ulam(nlam),ao(nx,nr,nlam *) double precision rsqo(nlam),almo(nlam),xv(ni),cl(2,nr,ni) integer ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: g,gk,del,gj integer, dimension (:), allocatable :: mm,ix,isc double precision, dimension (:,:), allocatable :: a allocate(a(1:nr,1:ni),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(gj(1:nr),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nr),stat=jerr) if(jerr.ne.0) return allocate(del(1:nr),stat=jerr) if(jerr.ne.0) return allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ix(1:ni),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nr),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta ix=0 thr=thri*ys0/nr alf=1.0 if(flmin .ge. 1.0)goto 19971 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 19971 continue rsq=ys0 a=0.0 mm=0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) alm=0.0 do 19981 j=1,ni if(ju(j).eq.0)goto 19981 g(j)=0.0 do 19991 k=1,nr g(j)=g(j)+dot_product(y(:,k),x(:,j))**2 19991 continue continue g(j)=sqrt(g(j)) 19981 continue continue do 20001 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 20021 alm=ulam(m) goto 20011 20021 if(m .le. 
2)goto 20031 alm=alm*alf goto 20011 20031 if(m .ne. 1)goto 20041 alm=big goto 20051 20041 continue alm0=0.0 do 20061 j=1,ni if(ju(j).eq.0)goto 20061 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 20061 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 20051 continue 20011 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 20071 k=1,ni if(ix(k).eq.1)goto 20071 if(ju(k).eq.0)goto 20071 if(g(k).gt.tlam*vp(k)) ix(k)=1 20071 continue continue continue 20081 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. maxit)goto 20101 jerr=-m return 20101 continue nlp=nlp+1 dlx=0.0 do 20111 k=1,ni if(ix(k).eq.0)goto 20111 gkn=0.0 do 20121 j=1,nr gj(j)=dot_product(y(:,j),x(:,k)) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 20121 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 20141 a(:,k)=0.0 goto 20151 20141 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 20151 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 20111 do 20161 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(:,j)=y(:,j)-del(j)*x(:,k) dlx=max(dlx,xv(k)*del(j)**2) 20161 continue continue if(mm(k) .ne. 0)goto 20181 nin=nin+1 if(nin.gt.nx)goto 20112 mm(k)=nin ia(nin)=k 20181 continue 20111 continue 20112 continue if(nin.gt.nx)goto 20082 if(dlx .ge. thr)goto 20201 ixx=0 do 20211 k=1,ni if(ix(k).eq.1)goto 20211 if(ju(k).eq.0)goto 20211 g(k)=0.0 do 20221 j=1,nr g(k)=g(k)+dot_product(y(:,j),x(:,k))**2 20221 continue continue g(k)=sqrt(g(k)) if(g(k) .le. ab*vp(k))goto 20241 ix(k)=1 ixx=1 20241 continue 20211 continue continue if(ixx.eq.1) go to 10880 goto 20082 20201 continue if(nlp .le. 
maxit)goto 20261 jerr=-m return 20261 continue 10360 continue iz=1 continue 20271 continue nlp=nlp+1 dlx=0.0 do 20281 l=1,nin k=ia(l) gkn=0.0 do 20291 j=1,nr gj(j)=dot_product(y(:,j),x(:,k)) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 20291 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 20311 a(:,k)=0.0 goto 20321 20311 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 20321 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 20281 do 20331 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(:,j)=y(:,j)-del(j)*x(:,k) dlx=max(dlx,xv(k)*del(j)**2) 20331 continue continue 20281 continue continue if(dlx.lt.thr)goto 20272 if(nlp .le. maxit)goto 20351 jerr=-m return 20351 continue goto 20271 20272 continue jz=0 goto 20081 20082 continue if(nin .le. nx)goto 20371 jerr=-10000-m goto 20002 20371 continue if(nin .le. 0)goto 20391 do 20401 j=1,nr ao(1:nin,j,m)=a(j,ia(1:nin)) 20401 continue continue 20391 continue kin(m)=nin rsqo(m)=1.0-rsq/ys0 almo(m)=alm lmu=m if(m.lt.mnl)goto 20001 if(flmin.ge.1.0)goto 20001 me=0 do 20411 j=1,nin if(ao(j,1,m).ne.0.0) me=me+1 20411 continue continue if(me.gt.ne)goto 20002 if(rsq0-rsq.lt.sml*rsq)goto 20002 if(rsqo(m).gt.rsqmax)goto 20002 20001 continue 20002 continue deallocate(a,mm,g,ix,del,gj,gk) return end subroutine chkbnds(nr,gk,gkn,xv,cl,al1,al2,a,isc,jerr) implicit double precision(a-h,o-z) double precision gk(nr),cl(2,nr),a(nr) integer isc(nr) kerr=0 al1p=1.0+al1/xv al2p=al2/xv isc=0 gsq=gkn**2 asq=dot_product(a,a) usq=0.0 u=0.0 kn=-1 continue 20421 continue vmx=0.0 do 20431 k=1,nr v=max(a(k)-cl(2,k),cl(1,k)-a(k)) if(v .le. vmx)goto 20451 vmx=v kn=k 20451 continue 20431 continue continue if(vmx.le.0.0)goto 20422 if(isc(kn).ne.0)goto 20422 gsq=gsq-gk(kn)**2 g=sqrt(gsq)/xv if(a(kn).lt.cl(1,kn)) u=cl(1,kn) if(a(kn).gt.cl(2,kn)) u=cl(2,kn) usq=usq+u**2 if(usq .ne. 
0.0)goto 20471 b=max(0d0,(g-al2p)/al1p) goto 20481 20471 continue b0=sqrt(asq-a(kn)**2) b=bnorm(b0,al1p,al2p,g,usq,kerr) if(kerr.ne.0)goto 20422 20481 continue continue asq=usq+b**2 if(asq .gt. 0.0)goto 20501 a=0.0 goto 20422 20501 continue a(kn)=u isc(kn)=1 f=1.0/(xv*(al1p+al2p/sqrt(asq))) do 20511 j=1,nr if(isc(j).eq.0) a(j)=f*gk(j) 20511 continue continue goto 20421 20422 continue if(kerr.ne.0) jerr=kerr return end subroutine chkbnds1(nr,gk,gkn,xv,cl1,cl2,al1,al2,a,isc,jerr) implicit double precision(a-h,o-z) double precision gk(nr),a(nr) integer isc(nr) kerr=0 al1p=1.0+al1/xv al2p=al2/xv isc=0 gsq=gkn**2 asq=dot_product(a,a) usq=0.0 u=0.0 kn=-1 continue 20521 continue vmx=0.0 do 20531 k=1,nr v=max(a(k)-cl2,cl1-a(k)) if(v .le. vmx)goto 20551 vmx=v kn=k 20551 continue 20531 continue continue if(vmx.le.0.0)goto 20522 if(isc(kn).ne.0)goto 20522 gsq=gsq-gk(kn)**2 g=sqrt(gsq)/xv if(a(kn).lt.cl1) u=cl1 if(a(kn).gt.cl2) u=cl2 usq=usq+u**2 if(usq .ne. 0.0)goto 20571 b=max(0d0,(g-al2p)/al1p) goto 20581 20571 continue b0=sqrt(asq-a(kn)**2) b=bnorm(b0,al1p,al2p,g,usq,kerr) if(kerr.ne.0)goto 20522 20581 continue continue asq=usq+b**2 if(asq .gt. 0.0)goto 20601 a=0.0 goto 20522 20601 continue a(kn)=u isc(kn)=1 f=1.0/(xv*(al1p+al2p/sqrt(asq))) do 20611 j=1,nr if(isc(j).eq.0) a(j)=f*gk(j) 20611 continue continue goto 20521 20522 continue if(kerr.ne.0) jerr=kerr return end function bnorm(b0,al1p,al2p,g,usq,jerr) implicit double precision(a-h,o-z) data thr,mxit /1.0d-10,100/ b=b0 zsq=b**2+usq if(zsq .gt. 0.0)goto 20631 bnorm=0.0 return 20631 continue z=sqrt(zsq) f=b*(al1p+al2p/z)-g jerr=0 do 20641 it=1,mxit b=b-f/(al1p+al2p*usq/(z*zsq)) zsq=b**2+usq if(zsq .gt. 0.0)goto 20661 bnorm=0.0 return 20661 continue z=sqrt(zsq) f=b*(al1p+al2p/z)-g if(abs(f).le.thr)goto 20642 if(b .gt. 
0.0)goto 20681 b=0.0 goto 20642 20681 continue 20641 continue 20642 continue bnorm=b if(it.ge.mxit) jerr=90000 return entry chg_bnorm(arg,irg) bnorm = 0.0 thr=arg mxit=irg return entry get_bnorm(arg,irg) bnorm = 0.0 arg=thr irg=mxit return end subroutine multsolns(ni,nx,nr,lmu,a,ia,nin,b) implicit double precision(a-h,o-z) double precision a(nx,nr,lmu),b(ni,nr,lmu) integer ia(nx),nin(lmu) do 20691 lam=1,lmu call multuncomp(ni,nr,nx,a(1,1,lam),ia,nin(lam),b(1,1,lam)) 20691 continue continue return end subroutine multuncomp(ni,nr,nx,ca,ia,nin,a) implicit double precision(a-h,o-z) double precision ca(nx,nr),a(ni,nr) integer ia(nx) a=0.0 if(nin .le. 0)goto 20711 do 20721 j=1,nr a(ia(1:nin),j)=ca(1:nin,j) 20721 continue continue 20711 continue return end subroutine multmodval(nx,nr,a0,ca,ia,nin,n,x,f) implicit double precision(a-h,o-z) double precision a0(nr),ca(nx,nr),x(n,*),f(nr,n) integer ia(nx) do 20731 i=1,n f(:,i)=a0 20731 continue continue if(nin.le.0) return do 20741 i=1,n do 20751 j=1,nr f(j,i)=f(j,i)+dot_product(ca(1:nin,j),x(i,ia(1:nin))) 20751 continue continue 20741 continue continue return end subroutine multspelnet(parm,no,ni,nr,x,ix,jx,y,w,jd,vp,cl,ne,nx, *nlam,flmin,ulam,thr,isd, jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm, *nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nr),w(no),vp(ni),ulam(nlam),cl(2,ni) double precision ca(nx,nr,nlam),a0(nr,nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: vq; if(maxval(vp) .gt. 
0.0)goto 20771 jerr=10000 return 20771 continue allocate(vq(1:ni),stat=jerr) if(jerr.ne.0) return vq=max(0d0,vp) vq=vq*ni/sum(vq) call multspelnetn(parm,no,ni,nr,x,ix,jx,y,w,jd,vq,cl,ne,nx,nlam,fl *min, ulam,thr,isd,jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,nlp,jer *r) deallocate(vq) return end subroutine multspelnetn(parm,no,ni,nr,x,ix,jx,y,w,jd,vp,cl,ne,nx,n *lam,flmin, ulam,thr,isd,jsd,intr,maxit,lmu,a0,ca,ia,nin,rsq,alm,n *lp,jerr) implicit double precision(a-h,o-z) double precision x(*),vp(ni),y(no,nr),w(no),ulam(nlam),cl(2,ni) double precision ca(nx,nr,nlam),a0(nr,nlam),rsq(nlam),alm(nlam) integer ix(*),jx(*),jd(*),ia(nx),nin(nlam) double precision, dimension (:), allocatable :: xm,xs,xv,ym,ys integer, dimension (:), allocatable :: ju double precision, dimension (:,:,:), allocatable :: clt allocate(clt(1:2,1:nr,1:ni),stat=jerr) if(jerr.ne.0) return allocate(xm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xs(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ym(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ys(1:nr),stat=jerr) if(jerr.ne.0) return allocate(ju(1:ni),stat=jerr) if(jerr.ne.0) return allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return call spchkvars(no,ni,x,ix,ju) if(jd(1).gt.0) ju(jd(2:(jd(1)+1)))=0 if(maxval(ju) .gt. 0)goto 20791 jerr=7777 return 20791 continue call multspstandard1(no,ni,nr,x,ix,jx,y,w,ju,isd,jsd,intr, xm,xs, *ym,ys,xv,ys0,jerr) if(jerr.ne.0) return do 20801 j=1,ni do 20811 k=1,nr do 20821 i=1,2 clt(i,k,j)=cl(i,j) 20821 continue continue 20811 continue continue 20801 continue continue if(isd .le. 0)goto 20841 do 20851 j=1,ni do 20861 k=1,nr do 20871 i=1,2 clt(i,k,j)=clt(i,k,j)*xs(j) 20871 continue continue 20861 continue continue 20851 continue continue 20841 continue if(jsd .le. 
0)goto 20891 do 20901 j=1,ni do 20911 k=1,nr do 20921 i=1,2 clt(i,k,j)=clt(i,k,j)/ys(k) 20921 continue continue 20911 continue continue 20901 continue continue 20891 continue call multspelnet2(parm,ni,nr,y,w,no,ne,nx,x,ix,jx,ju,vp,clt,nlam,f *lmin, ulam,thr,maxit,xm,xs,xv,ys0,lmu,ca,ia,nin,rsq,alm,nlp,jerr) if(jerr.gt.0) return do 20931 k=1,lmu nk=nin(k) do 20941 j=1,nr do 20951 l=1,nk ca(l,j,k)=ys(j)*ca(l,j,k)/xs(ia(l)) 20951 continue continue if(intr .ne. 0)goto 20971 a0(j,k)=0.0 goto 20981 20971 continue a0(j,k)=ym(j)-dot_product(ca(1:nk,j,k),xm(ia(1:nk))) 20981 continue continue 20941 continue continue 20931 continue continue deallocate(xm,xs,ym,ys,ju,xv,clt) return end subroutine multspstandard1(no,ni,nr,x,ix,jx,y,w,ju,isd,jsd,intr, *xm,xs,ym,ys,xv,ys0,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nr),w(no),xm(ni),xs(ni),xv(ni),ym(nr),y *s(nr) integer ix(*),jx(*),ju(ni) jerr = jerr*1 w=w/sum(w) if(intr .ne. 0)goto 21001 do 21011 j=1,ni if(ju(j).eq.0)goto 21011 xm(j)=0.0 jb=ix(j) je=ix(j+1)-1 z=dot_product(w(jx(jb:je)),x(jb:je)**2) if(isd .le. 0)goto 21031 xbq=dot_product(w(jx(jb:je)),x(jb:je))**2 vc=z-xbq xs(j)=sqrt(vc) xv(j)=1.0+xbq/vc goto 21041 21031 continue xs(j)=1.0 xv(j)=z 21041 continue continue 21011 continue continue ys0=0.0 do 21051 j=1,nr ym(j)=0.0 z=dot_product(w,y(:,j)**2) if(jsd .le. 0)goto 21071 u=z-dot_product(w,y(:,j))**2 ys0=ys0+z/u ys(j)=sqrt(u) y(:,j)=y(:,j)/ys(j) goto 21081 21071 continue ys(j)=1.0 ys0=ys0+z 21081 continue continue 21051 continue continue return 21001 continue do 21091 j=1,ni if(ju(j).eq.0)goto 21091 jb=ix(j) je=ix(j+1)-1 xm(j)=dot_product(w(jx(jb:je)),x(jb:je)) xv(j)=dot_product(w(jx(jb:je)),x(jb:je)**2)-xm(j)**2 if(isd.gt.0) xs(j)=sqrt(xv(j)) 21091 continue continue if(isd .ne. 0)goto 21111 xs=1.0 goto 21121 21111 continue xv=1.0 21121 continue continue ys0=0.0 do 21131 j=1,nr ym(j)=dot_product(w,y(:,j)) y(:,j)=y(:,j)-ym(j) z=dot_product(w,y(:,j)**2) if(jsd .le. 
0)goto 21151 ys(j)=sqrt(z) y(:,j)=y(:,j)/ys(j) goto 21161 21151 continue ys0=ys0+z 21161 continue continue 21131 continue continue if(jsd .ne. 0)goto 21181 ys=1.0 goto 21191 21181 continue ys0=nr 21191 continue continue return end subroutine multspelnet2(beta,ni,nr,y,w,no,ne,nx,x,ix,jx,ju,vp,cl,n *lam,flmin, ulam,thri,maxit,xm,xs,xv,ys0,lmu,ao,ia,kin,rsqo,almo,n *lp,jerr) implicit double precision(a-h,o-z) double precision y(no,nr),w(no),x(*),vp(ni),ulam(nlam),cl(2,nr,ni) double precision ao(nx,nr,nlam),rsqo(nlam),almo(nlam),xm(ni),xs(ni *),xv(ni) integer ix(*),jx(*),ju(ni),ia(nx),kin(nlam) double precision, dimension (:), allocatable :: g,gj,gk,del,o integer, dimension (:), allocatable :: mm,iy,isc double precision, dimension (:,:), allocatable :: a allocate(a(1:nr,1:ni),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,rsqmax,pmin,exmx,itrace) allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(g(1:ni),stat=jerr) if(jerr.ne.0) return allocate(gj(1:nr),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nr),stat=jerr) if(jerr.ne.0) return allocate(del(1:nr),stat=jerr) if(jerr.ne.0) return allocate(o(1:nr),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nr),stat=jerr) if(jerr.ne.0) return bta=beta omb=1.0-bta alm=0.0 iy=0 thr=thri*ys0/nr alf=1.0 if(flmin .ge. 1.0)goto 21211 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 21211 continue rsq=ys0 a=0.0 mm=0 o=0.0 nlp=0 nin=nlp iz=0 mnl=min(mnlam,nlam) do 21221 j=1,ni if(ju(j).eq.0)goto 21221 jb=ix(j) je=ix(j+1)-1 g(j)=0.0 do 21231 k=1,nr g(j)=g(j)+(dot_product(y(jx(jb:je),k),w(jx(jb:je))*x(jb:je))/xs(j) *)**2 21231 continue continue g(j)=sqrt(g(j)) 21221 continue continue do 21241 m=1,nlam if(itrace.ne.0) call setpb(m-1) alm0=alm if(flmin .lt. 1.0)goto 21261 alm=ulam(m) goto 21251 21261 if(m .le. 2)goto 21271 alm=alm*alf goto 21251 21271 if(m .ne. 
1)goto 21281 alm=big goto 21291 21281 continue alm0=0.0 do 21301 j=1,ni if(ju(j).eq.0)goto 21301 if(vp(j).gt.0.0) alm0=max(alm0,g(j)/vp(j)) 21301 continue continue alm0=alm0/max(bta,1.0d-3) alm=alf*alm0 21291 continue 21251 continue dem=alm*omb ab=alm*bta rsq0=rsq jz=1 tlam=bta*(2.0*alm-alm0) do 21311 k=1,ni if(iy(k).eq.1)goto 21311 if(ju(k).eq.0)goto 21311 if(g(k).gt.tlam*vp(k)) iy(k)=1 21311 continue continue continue 21321 continue if(iz*jz.ne.0) go to 10360 10880 if(nlp .le. maxit)goto 21341 jerr=-m return 21341 continue nlp=nlp+1 dlx=0.0 do 21351 k=1,ni if(iy(k).eq.0)goto 21351 jb=ix(k) je=ix(k+1)-1 gkn=0.0 do 21361 j=1,nr gj(j)=dot_product(y(jx(jb:je),j)+o(j),w(jx(jb:je))*x(jb:je))/xs(k) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 21361 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 21381 a(:,k)=0.0 goto 21391 21381 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 21391 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 21351 if(mm(k) .ne. 0)goto 21411 nin=nin+1 if(nin.gt.nx)goto 21352 mm(k)=nin ia(nin)=k 21411 continue do 21421 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(jx(jb:je),j)=y(jx(jb:je),j)-del(j)*x(jb:je)/xs(k) o(j)=o(j)+del(j)*xm(k)/xs(k) dlx=max(xv(k)*del(j)**2,dlx) 21421 continue continue 21351 continue 21352 continue if(nin.gt.nx)goto 21322 if(dlx .ge. thr)goto 21441 ixx=0 do 21451 j=1,ni if(iy(j).eq.1)goto 21451 if(ju(j).eq.0)goto 21451 jb=ix(j) je=ix(j+1)-1 g(j)=0.0 do 21461 k=1,nr g(j)=g(j)+ (dot_product(y(jx(jb:je),k)+o(k),w(jx(jb:je))*x(jb:je) *)/xs(j))**2 21461 continue continue g(j)=sqrt(g(j)) if(g(j) .le. ab*vp(j))goto 21481 iy(j)=1 ixx=1 21481 continue 21451 continue continue if(ixx.eq.1) go to 10880 goto 21322 21441 continue if(nlp .le. 
maxit)goto 21501 jerr=-m return 21501 continue 10360 continue iz=1 continue 21511 continue nlp=nlp+1 dlx=0.0 do 21521 l=1,nin k=ia(l) jb=ix(k) je=ix(k+1)-1 gkn=0.0 do 21531 j=1,nr gj(j)= dot_product(y(jx(jb:je),j)+o(j),w(jx(jb:je))*x(jb:je))/xs( *k) gk(j)=gj(j)+a(j,k)*xv(k) gkn=gkn+gk(j)**2 21531 continue continue gkn=sqrt(gkn) u=1.0-ab*vp(k)/gkn del=a(:,k) if(u .gt. 0.0)goto 21551 a(:,k)=0.0 goto 21561 21551 continue a(:,k)=gk*(u/(xv(k)+dem*vp(k))) call chkbnds(nr,gk,gkn,xv(k),cl(1,1,k), dem*vp(k),ab*vp(k),a(:,k) *,isc,jerr) if(jerr.ne.0) return 21561 continue continue del=a(:,k)-del if(maxval(abs(del)).le.0.0)goto 21521 do 21571 j=1,nr rsq=rsq-del(j)*(2.0*gj(j)-del(j)*xv(k)) y(jx(jb:je),j)=y(jx(jb:je),j)-del(j)*x(jb:je)/xs(k) o(j)=o(j)+del(j)*xm(k)/xs(k) dlx=max(xv(k)*del(j)**2,dlx) 21571 continue continue 21521 continue continue if(dlx.lt.thr)goto 21512 if(nlp .le. maxit)goto 21591 jerr=-m return 21591 continue goto 21511 21512 continue jz=0 goto 21321 21322 continue if(nin .le. nx)goto 21611 jerr=-10000-m goto 21242 21611 continue if(nin .le. 
0)goto 21631 do 21641 j=1,nr ao(1:nin,j,m)=a(j,ia(1:nin)) 21641 continue continue 21631 continue kin(m)=nin rsqo(m)=1.0-rsq/ys0 almo(m)=alm lmu=m if(m.lt.mnl)goto 21241 if(flmin.ge.1.0)goto 21241 me=0 do 21651 j=1,nin if(ao(j,1,m).ne.0.0) me=me+1 21651 continue continue if(me.gt.ne)goto 21242 if(rsq0-rsq.lt.sml*rsq)goto 21242 if(rsqo(m).gt.rsqmax)goto 21242 21241 continue 21242 continue deallocate(a,mm,g,iy,gj,gk,del,o) return end subroutine multlognetn(parm,no,ni,nc,x,y,g,w,ju,vp,cl,ne,nx,nlam,f *lmin,ulam, shri,intr,maxit,xv,lmu,a0,a,m,kin,dev0,dev,alm,nlp,jer *r) implicit double precision(a-h,o-z) double precision x(no,ni),y(no,nc),g(no,nc),w(no),vp(ni),ulam(nlam *),cl(2,ni) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),xv( *ni) integer ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q,r,b,bs double precision, dimension (:), allocatable :: sxp,sxpl,ga,gk,del integer, dimension (:), allocatable :: mm,is,ixx,isc allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return allocate(r(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ixx(1:ni),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nc),stat=jerr) if(jerr.ne.0) return allocate(del(1:nc),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nc),stat=jerr) if(jerr.ne.0) return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 21661 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 21681 jerr =8000+ic return 21681 continue if(q0 .lt. pmax)goto 21701 jerr =9000+ic return 21701 continue if(intr .ne. 
0)goto 21721 q0=1.0/nc b(0,ic)=0.0 goto 21731 21721 continue b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 21731 continue continue b(1:ni,ic)=0.0 21661 continue continue if(intr.eq.0) dev1=log(float(nc)) ixx=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 21751 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 21761 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 21761 continue continue goto 21771 21751 continue do 21781 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 21781 continue continue sxp=0.0 if(intr .ne. 0)goto 21801 b(0,:)=0.0 goto 21811 21801 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 21811 continue continue dev1=0.0 do 21821 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 21821 continue continue sxpl=w*log(sxp) do 21831 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 21831 continue continue 21771 continue continue do 21841 ic=1,nc do 21851 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 21851 continue continue 21841 continue continue dev0=dev0+dev1 alf=1.0 if(flmin .ge. 1.0)goto 21871 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 21871 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 shr=shri*dev0 ga=0.0 do 21881 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) do 21891 j=1,ni if(ju(j).ne.0) ga(j)=ga(j)+dot_product(r(:,ic),x(:,j))**2 21891 continue continue 21881 continue continue ga=sqrt(ga) do 21901 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 21921 al=ulam(ilm) goto 21911 21921 if(ilm .le. 2)goto 21931 al=al*alf goto 21911 21931 if(ilm .ne. 
1)goto 21941 al=big goto 21951 21941 continue al0=0.0 do 21961 j=1,ni if(ju(j).eq.0)goto 21961 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 21961 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 21951 continue 21911 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 21971 k=1,ni if(ixx(k).eq.1)goto 21971 if(ju(k).eq.0)goto 21971 if(ga(k).gt.tlam*vp(k)) ixx(k)=1 21971 continue continue 10880 continue continue 21981 continue ix=0 jx=ix kx=jx t=0.0 if(nlp .le. maxit)goto 22001 jerr=-ilm return 22001 continue do 22011 ic=1,nc t=max(t,maxval(q(:,ic)*(1.0-q(:,ic)/sxp)/sxp)) 22011 continue continue if(t .ge. eps)goto 22031 kx=1 goto 21982 22031 continue t=2.0*t alt=al1/t al2t=al2/t do 22041 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp)/t d=0.0 if(intr.ne.0) d=sum(r(:,ic)) if(d .eq. 0.0)goto 22061 b(0,ic)=b(0,ic)+d r(:,ic)=r(:,ic)-d*w dlx=max(dlx,d**2) 22061 continue 22041 continue continue continue 22071 continue nlp=nlp+nc dlx=0.0 do 22081 k=1,ni if(ixx(k).eq.0)goto 22081 gkn=0.0 do 22091 ic=1,nc gk(ic)=dot_product(r(:,ic),x(:,k))+b(k,ic)*xv(k) gkn=gkn+gk(ic)**2 22091 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn del=b(k,:) if(u .gt. 0.0)goto 22111 b(k,:)=0.0 goto 22121 22111 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k), cl(2,k),vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 22121 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 22081 do 22131 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(:,ic)=r(:,ic)-del(ic)*w*x(:,k) 22131 continue continue if(mm(k) .ne. 0)goto 22151 nin=nin+1 if(nin .le. nx)goto 22171 jx=1 goto 22082 22171 continue mm(k)=nin m(nin)=k 22151 continue 22081 continue 22082 continue if(jx.gt.0)goto 22072 if(dlx.lt.shr)goto 22072 if(nlp .le. 
maxit)goto 22191 jerr=-ilm return 22191 continue continue 22201 continue nlp=nlp+nc dlx=0.0 do 22211 l=1,nin k=m(l) gkn=0.0 do 22221 ic=1,nc gk(ic)=dot_product(r(:,ic),x(:,k))+b(k,ic)*xv(k) gkn=gkn+gk(ic)**2 22221 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn del=b(k,:) if(u .gt. 0.0)goto 22241 b(k,:)=0.0 goto 22251 22241 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k), cl(2,k),vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 22251 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 22211 do 22261 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(:,ic)=r(:,ic)-del(ic)*w*x(:,k) 22261 continue continue 22211 continue continue if(dlx.lt.shr)goto 22202 if(nlp .le. maxit)goto 22281 jerr=-ilm return 22281 continue goto 22201 22202 continue goto 22071 22072 continue if(jx.gt.0)goto 21982 do 22291 ic=1,nc if((b(0,ic)-bs(0,ic))**2.gt.shr) ix=1 if(ix .ne. 0)goto 22311 do 22321 j=1,nin k=m(j) if(xv(k)*(b(k,ic)-bs(k,ic))**2 .le. shr)goto 22341 ix=1 goto 22322 22341 continue 22321 continue 22322 continue 22311 continue do 22351 i=1,no fi=b(0,ic)+g(i,ic) if(nin.gt.0) fi=fi+dot_product(b(m(1:nin),ic),x(i,m(1:nin))) fi=min(max(exmn,fi),exmx) sxp(i)=sxp(i)-q(i,ic) q(i,ic)=min(max(emin*sxp(i),exp(fi)),emax*sxp(i)) sxp(i)=sxp(i)+q(i,ic) 22351 continue continue 22291 continue continue s=-sum(b(0,:))/nc b(0,:)=b(0,:)+s if(jx.gt.0)goto 21982 if(ix .ne. 0)goto 22371 do 22381 k=1,ni if(ixx(k).eq.1)goto 22381 if(ju(k).eq.0)goto 22381 ga(k)=0.0 22381 continue continue do 22391 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) do 22401 k=1,ni if(ixx(k).eq.1)goto 22401 if(ju(k).eq.0)goto 22401 ga(k)=ga(k)+dot_product(r(:,ic),x(:,k))**2 22401 continue continue 22391 continue continue ga=sqrt(ga) do 22411 k=1,ni if(ixx(k).eq.1)goto 22411 if(ju(k).eq.0)goto 22411 if(ga(k) .le. al1*vp(k))goto 22431 ixx(k)=1 ix=1 22431 continue 22411 continue continue if(ix.eq.1) go to 10880 goto 21982 22371 continue goto 21981 21982 continue if(kx .le. 
0)goto 22451 jerr=-20000-ilm goto 21902 22451 continue if(jx .le. 0)goto 22471 jerr=-10000-ilm goto 21902 22471 continue devi=0.0 do 22481 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 22491 i=1,no if(y(i,ic).le.0.0)goto 22491 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 22491 continue continue 22481 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ilm.lt.mnl)goto 21901 if(flmin.ge.1.0)goto 21901 me=0 do 22501 j=1,nin if(a(j,1,ilm).ne.0.0) me=me+1 22501 continue continue if(me.gt.ne)goto 21902 if(dev(ilm).gt.devmax)goto 21902 if(dev(ilm)-dev(ilm-1).lt.sml)goto 21902 21901 continue 21902 continue g=log(q) do 22511 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 22511 continue continue deallocate(sxp,b,bs,r,q,mm,is,ga,ixx,gk,del,sxpl) return end subroutine multsprlognetn(parm,no,ni,nc,x,ix,jx,y,g,w,ju,vp,cl,ne, *nx,nlam, flmin,ulam,shri,intr,maxit,xv,xb,xs,lmu,a0,a,m,kin,dev0, *dev,alm,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),y(no,nc),g(no,nc),w(no),vp(ni) double precision ulam(nlam),xb(ni),xs(ni),xv(ni) double precision a(nx,nc,nlam),a0(nc,nlam),dev(nlam),alm(nlam),cl( *2,ni) integer ix(*),jx(*),ju(ni),m(nx),kin(nlam) double precision, dimension (:,:), allocatable :: q,r,b,bs double precision, dimension (:), allocatable :: sxp,sxpl,ga,gk double precision, dimension (:), allocatable :: del,sc,svr integer, dimension (:), allocatable :: mm,is,iy,isc allocate(b(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(bs(0:ni,1:nc),stat=jerr) if(jerr.ne.0) return allocate(q(1:no,1:nc),stat=jerr) if(jerr.ne.0) return allocate(r(1:no,1:nc),stat=jerr) if(jerr.ne.0) return call get_int_parms(sml,eps,big,mnlam,devmax,pmin,exmx,itrace) exmn=-exmx allocate(mm(1:ni),stat=jerr) if(jerr.ne.0) return allocate(ga(1:ni),stat=jerr) if(jerr.ne.0) return allocate(gk(1:nc),stat=jerr) if(jerr.ne.0) return allocate(del(1:nc),stat=jerr) if(jerr.ne.0) return allocate(iy(1:ni),stat=jerr) if(jerr.ne.0) return 
allocate(is(1:max(nc,ni)),stat=jerr) if(jerr.ne.0) return allocate(sxp(1:no),stat=jerr) if(jerr.ne.0) return allocate(sxpl(1:no),stat=jerr) if(jerr.ne.0) return allocate(svr(1:nc),stat=jerr) if(jerr.ne.0) return allocate(sc(1:no),stat=jerr) if(jerr.ne.0) return allocate(isc(1:nc),stat=jerr) if(jerr.ne.0) return pmax=1.0-pmin emin=pmin/pmax emax=1.0/emin bta=parm omb=1.0-bta dev1=0.0 dev0=0.0 do 22521 ic=1,nc q0=dot_product(w,y(:,ic)) if(q0 .gt. pmin)goto 22541 jerr =8000+ic return 22541 continue if(q0 .lt. pmax)goto 22561 jerr =9000+ic return 22561 continue b(1:ni,ic)=0.0 if(intr .ne. 0)goto 22581 q0=1.0/nc b(0,ic)=0.0 goto 22591 22581 continue b(0,ic)=log(q0) dev1=dev1-q0*b(0,ic) 22591 continue continue 22521 continue continue if(intr.eq.0) dev1=log(float(nc)) iy=0 al=0.0 if(nonzero(no*nc,g) .ne. 0)goto 22611 b(0,:)=b(0,:)-sum(b(0,:))/nc sxp=0.0 do 22621 ic=1,nc q(:,ic)=exp(b(0,ic)) sxp=sxp+q(:,ic) 22621 continue continue goto 22631 22611 continue do 22641 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 22641 continue continue sxp=0.0 if(intr .ne. 0)goto 22661 b(0,:)=0.0 goto 22671 22661 continue call kazero(nc,no,y,g,w,b(0,:),jerr) if(jerr.ne.0) return 22671 continue continue dev1=0.0 do 22681 ic=1,nc q(:,ic)=b(0,ic)+g(:,ic) dev1=dev1-dot_product(w,y(:,ic)*q(:,ic)) q(:,ic)=exp(q(:,ic)) sxp=sxp+q(:,ic) 22681 continue continue sxpl=w*log(sxp) do 22691 ic=1,nc dev1=dev1+dot_product(y(:,ic),sxpl) 22691 continue continue 22631 continue continue do 22701 ic=1,nc do 22711 i=1,no if(y(i,ic).gt.0.0) dev0=dev0+w(i)*y(i,ic)*log(y(i,ic)) 22711 continue continue 22701 continue continue dev0=dev0+dev1 alf=1.0 if(flmin .ge. 
1.0)goto 22731 eqs=max(eps,flmin) alf=eqs**(1.0/(nlam-1)) 22731 continue m=0 mm=0 nin=0 nlp=0 mnl=min(mnlam,nlam) bs=0.0 shr=shri*dev0 ga=0.0 do 22741 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) svr(ic)=sum(r(:,ic)) do 22751 j=1,ni if(ju(j).eq.0)goto 22751 jb=ix(j) je=ix(j+1)-1 gj=dot_product(r(jx(jb:je),ic),x(jb:je)) ga(j)=ga(j)+((gj-svr(ic)*xb(j))/xs(j))**2 22751 continue continue 22741 continue continue ga=sqrt(ga) do 22761 ilm=1,nlam if(itrace.ne.0) call setpb(ilm-1) al0=al if(flmin .lt. 1.0)goto 22781 al=ulam(ilm) goto 22771 22781 if(ilm .le. 2)goto 22791 al=al*alf goto 22771 22791 if(ilm .ne. 1)goto 22801 al=big goto 22811 22801 continue al0=0.0 do 22821 j=1,ni if(ju(j).eq.0)goto 22821 if(vp(j).gt.0.0) al0=max(al0,ga(j)/vp(j)) 22821 continue continue al0=al0/max(bta,1.0d-3) al=alf*al0 22811 continue 22771 continue al2=al*omb al1=al*bta tlam=bta*(2.0*al-al0) do 22831 k=1,ni if(iy(k).eq.1)goto 22831 if(ju(k).eq.0)goto 22831 if(ga(k).gt.tlam*vp(k)) iy(k)=1 22831 continue continue 10880 continue continue 22841 continue ixx=0 jxx=ixx kxx=jxx t=0.0 if(nlp .le. maxit)goto 22861 jerr=-ilm return 22861 continue do 22871 ic=1,nc t=max(t,maxval(q(:,ic)*(1.0-q(:,ic)/sxp)/sxp)) 22871 continue continue if(t .ge. eps)goto 22891 kxx=1 goto 22842 22891 continue t=2.0*t alt=al1/t al2t=al2/t do 22901 ic=1,nc bs(0,ic)=b(0,ic) if(nin.gt.0) bs(m(1:nin),ic)=b(m(1:nin),ic) r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp)/t svr(ic)=sum(r(:,ic)) if(intr .eq. 0)goto 22921 b(0,ic)=b(0,ic)+svr(ic) r(:,ic)=r(:,ic)-svr(ic)*w dlx=max(dlx,svr(ic)**2) 22921 continue 22901 continue continue continue 22931 continue nlp=nlp+nc dlx=0.0 do 22941 k=1,ni if(iy(k).eq.0)goto 22941 jb=ix(k) je=ix(k+1)-1 del=b(k,:) gkn=0.0 do 22951 ic=1,nc u=(dot_product(r(jx(jb:je),ic),x(jb:je))-svr(ic)*xb(k))/xs(k) gk(ic)=u+del(ic)*xv(k) gkn=gkn+gk(ic)**2 22951 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn if(u .gt. 
0.0)goto 22971 b(k,:)=0.0 goto 22981 22971 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k),cl(2,k), vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 22981 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 22941 do 22991 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(jx(jb:je),ic)=r(jx(jb:je),ic) -del(ic)*w(jx(jb:je))*(x(jb:je)-x *b(k))/xs(k) 22991 continue continue if(mm(k) .ne. 0)goto 23011 nin=nin+1 if(nin .le. nx)goto 23031 jxx=1 goto 22942 23031 continue mm(k)=nin m(nin)=k 23011 continue 22941 continue 22942 continue if(jxx.gt.0)goto 22932 if(dlx.lt.shr)goto 22932 if(nlp .le. maxit)goto 23051 jerr=-ilm return 23051 continue continue 23061 continue nlp=nlp+nc dlx=0.0 do 23071 l=1,nin k=m(l) jb=ix(k) je=ix(k+1)-1 del=b(k,:) gkn=0.0 do 23081 ic=1,nc u=(dot_product(r(jx(jb:je),ic),x(jb:je)) -svr(ic)*xb(k))/xs(k) gk(ic)=u+del(ic)*xv(k) gkn=gkn+gk(ic)**2 23081 continue continue gkn=sqrt(gkn) u=1.0-alt*vp(k)/gkn if(u .gt. 0.0)goto 23101 b(k,:)=0.0 goto 23111 23101 continue b(k,:)=gk*(u/(xv(k)+vp(k)*al2t)) call chkbnds1(nc,gk,gkn,xv(k),cl(1,k),cl(2,k), vp(k)*al2t,alt*vp( *k),b(k,:),isc,jerr) if(jerr.ne.0) return 23111 continue continue del=b(k,:)-del if(maxval(abs(del)).le.0.0)goto 23071 do 23121 ic=1,nc dlx=max(dlx,xv(k)*del(ic)**2) r(jx(jb:je),ic)=r(jx(jb:je),ic) -del(ic)*w(jx(jb:je))*(x(jb:je)-x *b(k))/xs(k) 23121 continue continue 23071 continue continue if(dlx.lt.shr)goto 23062 if(nlp .le. maxit)goto 23141 jerr=-ilm return 23141 continue goto 23061 23062 continue goto 22931 22932 continue if(jxx.gt.0)goto 22842 do 23151 ic=1,nc if((b(0,ic)-bs(0,ic))**2.gt.shr) ixx=1 if(ixx .ne. 0)goto 23171 do 23181 j=1,nin k=m(j) if(xv(k)*(b(k,ic)-bs(k,ic))**2 .le. 
shr)goto 23201 ixx=1 goto 23182 23201 continue 23181 continue 23182 continue 23171 continue sc=b(0,ic)+g(:,ic) b0=0.0 do 23211 j=1,nin l=m(j) jb=ix(l) je=ix(l+1)-1 sc(jx(jb:je))=sc(jx(jb:je))+b(l,ic)*x(jb:je)/xs(l) b0=b0-b(l,ic)*xb(l)/xs(l) 23211 continue continue sc=min(max(exmn,sc+b0),exmx) sxp=sxp-q(:,ic) q(:,ic)=min(max(emin*sxp,exp(sc)),emax*sxp) sxp=sxp+q(:,ic) 23151 continue continue s=sum(b(0,:))/nc b(0,:)=b(0,:)-s if(jxx.gt.0)goto 22842 if(ixx .ne. 0)goto 23231 do 23241 j=1,ni if(iy(j).eq.1)goto 23241 if(ju(j).eq.0)goto 23241 ga(j)=0.0 23241 continue continue do 23251 ic=1,nc r(:,ic)=w*(y(:,ic)-q(:,ic)/sxp) do 23261 j=1,ni if(iy(j).eq.1)goto 23261 if(ju(j).eq.0)goto 23261 jb=ix(j) je=ix(j+1)-1 gj=dot_product(r(jx(jb:je),ic),x(jb:je)) ga(j)=ga(j)+((gj-svr(ic)*xb(j))/xs(j))**2 23261 continue continue 23251 continue continue ga=sqrt(ga) do 23271 k=1,ni if(iy(k).eq.1)goto 23271 if(ju(k).eq.0)goto 23271 if(ga(k) .le. al1*vp(k))goto 23291 iy(k)=1 ixx=1 23291 continue 23271 continue continue if(ixx.eq.1) go to 10880 goto 22842 23231 continue goto 22841 22842 continue if(kxx .le. 0)goto 23311 jerr=-20000-ilm goto 22762 23311 continue if(jxx .le. 
0)goto 23331 jerr=-10000-ilm goto 22762 23331 continue devi=0.0 do 23341 ic=1,nc if(nin.gt.0) a(1:nin,ic,ilm)=b(m(1:nin),ic) a0(ic,ilm)=b(0,ic) do 23351 i=1,no if(y(i,ic).le.0.0)goto 23351 devi=devi-w(i)*y(i,ic)*log(q(i,ic)/sxp(i)) 23351 continue continue 23341 continue continue kin(ilm)=nin alm(ilm)=al lmu=ilm dev(ilm)=(dev1-devi)/dev0 if(ilm.lt.mnl)goto 22761 if(flmin.ge.1.0)goto 22761 me=0 do 23361 j=1,nin if(a(j,1,ilm).ne.0.0) me=me+1 23361 continue continue if(me.gt.ne)goto 22762 if(dev(ilm).gt.devmax)goto 22762 if(dev(ilm)-dev(ilm-1).lt.sml)goto 22762 22761 continue 22762 continue g=log(q) do 23371 i=1,no g(i,:)=g(i,:)-sum(g(i,:))/nc 23371 continue continue deallocate(sxp,b,bs,r,q,mm,is,sc,ga,iy,gk,del,sxpl) return end subroutine psort7(v,a,ii,jj) implicit double precision(a-h,o-z) c c puts into a the permutation vector which sorts v into c increasing order. the array v is not modified. c only elements from ii to jj are considered. c arrays iu(k) and il(k) permit sorting up to 2**(k+1)-1 elements c c this is a modification of cacm algorithm #347 by r. c. singleton, c which is a modified hoare quicksort. 
c dimension a(jj),v(jj),iu(20),il(20) integer t,tt integer a double precision v m=1 i=ii j=jj 10 if (i.ge.j) go to 80 20 k=i ij=(j+i)/2 t=a(ij) vt=v(t) if (v(a(i)).le.vt) go to 30 a(ij)=a(i) a(i)=t t=a(ij) vt=v(t) 30 l=j if (v(a(j)).ge.vt) go to 50 a(ij)=a(j) a(j)=t t=a(ij) vt=v(t) if (v(a(i)).le.vt) go to 50 a(ij)=a(i) a(i)=t t=a(ij) vt=v(t) go to 50 40 a(l)=a(k) a(k)=tt 50 l=l-1 if (v(a(l)).gt.vt) go to 50 tt=a(l) vtt=v(tt) 60 k=k+1 if (v(a(k)).lt.vt) go to 60 if (k.le.l) go to 40 if (l-i.le.j-k) go to 70 il(m)=i iu(m)=l i=k m=m+1 go to 90 70 il(m)=k iu(m)=j j=l m=m+1 go to 90 80 m=m-1 if (m.eq.0) return i=il(m) j=iu(m) 90 if (j-i.gt.10) go to 20 if (i.eq.ii) go to 10 i=i-1 100 i=i+1 if (i.eq.j) go to 80 t=a(i+1) vt=v(t) if (v(a(i)).le.vt) go to 100 k=i 110 a(k+1)=a(k) k=k-1 if (vt.lt.v(a(k))) go to 110 a(k+1)=t go to 100 end glmnet/src/glmnetpp/src/legacy/wls.f0000644000175000017500000005645113775432176017350 0ustar nileshnileshc mortran 2.0 (version of 7/04/75 mod 7/4/87 (ajc)) subroutine wls(alm0,almc,alpha,m,no,ni,x,r,v,intr,ju,vp,cl,nx,thr, *maxit, a,aint,g,ia,iy,iz,mm,nino,rsqc,nlp,jerr) implicit double precision(a-h,o-z) double precision x(no,ni),r(no),a(ni),vp(ni),cl(2,ni) double precision v(no),g(ni) integer iy(ni),ia(nx),ju(ni),mm(ni) double precision, dimension (:), allocatable :: xv allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return do 10011 j=1,ni if(ju(j).eq.0)goto 10011 g(j)=abs(dot_product(r,x(:,j))) 10011 continue continue do 10021 j=1,ni if(iy(j).gt.0) xv(j)=dot_product(v,x(:,j)**2) 10021 continue continue xmz = sum(v) ab=almc*alpha dem=almc*(1.0-alpha) tlam=alpha*(2.0*almc-alm0) do 10031 k=1,ni if(iy(k).eq.1)goto 10031 if(ju(k).eq.0)goto 10031 if(g(k) .le. 
tlam*vp(k))goto 10051 iy(k)=1 xv(k)=dot_product(v,x(:,k)**2) 10051 continue 10031 continue continue jz = 1 continue 10061 continue if(iz*jz.ne.0) go to 10070 10080 continue nlp=nlp+1 dlx=0.0 do 10091 k=1,ni if(iy(k).eq.0)goto 10091 gk=dot_product(r,x(:,k)) ak=a(k) u=gk+ak*xv(k) au=abs(u)-vp(k)*ab if(au .gt. 0.0)goto 10111 a(k)=0.0 goto 10121 10111 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*dem))) 10121 continue continue if(a(k).eq.ak)goto 10091 if(mm(k) .ne. 0)goto 10141 nino=nino+1 if(nino.gt.nx)goto 10092 mm(k)=nino ia(nino)=k 10141 continue d=a(k)-ak rsqc=rsqc+d*(2.0*gk-d*xv(k)) r=r-d*v*x(:,k) dlx=max(xv(k)*d**2,dlx) 10091 continue 10092 continue if(nino.gt.nx)goto 10062 d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 10161 aint=aint+d rsqc=rsqc+d*(2.0*sum(r)-d*xmz) dlx=max(dlx,xmz*d**2) r=r-d*v 10161 continue if(dlx .ge. thr)goto 10181 ixx=0 do 10191 k=1,ni if(iy(k).eq.1)goto 10191 if(ju(k).eq.0)goto 10191 g(k)=abs(dot_product(r,x(:,k))) if(g(k) .le. ab*vp(k))goto 10211 iy(k)=1 xv(k)=dot_product(v,x(:,k)**2) ixx=1 10211 continue 10191 continue continue if(ixx.eq.1) go to 10080 goto 10062 10181 continue if(nlp .le. maxit)goto 10231 jerr=-m return 10231 continue 10070 continue iz = 1 continue 10241 continue nlp=nlp+1 dlx=0.0 do 10251 l=1,nino k=ia(l) gk=dot_product(r,x(:,k)) ak=a(k) u=gk+ak*xv(k) au=abs(u)-vp(k)*ab if(au .gt. 0.0)goto 10271 a(k)=0.0 goto 10281 10271 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*dem))) 10281 continue continue if(a(k).eq.ak)goto 10251 d=a(k)-ak rsqc=rsqc+d*(2.0*gk-d*xv(k)) r=r-d*v*x(:,k) dlx=max(xv(k)*d**2,dlx) 10251 continue continue d=0.0 if(intr.ne.0) d=sum(r)/xmz if(d .eq. 0.0)goto 10301 aint=aint+d rsqc=rsqc+d*(2.0*sum(r)-d*xmz) dlx=max(dlx,xmz*d**2) r=r-d*v 10301 continue if(dlx.lt.thr)goto 10242 if(nlp .le. 
maxit)goto 10321 jerr=-m return 10321 continue goto 10241 10242 continue jz=0 goto 10061 10062 continue deallocate(xv) return end subroutine spwls(alm0,almc,alpha,m,no,ni,x,ix,jx,xm,xs,r,v,intr,ju *, vp,cl,nx,thr,maxit,a,aint,g,ia,iy,iz,mm,nino,rsqc,nlp,jerr) implicit double precision(a-h,o-z) double precision x(*),xm(ni),xs(ni),r(no),a(ni),vp(ni),cl(2,ni) double precision v(no),g(ni) integer ix(*),jx(*),iy(ni),ia(nx),ju(ni),mm(ni) double precision, dimension (:), allocatable :: xv allocate(xv(1:ni),stat=jerr) if(jerr.ne.0) return xmz = sum(v) rsum = sum(r) do 10331 j=1,ni if(ju(j).eq.0)goto 10331 jb=ix(j) je=ix(j+1)-1 g(j)=abs(dot_product(r(jx(jb:je)),x(jb:je))-rsum*xm(j))/xs(j) 10331 continue continue do 10341 j=1,ni if(iy(j) .le. 0)goto 10361 jb=ix(j) je=ix(j+1)-1 xv(j)=dot_product(v(jx(jb:je)),x(jb:je)**2) xv(j)=xv(j)-2*xm(j)*dot_product(v(jx(jb:je)),x(jb:je)) xv(j)=(xv(j)+xmz*xm(j)**2)/xs(j)**2 10361 continue 10341 continue continue ab=almc*alpha dem=almc*(1.0-alpha) tlam=alpha*(2.0*almc-alm0) do 10371 k=1,ni if(iy(k).eq.1)goto 10371 if(ju(k).eq.0)goto 10371 if(g(k) .le. tlam*vp(k))goto 10391 iy(k)=1 jb=ix(k) je=ix(k+1)-1 xv(k)=dot_product(v(jx(jb:je)),x(jb:je)**2) xv(k)=xv(k)-2*xm(k)*dot_product(v(jx(jb:je)),x(jb:je)) xv(k)=(xv(k)+xmz*xm(k)**2)/xs(k)**2 10391 continue 10371 continue continue jz = 1 continue 10401 continue if(iz*jz.ne.0) go to 10070 10080 continue nlp=nlp+1 dlx=0.0 do 10411 k=1,ni if(iy(k).eq.0)goto 10411 jb=ix(k) je=ix(k+1)-1 gk=(dot_product(r(jx(jb:je)),x(jb:je))-rsum*xm(k))/xs(k) ak=a(k) u=gk+ak*xv(k) au=abs(u)-vp(k)*ab if(au .gt. 0.0)goto 10431 a(k)=0.0 goto 10441 10431 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*dem))) 10441 continue continue if(a(k).eq.ak)goto 10411 if(mm(k) .ne. 
0)goto 10461 nino=nino+1 if(nino.gt.nx)goto 10412 mm(k)=nino ia(nino)=k 10461 continue d=a(k)-ak rsqc=rsqc+d*(2.0*gk-d*xv(k)) jb=ix(k) je=ix(k+1)-1 r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) r=r+d*v*xm(k)/xs(k) rsum=sum(r) dlx=max(xv(k)*d**2,dlx) 10411 continue 10412 continue if(nino.gt.nx)goto 10402 d=0.0 if(intr.ne.0) d=rsum/xmz if(d .eq. 0.0)goto 10481 aint=aint+d rsqc=rsqc+d*(2.0*rsum-d*xmz) dlx=max(dlx,xmz*d**2) r=r-d*v rsum=sum(r) 10481 continue if(dlx .ge. thr)goto 10501 ixx=0 do 10511 k=1,ni if(iy(k).eq.1)goto 10511 if(ju(k).eq.0)goto 10511 jb=ix(k) je=ix(k+1)-1 g(k)=dot_product(r(jx(jb:je)),x(jb:je)) g(k)=abs(g(k)-rsum*xm(k))/xs(k) if(g(k) .le. ab*vp(k))goto 10531 iy(k)=1 xv(k)=dot_product(v(jx(jb:je)),x(jb:je)**2) vx=dot_product(v(jx(jb:je)),x(jb:je)) xv(k)=xv(k)-2*xm(k)*vx xv(k)=(xv(k)+xmz*xm(k)**2)/xs(k)**2 ixx=1 10531 continue 10511 continue continue if(ixx.eq.1) go to 10080 goto 10402 10501 continue if(nlp .le. maxit)goto 10551 jerr=-m return 10551 continue 10070 continue iz = 1 continue 10561 continue nlp=nlp+1 dlx=0.0 do 10571 l=1,nino k=ia(l) jb=ix(k) je=ix(k+1)-1 gk=(dot_product(r(jx(jb:je)),x(jb:je))-rsum*xm(k))/xs(k) ak=a(k) u=gk+ak*xv(k) au=abs(u)-vp(k)*ab if(au .gt. 0.0)goto 10591 a(k)=0.0 goto 10601 10591 continue a(k)=max(cl(1,k),min(cl(2,k),sign(au,u)/(xv(k)+vp(k)*dem))) 10601 continue continue if(a(k).eq.ak)goto 10571 d=a(k)-ak rsqc=rsqc+d*(2.0*gk-d*xv(k)) jb=ix(k) je=ix(k+1)-1 r(jx(jb:je))=r(jx(jb:je))-d*v(jx(jb:je))*x(jb:je)/xs(k) r=r+d*v*xm(k)/xs(k) rsum=sum(r) dlx=max(xv(k)*d**2,dlx) 10571 continue continue d=0.0 if(intr.ne.0) d=rsum/xmz if(d .eq. 0.0)goto 10621 aint=aint+d rsqc=rsqc+d*(2.0*rsum-d*xmz) dlx=max(dlx,xmz*d**2) r=r-d*v rsum=rsum-d*xmz 10621 continue if(dlx.lt.thr)goto 10562 if(nlp .le. 
maxit)goto 10641 jerr=-m return 10641 continue goto 10561 10562 continue jz=0 goto 10401 10402 continue deallocate(xv) return end subroutine get_int_parms2(epsnr,mxitnr) implicit double precision(a-h,o-z) data epsnr0,mxitnr0 /1.0d-6,25/ epsnr=epsnr0 mxitnr=mxitnr0 return entry chg_epsnr(arg) epsnr0=arg return entry chg_mxitnr(irg) mxitnr0=irg return end glmnet/src/glmnetpp/src/legacy/pb.c0000644000175000017500000000120313573515432017113 0ustar nileshnilesh#include #include /* The progress bar */ static SEXP pb; /* Save the progress bar */ SEXP storePB(SEXP tpb) { pb = tpb; return(R_NilValue); } /* Set Progress bar to value */ void F77_SUB(setpb)(int *val) { SEXP s, t; /* printf("%d\n", *val); */ t = s = PROTECT(allocList(3)); SET_TYPEOF(s, LANGSXP); SETCAR(t, install("setTxtProgressBar")); t = CDR(t); SETCAR(t, pb); SET_TAG(t, install("pb")); t = CDR(t); SETCAR(t, ScalarInteger(*val)); SET_TAG(t, install("value")); eval(s, R_GetCurrentEnv()); UNPROTECT(1); } /* Nullify progress bar, not really used */ void nullifyPB() { pb = NULL; } glmnet/src/glmnetpp/src/internal.cpp0000644000175000017500000000273714140040573017425 0ustar nileshnilesh#include namespace glmnetpp { double InternalParams::sml = 1e-5; double InternalParams::eps = 1e-6; double InternalParams::big = 9.9e35; int InternalParams::mnlam = 5; double InternalParams::rsqmax = 0.999; double InternalParams::pmin = 1e-9; double InternalParams::exmx = 250.0; int InternalParams::itrace = 0; // TODO: this interface is kinda terrible, // but need it to be compatible with current R interface. 
void get_int_parms(double& sml, double& eps, double& big, int& mnlam, double& rsqmax, double& pmin, double& exmx, int& itrace) { sml = InternalParams::sml; eps = InternalParams::eps; big = InternalParams::big; mnlam = InternalParams::mnlam; rsqmax = InternalParams::rsqmax; pmin = InternalParams::pmin; exmx = InternalParams::exmx; itrace = InternalParams::itrace; } void chg_fract_dev(double arg) { InternalParams::sml = arg; } void chg_min_flmin(double arg) { InternalParams::eps = arg; } void chg_dev_max(double arg) { InternalParams::rsqmax = arg; } void chg_big(double arg) { InternalParams::big = arg; } void chg_min_lambdas(int irg) { InternalParams::mnlam = irg; } void chg_min_null_prob(double arg) { InternalParams::pmin = arg; } void chg_max_exp(double arg) { InternalParams::exmx = arg; } void chg_itrace(int irg) { InternalParams::itrace = irg; } } // namespace glmnetpp glmnet/src/glmnetpp/clean-build.sh0000755000175000017500000000161714140040573017030 0ustar nileshnilesh#!/bin/bash # directory where current shell script resides PROJECTDIR=$(dirname "$BASH_SOURCE") cd "$PROJECTDIR" mode=$1 # debug/release mode shift # shift command-line arguments # the rest are cmake command-line arguments mkdir -p build && cd build # if debug directory does not exist, create it mkdir -p debug # if release directory does not exist, create it mkdir -p release # if debug mode if [ "$mode" = "debug" ]; then cd debug # if release mode elif [ "$mode" = "release" ]; then cd release else echo "Usage: ./clean-build.sh [cmake options]" 1>&2 exit 1 fi # directory with R include files R_HOME=$(Rscript -e "cat(Sys.getenv(\"R_HOME\"))") R_INCLUDE_DIR=$(Rscript -e "cat(Sys.getenv(\"R_INCLUDE_DIR\"))") R_LIB_DIR="$R_HOME/lib" rm -rf * cmake ../../ \ -DR_INCLUDE_DIR=$R_INCLUDE_DIR \ -DR_LIB_DIR=$R_LIB_DIR \ "$@" cmake --build . 
-- -j12 glmnet/src/glmnetpp/benchmark/0000755000175000017500000000000014140271174016242 5ustar nileshnileshglmnet/src/glmnetpp/benchmark/CMakeLists.txt0000644000175000017500000000127014140040573020777 0ustar nileshnilesh# All macro tests set( BENCHMARKS #lasso_stress_benchmark gaussian_cov_benchmark gaussian_naive_benchmark gaussian_benchmark ) foreach( benchmark ${BENCHMARKS} ) add_executable(${benchmark} ${CMAKE_CURRENT_SOURCE_DIR}/${benchmark}.cpp) target_compile_options(${benchmark} PRIVATE -std=c++17) target_include_directories(${benchmark} PRIVATE ${PROJECT_SOURCE_DIR}/test ${GLMNETPP_SOURCEDIR}) target_link_libraries(${benchmark} benchmark::benchmark_main ${PROJECT_NAME} legacy Eigen3::Eigen) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_link_libraries(${benchmark} pthread) endif() endforeach() glmnet/src/glmnetpp/benchmark/gaussian_cov_benchmark.cpp0000644000175000017500000001077114140040573023444 0ustar nileshnilesh#include #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { template struct gaussian_cov_fixture : benchmark::Fixture { using internal_t = ElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, double, int, int>; using elnet_point_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, internal_t>; using elnet_path_t = ElnetPath< util::glm_type::gaussian, util::mode_type::cov, elnet_point_t>; elnet_path_t elnet_path; Eigen::VectorXd xm, xs, xv; double ym = 0, ys = 0, flmin = 0.; double alpha = 1.0, thr = 1e-14; int ne, maxit = 100000, nlam = 100; Eigen::VectorXd ulam; int lmu, nlp, jerr; Eigen::MatrixXd ao; Eigen::VectorXd g, g_cache, rsqo, almo; Eigen::VectorXi ia, kin; void init(int p, int nx, int nlam) { lmu = 0; nlp = 0; jerr = 0; g.setZero(p); ao.setZero(nx, nlam); ia.setZero(p); kin.setZero(nlam); rsqo.setZero(nlam); almo.setZero(nlam); } void reset() { lmu = 0; nlp = 0; jerr = 0; ao.setZero(); ia.setZero(); kin.setZero(); rsqo.setZero(); 
almo.setZero(); } void run(benchmark::State& state) { int seed = 123124; int n = state.range(0); int p = state.range(1); DataGen dgen(seed); auto X = dgen.make_X(n, p); auto cl = dgen.make_cl(p); auto vp = dgen.make_vp(p); auto w = dgen.make_w(n); auto ju = dgen.make_ju(p); auto beta = dgen.make_beta(p); auto y = dgen.make_y(X, beta); Eigen::VectorXd g(p); g.setZero(); Eigen::VectorXd xv(p); xv.setOnes(); auto nx = dgen.make_nx(p); Eigen::VectorXi ia(nx); ia.setZero(); vp /= vp.sum() / p; Chkvars::eval(X, ju); xm.setZero(p); xs.setZero(p); xv.setZero(p); g_cache.setZero(p); init(p, nx, nlam); Standardize::eval(X, y, w, false, true, ju, g_cache, xm, xs, ym, ys, xv); cl /= ys; for (int j = 0; j < p; ++j) { cl.col(j) *= xs(j); } ne = p; state.counters["n"] = n; state.counters["p"] = p; for (auto _ : state) { state.PauseTiming(); reset(); g = g_cache; state.ResumeTiming(); if constexpr (do_glmnetpp) { elnet_path.fit(alpha, ju, vp, cl, g, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, mock_setpb, InternalParams()); } else { int ni = X.cols(); int no = X.rows(); elnet1_(&alpha, &ni, ju.data(), vp.data(), cl.data(), g.data(), &no, &ne, &nx, X.data(), &nlam, &flmin, ulam.data(), &thr, &maxit, xv.data(), &lmu, ao.data(), ia.data(), kin.data(), rsqo.data(), almo.data(), &nlp, &jerr); } } } }; BENCHMARK_TEMPLATE_DEFINE_F( gaussian_cov_fixture, glmnetpp, true)(benchmark::State& state) { run(state); } BENCHMARK_TEMPLATE_DEFINE_F( gaussian_cov_fixture, legacy, false)(benchmark::State& state) { run(state); } BENCHMARK_REGISTER_F(gaussian_cov_fixture, glmnetpp) ->ArgsProduct({ //{100, 500, 1000, 2000}, //benchmark::CreateRange(2, 1<<11, 2) {1000}, benchmark::CreateRange(1<<5, 1<<11, 2) }) ; BENCHMARK_REGISTER_F(gaussian_cov_fixture, legacy) ->ArgsProduct({ //{100, 500, 1000, 2000}, //benchmark::CreateRange(2, 1<<11, 2) {1000}, benchmark::CreateRange(1<<5, 1<<11, 2) }) ; } // namespace glmnetpp 
glmnet/src/glmnetpp/benchmark/gaussian_naive_benchmark.cpp0000644000175000017500000001065214140040573023755 0ustar nileshnilesh#include #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { template struct gaussian_cov_fixture : benchmark::Fixture { using internal_t = ElnetPointInternal< util::glm_type::gaussian, util::mode_type::naive, double, int, int>; using elnet_point_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, internal_t>; using elnet_path_t = ElnetPath< util::glm_type::gaussian, util::mode_type::naive, elnet_point_t>; elnet_path_t elnet_path; Eigen::VectorXd xm, xs, xv; double ym = 0, ys = 0, flmin = 0.; double alpha = 1.0, thr = 1e-14; int ne, maxit = 100000, nlam = 100; Eigen::VectorXd ulam; int lmu, nlp, jerr; Eigen::MatrixXd ao; Eigen::VectorXd rsqo, almo; Eigen::VectorXi ia, kin; void init(int p, int nx, int nlam) { lmu = 0; nlp = 0; jerr = 0; ao.setZero(nx, nlam); ia.setZero(p); kin.setZero(nlam); rsqo.setZero(nlam); almo.setZero(nlam); } void reset() { lmu = 0; nlp = 0; jerr = 0; ao.setZero(); ia.setZero(); kin.setZero(); rsqo.setZero(); almo.setZero(); } void run(benchmark::State& state) { int seed = 123124; int n = state.range(0); int p = state.range(1); DataGen dgen(seed); auto X = dgen.make_X(n, p); auto cl = dgen.make_cl(p); auto vp = dgen.make_vp(p); auto w = dgen.make_w(n); auto ju = dgen.make_ju(p); auto beta = dgen.make_beta(p); auto y = dgen.make_y(X, beta); Eigen::VectorXd g(p); g.setZero(); Eigen::VectorXd xv(p); xv.setOnes(); auto nx = dgen.make_nx(p); Eigen::VectorXi ia(nx); ia.setZero(); vp /= vp.sum() / p; Chkvars::eval(X, ju); xm.setZero(p); xs.setZero(p); xv.setZero(p); init(p, nx, nlam); Standardize1::eval(X, y, w, false, true, ju, xm, xs, ym, ys, xv); cl /= ys; for (int j = 0; j < p; ++j) { cl.col(j) *= xs(j); } ne = p; state.counters["n"] = n; state.counters["p"] = p; for (auto _ : state) { state.PauseTiming(); reset(); state.ResumeTiming(); if 
constexpr (do_glmnetpp) { elnet_path.fit(alpha, ju, vp, cl, y, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, mock_setpb, InternalParams()); } else { int ni = X.cols(); int no = X.rows(); elnet2_(&alpha, &ni, ju.data(), vp.data(), cl.data(), y.data(), &no, &ne, &nx, X.data(), &nlam, &flmin, ulam.data(), &thr, &maxit, xv.data(), &lmu, ao.data(), ia.data(), kin.data(), rsqo.data(), almo.data(), &nlp, &jerr); } } } }; BENCHMARK_TEMPLATE_DEFINE_F( gaussian_cov_fixture, glmnetpp, true)(benchmark::State& state) { run(state); } BENCHMARK_TEMPLATE_DEFINE_F( gaussian_cov_fixture, legacy, false)(benchmark::State& state) { run(state); } BENCHMARK_REGISTER_F(gaussian_cov_fixture, glmnetpp) ->ArgsProduct({ //{100, 500, 1000, 2000}, //benchmark::CreateRange(2, 1<<11, 2) {1000}, benchmark::CreateRange(1<<5, 1<<11, 2) }) ; BENCHMARK_REGISTER_F(gaussian_cov_fixture, legacy) ->ArgsProduct({ //{100, 500, 1000, 2000}, //benchmark::CreateRange(2, 1<<11, 2) {1000}, benchmark::CreateRange(1<<5, 1<<11, 2) }) ; } // namespace glmnetpp glmnet/src/glmnetpp/benchmark/analyze/0000755000175000017500000000000014140271174017705 5ustar nileshnileshglmnet/src/glmnetpp/benchmark/analyze/analyze.py0000644000175000017500000000244114140040573021720 0ustar nileshnilesh# A simple script to run benchmark programs and visualize the data. # Assumes that the benchmark program has already been built. 
import argparse import matplotlib.pyplot as plt import path_names import analyze_set_vs_vector_loop as asvvl import analyze_lasso_stress_benchmark as alsb import analyze_gaussian_benchmark as agb parser = argparse.ArgumentParser(description='Collects data and produces plots of benchmark programs.') parser.add_argument('bench_names', nargs='*', help='list of benchmark program names to analyze.') parser.add_argument('-a', action='store_const', const=True, help='analyze all benchmark programs in build/release/benchmark.') args = parser.parse_args() if len(args.bench_names) == 0 and not args.a: raise RuntimeError( 'At least one benchmark name must be specified if -a is not specified.') # Dictionary of bench name to module name bench_to_module = { asvvl.TESTNAME : asvvl, alsb.TESTNAME : alsb, agb.TESTNAME : agb } mods = [bench_to_module[bench_name] for bench_name in args.bench_names] for mod in mods: mod.plot(mod.run(path_names.bench_dir, path_names.data_dir, path_names.ref_dir, path_names.data_scr_dir), path_names.fig_dir) glmnet/src/glmnetpp/benchmark/analyze/path_names.py0000644000175000017500000000023614140040573022374 0ustar nileshnileshfig_dir = '../../docs/figs' bench_dir = '../../build/release/benchmark' data_dir = '../../docs/data' ref_dir = '../reference' data_scr_dir = '../data/script' glmnet/src/glmnetpp/benchmark/analyze/analyze_gaussian_benchmark.py0000644000175000017500000000477114140040573025634 0ustar nileshnileshimport io import os import matplotlib.pyplot as plt import pandas as pd import numpy as np from subprocess import check_output TESTNAME = 'gaussian_benchmark' n = 1000 ps = 2**(np.linspace(1, 14, 14)).astype(int) def plot(df, fig_dir): grouped = df.groupby(['ka', 'sp']) ncols=2 nrows = int(np.ceil(grouped.ngroups/ncols)) fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(12,6), sharey=True) for (key, ax) in zip(grouped.groups.keys(), axes.flatten()): print(grouped.get_group(key)) grouped.get_group(key).plot(ax=ax, x='p', y='relative') 
ax.set_title('N=1000, ka={ka}, sp={sp}'.format(ka=key[0], sp=key[1])) ax.legend() ax.set_ylabel('Time (s)') plt.tight_layout() plt.savefig(os.path.join(fig_dir, TESTNAME + '_fig.png')) # Run benchmark # bench_dir directory to glmnetpp benchmark program (e.g. build/release/benchmark) # data_dir directory to store our timing data (e.g. docs/data) # ref_dir directory to reference (glmnet) program for comparison (e.g. benchmark/reference) # data_scr_dir directory to scripts that generate data (e.g. benchmark/data/script) # gen boolean whether to generate data or not def run(bench_dir, data_dir, ref_dir, data_scr_dir, gen=False): df = pd.DataFrame() # save current working directory cur_path = os.getcwd() # change directory to glmnetpp benchmark location os.chdir(bench_dir) bench_path = os.path.join('.', TESTNAME) print('Benchmark path: {p}'.format(p=bench_path)) # run our benchmark and get output args = (bench_path, "--benchmark_format=csv") data = io.StringIO(check_output(args).decode("utf-8")) os.chdir(cur_path) df_bench = pd.read_csv(data, sep=',') n = df_bench['n'][0] # assume n is constant throughout df = df_bench[['p', 'ka', 'sp', 'real_time']] df_glmnet = df[df_bench['glmnetpp'] == 0] df_glmnetpp = df[df_bench['glmnetpp'] == 1] df_glmnet.set_index(['p', 'ka', 'sp'], inplace=True) df_glmnetpp.set_index(['p', 'ka', 'sp'], inplace=True) df = pd.concat([df_glmnet, df_glmnetpp], axis=1) df.columns = ['glmnet', 'glmnetpp'] df *= 1e-9 df.reset_index(inplace=True) df['relative'] = df['glmnet'] / df['glmnetpp'] # save absolute time data_path = os.path.join(data_dir, TESTNAME + ".csv") df.to_csv(data_path) return df if __name__ == '__main__': import path_names as pn df = pd.read_csv(os.path.join(pn.data_dir, TESTNAME + ".csv")) plot(df, pn.fig_dir) glmnet/src/glmnetpp/benchmark/gaussian_benchmark.cpp0000644000175000017500000002043614140040573022574 0ustar nileshnilesh#include #include #include #include #include #include #include #include #include #include #include #include 
#include #include #include #include #include #include #include #include #include namespace glmnetpp { template struct gaussian_fixture : benchmark::Fixture { using elnet_driver_t = ElnetDriver; elnet_driver_t elnet_driver; bool isd = true, intr = true; double flmin = 0.; double alpha = 1.0, thr = 1e-14; int maxit = 100000, nlam = 100; Eigen::VectorXd ulam; int lmu, nlp, jerr; Eigen::VectorXd y, y_cache, w, w_cache, a0; Eigen::MatrixXd X, X_cache, ca, cl, cl_cache; Eigen::VectorXd rsq, alm; Eigen::VectorXi ia, nin; void init(int p, int nx, int nlam) { lmu = 0; nlp = 0; jerr = 0; a0.setZero(nlam); ca.setZero(nx, nlam); ia.setZero(p); nin.setZero(nlam); rsq.setZero(nlam); alm.setZero(nlam); } void reset() { lmu = 0; nlp = 0; jerr = 0; X = X_cache; y = y_cache; w = w_cache; cl = cl_cache; a0.setZero(); ca.setZero(); ia.setZero(); nin.setZero(); rsq.setZero(); alm.setZero(); } void run(benchmark::State& state) { int seed = 123124; int n = state.range(0); int p = state.range(1); bool ka = state.range(2); DataGen dgen(seed); X_cache = dgen.make_X(n, p); cl_cache = dgen.make_cl(p); auto vp = dgen.make_vp(p); w_cache = dgen.make_w(n); auto jd = dgen.make_jd(p); auto beta = dgen.make_beta(p); y_cache = dgen.make_y(X_cache, beta); auto ne = dgen.make_ne(p); auto nx = dgen.make_nx(p); init(p, nx, nlam); state.counters["glmnetpp"] = do_glmnetpp; state.counters["ka"] = ka; state.counters["n"] = n; state.counters["p"] = p; state.counters["sp"] = false; for (auto _ : state) { state.PauseTiming(); reset(); state.ResumeTiming(); if constexpr (do_glmnetpp) { elnet_driver.fit(ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr, mock_setpb, InternalParams()); } else { int ika = ka + 1; int ni = X.cols(); int no = X.rows(); int iisd = isd; int iintr = intr; ::elnet_( &ika, &alpha, &no, &ni, X.data(), y.data(), w.data(), jd.data(), vp.data(), cl.data(), &ne, &nx, &nlam, &flmin, ulam.data(), &thr, &iisd, &iintr, 
&maxit, &lmu, a0.data(), ca.data(), ia.data(), nin.data(), rsq.data(), alm.data(), &nlp, &jerr); } } } }; BENCHMARK_TEMPLATE_DEFINE_F( gaussian_fixture, glmnetpp, true)(benchmark::State& state) { run(state); } BENCHMARK_TEMPLATE_DEFINE_F( gaussian_fixture, legacy, false)(benchmark::State& state) { run(state); } BENCHMARK_REGISTER_F(gaussian_fixture, glmnetpp) ->ArgsProduct({ {1000}, benchmark::CreateRange(1<<5, 1<<11, 2), {true, false} }) ; BENCHMARK_REGISTER_F(gaussian_fixture, legacy) ->ArgsProduct({ {1000}, benchmark::CreateRange(1<<5, 1<<11, 2), {true, false} }) ; // ========================================================================== template struct sp_gaussian_fixture : benchmark::Fixture { using elnet_driver_t = ElnetDriver; elnet_driver_t elnet_driver; bool isd = true, intr = true; double flmin = 0.; double alpha = 1.0, thr = 1e-14; int maxit = 100000, nlam = 100; Eigen::VectorXd ulam; int lmu, nlp, jerr; Eigen::VectorXd y, y_cache, w, w_cache, a0; Eigen::SparseMatrix X, X_cache; Eigen::MatrixXd ca, cl, cl_cache; Eigen::VectorXd rsq, alm; Eigen::VectorXi ia, nin, x_inner, x_outer; void init(int p, int nx, int nlam) { lmu = 0; nlp = 0; jerr = 0; a0.setZero(nlam); ca.setZero(nx, nlam); ia.setZero(p); nin.setZero(nlam); rsq.setZero(nlam); alm.setZero(nlam); } void reset() { lmu = 0; nlp = 0; jerr = 0; X = X_cache; X.makeCompressed(); x_inner = make_sp_inner_idx_1idx(X); x_outer = make_sp_outer_idx_1idx(X); y = y_cache; w = w_cache; cl = cl_cache; a0.setZero(); ca.setZero(); ia.setZero(); nin.setZero(); rsq.setZero(); alm.setZero(); } void run(benchmark::State& state) { int seed = 123124; int n = state.range(0); int p = state.range(1); bool ka = state.range(2); DataGen dgen(seed); X_cache = dgen.make_X_sparse(n, p, 0.9); cl_cache = dgen.make_cl(p); auto vp = dgen.make_vp(p); w_cache = dgen.make_w(n); auto jd = dgen.make_jd(p); auto beta = dgen.make_beta(p); y_cache = dgen.make_y(X_cache, beta); auto ne = dgen.make_ne(p); auto nx = dgen.make_nx(p); init(p, 
nx, nlam); state.counters["glmnetpp"] = do_glmnetpp; state.counters["ka"] = ka; state.counters["n"] = n; state.counters["p"] = p; state.counters["sp"] = true; for (auto _ : state) { state.PauseTiming(); reset(); state.ResumeTiming(); if constexpr (do_glmnetpp) { elnet_driver.fit(ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr, mock_setpb, InternalParams()); } else { int ika = ka + 1; int ni = X.cols(); int no = X.rows(); int iisd = isd; int iintr = intr; ::spelnet_( &ika, &alpha, &no, &ni, X.valuePtr(), x_outer.data(), x_inner.data(), y.data(), w.data(), jd.data(), vp.data(), cl.data(), &ne, &nx, &nlam, &flmin, ulam.data(), &thr, &iisd, &iintr, &maxit, &lmu, a0.data(), ca.data(), ia.data(), nin.data(), rsq.data(), alm.data(), &nlp, &jerr); } } } }; BENCHMARK_TEMPLATE_DEFINE_F( sp_gaussian_fixture, glmnetpp, true)(benchmark::State& state) { run(state); } BENCHMARK_TEMPLATE_DEFINE_F( sp_gaussian_fixture, legacy, false)(benchmark::State& state) { run(state); } BENCHMARK_REGISTER_F(sp_gaussian_fixture, glmnetpp) ->ArgsProduct({ {1000}, benchmark::CreateRange(1<<5, 1<<11, 2), {true, false} }) ; BENCHMARK_REGISTER_F(sp_gaussian_fixture, legacy) ->ArgsProduct({ {1000}, benchmark::CreateRange(1<<5, 1<<11, 2), {true, false} }) ; } // namespace glmnetpp glmnet/src/glmnetpp/test/0000755000175000017500000000000014140040573015264 5ustar nileshnileshglmnet/src/glmnetpp/test/translation/0000755000175000017500000000000014140040573017622 5ustar nileshnileshglmnet/src/glmnetpp/test/translation/elnet2_unittest.cpp0000644000175000017500000000550614140040573023464 0ustar nileshnilesh#include #include #include namespace glmnetpp { struct Elnet2Pack: ElnetBasePack { Eigen::VectorXd y; const Eigen::MatrixXd& X; Elnet2Pack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, Eigen::VectorXd& _xv, Eigen::VectorXd& _ulam, Eigen::VectorXd& _vp, 
Eigen::MatrixXd& _cl, Eigen::VectorXi& _ju) : ElnetBasePack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , y(_y) , X(_X) {} void fit() override { transl::elnet2( alpha, ju, vp, cl, y, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } void fit_legacy() override { int ni = X.cols(); int no = X.rows(); elnet2_(const_cast(&alpha), &ni, const_cast(ju.data()), const_cast(vp.data()), const_cast(cl.data()), y.data(), &no, const_cast(&ne), const_cast(&nx), const_cast(X.data()), const_cast(&nlam), const_cast(&flmin), const_cast(ulam.data()), const_cast(&thr), const_cast(&maxit), const_cast(xv.data()), &lmu, ao.data(), ia.data(), kin.data(), rsqo.data(), almo.data(), &nlp, &jerr); } }; struct elnet2_fixture : elnet_base_fixture { protected: using base_t = elnet_base_fixture; void check_pack(const Elnet2Pack& actual, const Elnet2Pack& expected) { base_t::check_pack(actual, expected); expect_float_eq_vec(actual.y, expected.y); } }; TEST_P(elnet2_fixture, elnet2_test) { Elnet2Pack actual( maxit, nx, ne, nlam, alpha, flmin, X, y, xv, ulam, vp, cl, ju); Elnet2Pack expected(actual); run(actual, expected, 4, 5); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( Elnet2Suite, elnet2_fixture, testing::Combine( testing::Values(241, 412, 23968, 31), // seed testing::Values(10, 30, 50), // n testing::Values(5, 20, 40), // p testing::Values(1, 50), // maxit testing::Values(1, 4), // nlam testing::Values(0.0, 0.5, 1.0), // alpha testing::Values(0.5, 1.0, 1.5) // flmin ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/translation/spelnet_unittest.cpp0000644000175000017500000001431414140040573023742 0ustar nileshnilesh#include #include #include namespace glmnetpp { struct SpElnetPack { const double thr = 1e-14; const int maxit, nx, ne, nlam; const double alpha, flmin; const bool isd, intr, ka; Eigen::SparseMatrix X; Eigen::VectorXd y; Eigen::VectorXd w; Eigen::MatrixXd cl; Eigen::MatrixXd ca; Eigen::VectorXd a0; 
Eigen::VectorXi ia; Eigen::VectorXi nin; Eigen::VectorXd rsq; Eigen::VectorXd alm; int nlp = 0, jerr = 0, lmu = 0; const Eigen::VectorXd& ulam; const Eigen::VectorXd& vp; const Eigen::VectorXi& jd; SpElnetPack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, bool _isd, bool _intr, bool _ka, const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _jd) : maxit(_maxit), nx(_nx), ne(_ne), nlam(_nlam), alpha(_alpha) , flmin(_flmin), isd(_isd), intr(_intr), ka(_ka) , X(_X) , y(_y) , w(_w) , cl(_cl) , ca(_nx, _nlam) , a0(nlam) , ia(_nx) , nin(_nlam) , rsq(_nlam) , alm(_nlam) , ulam(_ulam) , vp(_vp) , jd(_jd) { ca.setZero(); a0.setZero(); ia.setZero(); nin.setZero(); rsq.setZero(); alm.setZero(); } void fit() { transl::spelnet( ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr); } void fit_legacy() { int no = X.rows(); int ni = X.cols(); int iisd = isd; int iintr = intr; int ika = ka + 1; auto x_inner = make_sp_inner_idx_1idx(X); auto x_outer = make_sp_outer_idx_1idx(X); ::spelnet_( &ika, const_cast(&alpha), &no, &ni, X.valuePtr(), x_outer.data(), x_inner.data(), y.data(), w.data(), const_cast(jd.data()), const_cast(vp.data()), cl.data(), const_cast(&ne), const_cast(&nx), const_cast(&nlam), const_cast(&flmin), const_cast(ulam.data()), const_cast(&thr), &iisd, &iintr, const_cast(&maxit), &lmu, a0.data(), ca.data(), ia.data(), nin.data(), rsq.data(), alm.data(), &nlp, &jerr); } }; struct sp_elnet_fixture : base_fixture , testing::WithParamInterface< std::tuple > { void SetUp() override { size_t seed, n, p; std::tie(seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka) = GetParam(); DataGen dgen(seed); // hehe X = dgen.make_X_sparse(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); w = dgen.make_w(n); jd = dgen.make_jd(p); vp = 
dgen.make_vp(p); cl = dgen.make_cl(p); nx = dgen.make_nx(p); ne = dgen.make_ne(p); ulam = dgen.make_ulam(nlam); } protected: Eigen::SparseMatrix X; Eigen::MatrixXd cl; Eigen::VectorXd y, w, ulam, vp; Eigen::VectorXi jd; int nx, ne, maxit, nlam; double alpha, flmin; bool isd, intr, ka; void check_pack(const SpElnetPack& actual, const SpElnetPack& expected) { Eigen::MatrixXd actual_X_dense = actual.X; Eigen::MatrixXd expected_X_dense = expected.X; expect_float_eq_mat(actual_X_dense, expected_X_dense); expect_float_eq_vec(actual.w, expected.w); // Fortran version uses uninitialized values in columns that are omitted (inside ju). // My version sets them to 0. // As a heuristic, if my cl contains (0,0) in a column, // we will assume that's an omitted feature, so we skip the check. // Otherwise, valgrind will complain. for (int j = 0; j < actual.cl.cols(); ++j) { if ((actual.cl.col(j).array() == 0).all()) continue; for (int i = 0; i < actual.cl.rows(); ++i) { EXPECT_FLOAT_EQ(actual.cl(i,j), expected.cl(i,j)); } } expect_near_vec(actual.ia, expected.ia, 1); expect_eq_vec(actual.nin, expected.nin); expect_float_eq_mat(actual.ca, expected.ca); expect_float_eq_vec(actual.a0, expected.a0); expect_float_eq_vec(actual.y, expected.y); expect_float_eq_vec(actual.rsq, expected.rsq); expect_float_eq_vec(actual.alm, expected.alm); EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); EXPECT_EQ(actual.lmu, expected.lmu); } }; TEST_P(sp_elnet_fixture, sp_elnet_test) { SpElnetPack actual( maxit, nx, ne, nlam, alpha, flmin, isd, intr, ka, X, y, w, ulam, vp, cl, jd); SpElnetPack expected(actual); std::thread actual_thr([&]() { actual.fit(); }); std::thread expected_thr([&]() { expected.fit_legacy(); }); set_affinity(0, actual_thr.native_handle()); set_affinity(1, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpElnetSuite, sp_elnet_fixture, testing::Combine( // seed, n, p, maxit, nlam, 
alpha, flmin, isd, intr, ka testing::Values(241, 412, 23968), testing::Values(10, 30, 50), testing::Values(5, 20, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 4, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5), testing::Bool(), testing::Bool(), testing::Bool() ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/translation/elnet1_unittest.cpp0000644000175000017500000000557414140040573023470 0ustar nileshnilesh#include #include #include namespace glmnetpp { struct Elnet1Pack : ElnetBasePack { Eigen::VectorXd g; const Eigen::MatrixXd& X; Elnet1Pack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _xv, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _ju) : ElnetBasePack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , g(_X.transpose() * _y) , X(_X) {} void fit() override { transl::elnet1( alpha, ju, vp, cl, g, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } void fit_legacy() override { int ni = X.cols(); int no = X.rows(); elnet1_(const_cast(&alpha), &ni, const_cast(ju.data()), const_cast(vp.data()), const_cast(cl.data()), g.data(), &no, const_cast(&ne), const_cast(&nx), const_cast(X.data()), const_cast(&nlam), const_cast(&flmin), const_cast(ulam.data()), const_cast(&thr), const_cast(&maxit), const_cast(xv.data()), &lmu, ao.data(), ia.data(), kin.data(), rsqo.data(), almo.data(), &nlp, &jerr); } }; struct elnet1_fixture : elnet_base_fixture { protected: using base_t = elnet_base_fixture; void check_pack(const Elnet1Pack& actual, const Elnet1Pack& expected) { base_t::check_pack(actual, expected); expect_float_eq_vec(actual.g, expected.g); } }; TEST_P(elnet1_fixture, elnet1_test) { Elnet1Pack actual( maxit, nx, ne, nlam, alpha, flmin, X, y, xv, ulam, vp, cl, ju); Elnet1Pack expected(actual); run(actual, expected, 2, 
3); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( Elnet1Suite, elnet1_fixture, testing::Combine( testing::Values(241, 412, 23968, 31), // seed testing::Values(10, 30, 50), // n testing::Values(5, 20, 40), // p testing::Values(1, 50), // maxit testing::Values(1, 4), // nlam testing::Values(0.0, 0.5, 1.0), // alpha testing::Values(0.5, 1.0, 1.5) // flmin ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/translation/spelnet1_unittest.cpp0000644000175000017500000000713214140040573024023 0ustar nileshnilesh#include #include #include namespace glmnetpp { struct SpElnet1Pack : ElnetBasePack { Eigen::VectorXd g; const Eigen::SparseMatrix& X; const Eigen::VectorXd& w; const Eigen::VectorXd& xm; const Eigen::VectorXd& xs; SpElnet1Pack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::VectorXd& _w, const Eigen::VectorXd& _xm, const Eigen::VectorXd& _xs, const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _xv, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _ju) : ElnetBasePack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , g(_X.transpose() * _y) , X(_X) , w(_w) , xm(_xm) , xs(_xs) {} void fit() override { transl::spelnet1( alpha, ju, vp, cl, g, w, ne, nx, X, nlam, flmin, ulam, thr, maxit, xm, xs, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } void fit_legacy() override { int ni = X.cols(); int no = X.rows(); // for god-knows what reason, spelnet1 requires 1-indexed inner and outer indices :( auto x_inner = make_sp_inner_idx_1idx(X); auto x_outer = make_sp_outer_idx_1idx(X); spelnet1_(const_cast(&alpha), &ni, g.data(), &no, const_cast(w.data()), const_cast(&ne), const_cast(&nx), const_cast(X.valuePtr()), x_outer.data(), x_inner.data(), const_cast(ju.data()), const_cast(vp.data()), const_cast(cl.data()), const_cast(&nlam), const_cast(&flmin), const_cast(ulam.data()), const_cast(&thr), const_cast(&maxit), 
const_cast(xm.data()), const_cast(xs.data()), const_cast(xv.data()), &lmu, ao.data(), ia.data(), kin.data(), rsqo.data(), almo.data(), &nlp, &jerr); } }; struct spelnet1_fixture : spelnet_base_fixture { protected: using base_t = spelnet_base_fixture; void check_pack(const SpElnet1Pack& actual, const SpElnet1Pack& expected) { base_t::check_pack(actual, expected); expect_float_eq_vec(actual.g, expected.g); } }; TEST_P(spelnet1_fixture, spelnet1_test) { SpElnet1Pack actual( maxit, nx, ne, nlam, alpha, flmin, w, xm ,xs, X, y, xv, ulam, vp, cl, ju); SpElnet1Pack expected(actual); run(actual, expected, 8, 9); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpElnet1Suite, spelnet1_fixture, testing::Combine( testing::Values(241, 412, 23968, 31), // seed testing::Values(10, 30, 50), // n testing::Values(5, 20, 40), // p testing::Values(1, 50), // maxit testing::Values(1, 4), // nlam testing::Values(0.0, 0.5, 1.0), // alpha testing::Values(0.5, 1.0, 1.5) // flmin ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/translation/elnet_base_fixture.hpp0000644000175000017500000001062414140040573024205 0ustar nileshnilesh#pragma once #include #include #include #include // separately unit-tested namespace glmnetpp { struct ElnetBasePack { const double thr = 1e-14; const int maxit, nx, ne, nlam; const double alpha, flmin; // will be modified Eigen::MatrixXd ao; Eigen::VectorXi ia; Eigen::VectorXi kin; Eigen::VectorXd rsqo; Eigen::VectorXd almo; int nlp = 0, jerr = 0, lmu = 0; const Eigen::VectorXd& xv; const Eigen::VectorXd& ulam; const Eigen::VectorXd& vp; const Eigen::MatrixXd& cl; const Eigen::VectorXi& ju; ElnetBasePack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::VectorXd& _xv, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _ju) : maxit(_maxit), nx(_nx), ne(_ne), nlam(_nlam) , alpha(_alpha), flmin(_flmin), xv(_xv), ulam(_ulam) , vp(_vp), cl(_cl), ju(_ju) { int p = 
xv.size(); ao.setZero(nx, nlam); ia.setZero(p); kin.setZero(nlam); rsqo.setZero(nlam); almo.setZero(nlam); } virtual void fit() =0; virtual void fit_legacy() =0; }; struct elnet_base_fixture: base_fixture, testing::WithParamInterface< std::tuple > { void SetUp() override { size_t seed, n, p; std::tie(seed, n, p, maxit, nlam, alpha, flmin) = GetParam(); DataGen dgen(seed); // hehe X = dgen.make_X(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); auto w = dgen.make_w(n); ju = dgen.make_ju(p); vp = dgen.make_vp(p); cl = dgen.make_cl(p); nx = dgen.make_nx(p); ne = dgen.make_ne(p); ulam = dgen.make_ulam(nlam); xv.setOnes(p); ia.setZero(p); Eigen::VectorXd xm(p), xs(p); double ym = 0, ys = 0; Standardize1::eval(X, y, w, 1, 1, ju, xm, xs, ym, ys, xv); } protected: Eigen::MatrixXd X, cl; Eigen::VectorXd y, xv, ulam, vp; Eigen::VectorXi ju, ia; int nx, ne, maxit, nlam; double alpha, flmin; void run(ElnetBasePack& actual, ElnetBasePack& expected, int core1, int core2) const { std::thread actual_thr([&]() { actual.fit(); }); std::thread expected_thr([&]() { expected.fit_legacy(); }); set_affinity(core1, actual_thr.native_handle()); set_affinity(core2, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); } void check_pack(const ElnetBasePack& actual, const ElnetBasePack& expected) const { EXPECT_EQ(actual.lmu, expected.lmu); EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); expect_eq_vec(actual.kin, expected.kin); // Legacy is 1-indexed, so ia should be shifted by 1. // Only applies up to the indicies corresponding to active variables. // The best I can think of testing this is that the absolute distance is off by at most 1. 
expect_near_vec(actual.ia, expected.ia, 1); expect_float_eq_mat(actual.ao, expected.ao); expect_float_eq_vec(actual.rsqo, expected.rsqo); expect_float_eq_vec(actual.almo, expected.almo); } }; struct spelnet_base_fixture : elnet_base_fixture { void SetUp() override { size_t seed, n, p; std::tie(seed, n, p, maxit, nlam, alpha, flmin) = GetParam(); DataGen dgen(seed); // hehe X = dgen.make_X_sparse(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); w = dgen.make_w(n); ju = dgen.make_ju(p); vp = dgen.make_vp(p); cl = dgen.make_cl(p); nx = dgen.make_nx(p); ne = dgen.make_ne(p); ulam = dgen.make_ulam(nlam); xv.setOnes(p); ia.setZero(p); Eigen::VectorXd xv(p); xm.setZero(p); xs.setZero(p); double ym, ys; SpStandardize1::eval(X, y, w, true, true, ju, xm, xs, ym, ys, xv); } protected: Eigen::SparseMatrix X; Eigen::VectorXd xm, xs, w; }; } // namespace glmnetpp glmnet/src/glmnetpp/test/translation/elnet_unittest.cpp0000644000175000017500000001353614140040573023404 0ustar nileshnilesh#include #include #include namespace glmnetpp { struct ElnetPack { const double thr = 1e-14; const int maxit, nx, ne, nlam; const double alpha, flmin; const bool isd, intr, ka; Eigen::MatrixXd X; Eigen::VectorXd y; Eigen::VectorXd w; Eigen::MatrixXd cl; Eigen::MatrixXd ca; Eigen::VectorXd a0; Eigen::VectorXi ia; Eigen::VectorXi nin; Eigen::VectorXd rsq; Eigen::VectorXd alm; int nlp = 0, jerr = 0, lmu = 0; const Eigen::VectorXd& ulam; const Eigen::VectorXd& vp; const Eigen::VectorXi& jd; ElnetPack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, bool _isd, bool _intr, bool _ka, const Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _jd) : maxit(_maxit), nx(_nx), ne(_ne), nlam(_nlam), alpha(_alpha) , flmin(_flmin), isd(_isd), intr(_intr), ka(_ka) , X(_X) , y(_y) , w(_w) , cl(_cl) , ca(_nx, _nlam) , a0(nlam) , ia(_nx) , nin(_nlam) 
, rsq(_nlam) , alm(_nlam) , ulam(_ulam) , vp(_vp) , jd(_jd) { ca.setZero(); a0.setZero(); ia.setZero(); nin.setZero(); rsq.setZero(); alm.setZero(); } void fit() { transl::elnet( ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr); } void fit_legacy() { int no = X.rows(); int ni = X.cols(); int iisd = isd; int iintr = intr; int ika = ka + 1; ::elnet_( &ika, const_cast(&alpha), &no, &ni, X.data(), y.data(), w.data(), const_cast(jd.data()), const_cast(vp.data()), cl.data(), const_cast(&ne), const_cast(&nx), const_cast(&nlam), const_cast(&flmin), const_cast(ulam.data()), const_cast(&thr), &iisd, &iintr, const_cast(&maxit), &lmu, a0.data(), ca.data(), ia.data(), nin.data(), rsq.data(), alm.data(), &nlp, &jerr); } }; struct elnet_fixture : base_fixture , testing::WithParamInterface< std::tuple > { void SetUp() override { size_t seed, n, p; std::tie(seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka) = GetParam(); DataGen dgen(seed); // hehe X = dgen.make_X(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); w = dgen.make_w(n); jd = dgen.make_jd(p); vp = dgen.make_vp(p); cl = dgen.make_cl(p); nx = dgen.make_nx(p); ne = dgen.make_ne(p); ulam = dgen.make_ulam(nlam); } protected: Eigen::MatrixXd X, cl; Eigen::VectorXd y, w, ulam, vp; Eigen::VectorXi jd; int nx, ne, maxit, nlam; double alpha, flmin; bool isd, intr, ka; void check_pack(const ElnetPack& actual, const ElnetPack& expected) { expect_float_eq_mat(actual.X, expected.X); expect_float_eq_vec(actual.w, expected.w); // Fortran version uses uninitialized values in columns that are omitted (inside ju). // My version sets them to 0. // As a heuristic, if my cl contains (0,0) in a column, // we will assume that's an omitted feature, so we skip the check. // Otherwise, valgrind will complain. 
for (int j = 0; j < actual.cl.cols(); ++j) { if ((actual.cl.col(j).array() == 0).all()) continue; for (int i = 0; i < actual.cl.rows(); ++i) { EXPECT_FLOAT_EQ(actual.cl(i,j), expected.cl(i,j)); } } expect_near_vec(actual.ia, expected.ia, 1); expect_eq_vec(actual.nin, expected.nin); expect_float_eq_mat(actual.ca, expected.ca); expect_float_eq_vec(actual.a0, expected.a0); expect_float_eq_vec(actual.y, expected.y); expect_float_eq_vec(actual.rsq, expected.rsq); expect_float_eq_vec(actual.alm, expected.alm); EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); EXPECT_EQ(actual.lmu, expected.lmu); } }; TEST_P(elnet_fixture, elnet_test) { ElnetPack actual( maxit, nx, ne, nlam, alpha, flmin, isd, intr, ka, X, y, w, ulam, vp, cl, jd); ElnetPack expected(actual); std::thread actual_thr([&]() { actual.fit(); }); std::thread expected_thr([&]() { expected.fit_legacy(); }); set_affinity(0, actual_thr.native_handle()); set_affinity(1, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( ElnetSuite, elnet_fixture, testing::Combine( // seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka testing::Values(241, 412, 23968), testing::Values(10, 30, 50), testing::Values(5, 20, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 4, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5), testing::Bool(), testing::Bool(), testing::Bool() ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/translation/spelnet2_unittest.cpp0000644000175000017500000000711414140040573024024 0ustar nileshnilesh#include #include #include namespace glmnetpp { struct SpElnet2Pack : ElnetBasePack { Eigen::VectorXd y; const Eigen::SparseMatrix& X; const Eigen::VectorXd& w; const Eigen::VectorXd& xm; const Eigen::VectorXd& xs; SpElnet2Pack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::VectorXd& _w, const Eigen::VectorXd& _xm, const Eigen::VectorXd& _xs, const 
Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _xv, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _ju) : ElnetBasePack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , y(_y) , X(_X) , w(_w) , xm(_xm) , xs(_xs) {} void fit() override { transl::spelnet2( alpha, ju, vp, cl, y, w, ne, nx, X, nlam, flmin, ulam, thr, maxit, xm, xs, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } void fit_legacy() override { int ni = X.cols(); int no = X.rows(); // for god-knows what reason, spelnet2 requires 1-indexed inner and outer indices :( auto x_inner = make_sp_inner_idx_1idx(X); auto x_outer = make_sp_outer_idx_1idx(X); spelnet2_(const_cast(&alpha), &ni, y.data(), const_cast(w.data()), &no, const_cast(&ne), const_cast(&nx), const_cast(X.valuePtr()), x_outer.data(), x_inner.data(), const_cast(ju.data()), const_cast(vp.data()), const_cast(cl.data()), const_cast(&nlam), const_cast(&flmin), const_cast(ulam.data()), const_cast(&thr), const_cast(&maxit), const_cast(xm.data()), const_cast(xs.data()), const_cast(xv.data()), &lmu, ao.data(), ia.data(), kin.data(), rsqo.data(), almo.data(), &nlp, &jerr); } }; struct spelnet2_fixture : spelnet_base_fixture { protected: using base_t = spelnet_base_fixture; void check_pack(const SpElnet2Pack& actual, const SpElnet2Pack& expected) { base_t::check_pack(actual, expected); expect_float_eq_vec(actual.y, expected.y); } }; TEST_P(spelnet2_fixture, spelnet2_test) { SpElnet2Pack actual( maxit, nx, ne, nlam, alpha, flmin, w, xm ,xs, X, y, xv, ulam, vp, cl, ju); SpElnet2Pack expected(actual); run(actual, expected, 10, 11); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpElnet2Suite, spelnet2_fixture, testing::Combine( testing::Values(241, 412, 23968, 31), // seed testing::Values(10, 30, 50), // n testing::Values(5, 20, 40), // p testing::Values(1, 50), // maxit testing::Values(1, 4), // nlam testing::Values(0.0, 0.5, 1.0), // alpha 
testing::Values(0.5, 1.0, 1.5) // flmin ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/0000755000175000017500000000000014140040573017141 5ustar nileshnileshglmnet/src/glmnetpp/test/testutil/data_util.hpp0000644000175000017500000001323114140040573021620 0ustar nileshnilesh#pragma once #include #include #include #include #include #include namespace glmnetpp { inline Eigen::MatrixXd read_csv(const std::string& filename) { std::vector matrixEntries; std::ifstream matrixDataFile(filename); std::string matrixRowString; std::string matrixEntry; int matrixRowNumber = 0; while (std::getline(matrixDataFile, matrixRowString)) { std::stringstream matrixRowStringStream(matrixRowString); while (std::getline(matrixRowStringStream, matrixEntry, ',')) { matrixEntries.push_back(stod(matrixEntry)); } matrixRowNumber++; } // here we convet the vector variable into the matrix and return the resulting object, // note that matrixEntries.data() is the pointer to the first memory location at which the entries of the vector matrixEntries are stored; return Eigen::Map>( matrixEntries.data(), matrixRowNumber, matrixEntries.size() / matrixRowNumber); } // Center and standardize a matrix with 0 mean columns and scaled by var/n each column template inline Eigen::Matrix center_scale(const Eigen::Matrix& X) { Eigen::Matrix out(X.rows(), X.cols()); auto n = X.rows(); for (int i = 0; i < X.cols(); ++i) { out.col(i) = X.col(i).array() - X.col(i).mean(); out.col(i) /= out.col(i).norm() / std::sqrt(n); } return out; } struct DataGen { DataGen(size_t seed) : gen(seed) {} auto make_X(size_t n, size_t p) { std::normal_distribution norm(0., 1.); Eigen::MatrixXd X = Eigen::MatrixXd::NullaryExpr( n, p, [&](auto, auto) { return norm(gen); }); return X; } auto make_X_sparse(size_t n, size_t p, double sparsity=0.4) { Eigen::SparseMatrix X(n, p); std::normal_distribution norm(0., 1.); std::bernoulli_distribution bern(sparsity); for (size_t j = 0; j < p; ++j) { X.coeffRef(0, j) = norm(gen); // 
always make sure first row is non-zero so that stddev > 0 for (size_t i = 1; i < n; ++i) { if (bern(gen)) X.coeffRef(i, j) = norm(gen); } } X.makeCompressed(); return X; } auto make_beta(size_t p, double sparsity=0.5) { std::bernoulli_distribution bern_sp(sparsity); Eigen::VectorXd beta = Eigen::VectorXd::NullaryExpr( p, [&](auto) { return bern_sp(gen); }); return beta; } auto make_y(const Eigen::MatrixXd& X, const Eigen::VectorXd& beta) { std::normal_distribution norm(0., 1.); Eigen::VectorXd y; auto n = X.rows(); y = X * beta + y.NullaryExpr(n, [&](auto) { return norm(gen); }); auto ym = y.mean(); auto ys = std::sqrt((y.array() - y.mean()).square().sum()); y.array() -= ym; y /= ys; return y; } auto make_w(size_t n) { std::bernoulli_distribution bern_half(0.5); Eigen::VectorXd weights; weights = weights.NullaryExpr(n, [&](auto) { return bern_half(gen) + 1; }); weights /= weights.sum(); return weights; } auto make_ju(size_t p, double inclusion_rate=0.99) { std::bernoulli_distribution bern_in(inclusion_rate); Eigen::VectorXi inclusion; inclusion = inclusion.NullaryExpr(p, [&](auto) { return bern_in(gen); }); return inclusion; } auto make_jd(size_t p) { std::uniform_int_distribution<> mult(1, p); auto n_jd = mult(gen); Eigen::VectorXi jd(n_jd+1); jd = Eigen::VectorXi::NullaryExpr(n_jd+1, [&](auto){ return mult(gen); }); jd[0] = n_jd; return jd; } auto make_vp(size_t p) { std::uniform_int_distribution<> mult(1, p); Eigen::VectorXd vp; vp = vp.NullaryExpr(p, [&](auto) { return mult(gen); }); vp /= vp.sum() / p; return vp; } auto make_cl(size_t p) { std::normal_distribution norm(0., 1.); Eigen::MatrixXd cl; cl = Eigen::MatrixXd::NullaryExpr(2, p, [&](auto i, auto) { if (i == 0) return -100 * std::abs(norm(gen)); else return 100 * std::abs(norm(gen)); }); return cl; } auto make_nx(size_t p) { std::uniform_int_distribution<> mult(1, p); return mult(gen); } auto make_ne(size_t p) { std::uniform_int_distribution<> mult(1, p); return mult(gen); } auto make_ulam(size_t nlam) 
{ std::uniform_real_distribution unif(0.0001, 2.); Eigen::VectorXd ulam; ulam = ulam.NullaryExpr(nlam, [&](auto){ return unif(gen); }); std::sort(ulam.data(), ulam.data() + ulam.size(), std::greater()); return ulam; } private: std::mt19937 gen; }; // Make sparse matrix inner index array 1-indexed. template inline auto make_sp_inner_idx_1idx(const SparseMatType& X) { Eigen::VectorXi x_inner(X.nonZeros()); for (int i = 0; i < x_inner.size(); ++i) { x_inner[i] = X.innerIndexPtr()[i] + 1; } return x_inner; } // Make sparse matrix outer index array 1-indexed. template inline auto make_sp_outer_idx_1idx(const SparseMatType& X) { Eigen::VectorXi x_outer(X.cols() + 1); for (int i = 0; i < x_outer.size(); ++i) { x_outer[i] = X.outerIndexPtr()[i] + 1; } return x_outer; } } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/translation/0000755000175000017500000000000014140040573021477 5ustar nileshnileshglmnet/src/glmnetpp/test/testutil/translation/elnet2.hpp0000644000175000017500000001767514140040573023421 0ustar nileshnilesh#pragma once #include #include #include #include namespace glmnetpp { namespace transl { // Note: the FloatType is really important! // In Fortran, constants are by default single-precision, // so to test compatability, we have set the FloatType = float. // But to test against our abstracted version of elnet1, // we should use FloatType = double. // Brute-force translation of legacy elnet2 Fortran. 
template inline void elnet2_do_b( bool& iz, bool& jz, IntType nin, IntType m, AType& a, const IAType& ia, IntType& nlp, const XType& x, YType& y, const XVType& xv, const VPType& vp, ValueType ab, ValueType dem, const CLType& cl, ValueType& rsq, ValueType thr, IntType maxit, IntType& jerr ) { using int_t = IntType; iz = true; while (1) { ++nlp; auto dlx = 0.0; for (int_t l = 0; l < nin; ++l) { auto k = ia(l); auto gk = y.dot(x.col(k)); auto ak = a(k); auto u = gk + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; auto del = a(k) - ak; rsq += del * (static_cast(2.0) * gk - del * xv(k)); y -= del * x.col(k); dlx = std::max(xv(k) * del * del, dlx); } if (dlx < thr) break; if (nlp > maxit) { jerr = -m-1; throw util::maxit_reached_error(); } } jz = false; } template inline void elnet2( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr) { using int_t = IntType; using int_param_t = InternalParams; int_t ni = x.cols(); Eigen::VectorXd a(ni); a.setZero(); Eigen::VectorXd g(ni); g.setZero(); Eigen::VectorXi mm(ni); mm.setZero(); Eigen::VectorXi ix(ni); ix.setZero(); auto bta = beta; auto omb = 1.0 - beta; auto alm = 0.0; auto alf = 1.0; if (flmin < 1.0) { auto eqs = std::max(int_param_t::eps, flmin); alf = std::pow(eqs, static_cast(1.0)/(nlam - 1)); } auto rsq = 0.0; nlp = 0; int_t nin = 0; bool iz = false; auto mnl = std::min(int_param_t::mnlam, nlam); for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; g(j) = std::abs(y.dot(x.col(j))); } for (int_t m = 0; m < nlam; ++m) { if (int_param_t::itrace != 0) mock_setpb(m); auto alm0 = alm; if (flmin >= 1.0) { 
alm = ulam(m); } else if (m > 1) { alm *= alf; } else if (m == 0) { alm = int_param_t::big; } else { alm0 = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; if (vp(j) > 0.0) { alm0 = std::max(alm0, g(j) / vp(j)); } } alm0 /= std::max(bta, 1e-3); alm = alf * alm0; } auto dem = alm * omb; auto ab = alm * bta; auto rsq0 = rsq; try { bool jz = true; auto tlam = bta * (static_cast(2.0) * alm - alm0); for (int_t k = 0; k < ni; ++k) { if (ix(k) == 1) continue; if (ju[k] == 0) continue; if (g(k) > tlam * vp(k)) ix(k) = 1; } if (iz * jz != 0) { elnet2_do_b( iz, jz, nin, m, a, ia, nlp, x, y, xv, vp, ab, dem, cl, rsq, thr, maxit, jerr); } while (1) { // :again: bool converged_kkt = false; while (1) { if (nlp > maxit) { jerr = -m-1; return; } ++nlp; auto dlx = 0.0; for (int_t k = 0; k < ni; ++k) { if (ix(k) == 0) continue; auto gk = y.dot(x.col(k)); auto ak = a(k); auto u = gk + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; if (mm(k) == 0) { ++nin; if (nin > nx) break; mm(k) = nin; ia(nin-1) = k; } auto del = a(k) - ak; rsq += del * (static_cast(2.0) * gk - del * xv(k)); y -= del * x.col(k); dlx = std::max(xv(k) * del * del, dlx); } if (nin > nx) throw util::max_active_reached_error(); if (dlx < thr) { bool ixx = false; for (int_t k = 0; k < ni; ++k) { if (ix(k) == 1) continue; if (ju[k] == 0) continue; g(k) = std::abs(y.dot(x.col(k))); if (g(k) > ab * vp(k)) { ix(k) = 1; ixx = true; } } if (ixx) continue; converged_kkt = true; } break; } if (converged_kkt) break; if (nlp > maxit) { jerr = -m-1; return; } elnet2_do_b( iz, jz, nin, m, a, ia, nlp, x, y, xv, vp, ab, dem, cl, rsq, thr, maxit, jerr); } } catch (const util::max_active_reached_error&) {} catch (const util::maxit_reached_error&) { return; } if (nin > nx) { jerr = -10000-m-1; break; } if (nin > 0) { for (int_t j = 0; j < nin; ++j) { ao(j, m) = a(ia(j)); } } kin(m) 
= nin; rsqo(m) = rsq; almo(m) = alm; lmu = m+1; if (lmu < mnl) continue; if (flmin >= 1.0) continue; auto me = 0; for (int_t j = 0; j < nin; ++j) { if (ao(j,m)) ++me; } if (me > ne) break; if (rsq-rsq0 < int_param_t::sml*rsq) break; if (rsq > int_param_t::rsqmax) break; } } } // namespace transl } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/translation/spelnet2.hpp0000644000175000017500000002153114140040573023746 0ustar nileshnilesh#pragma once #include #include #include #include namespace glmnetpp { namespace transl { // Note: the FloatType is really important! // In Fortran, constants are by default single-precision, // so to test compatability, we have set the FloatType = float. // But to test against our abstracted version of elnet1, // we should use FloatType = double. // Brute-force translation of legacy elnet2 Fortran. template inline void spelnet2_do_b( bool& iz, bool& jz, IntType nin, IntType m, AType& a, const IAType& ia, const WType& w, IntType& nlp, const XType& x, YType& y, const XMType& xm, const XSType& xs, const XVType& xv, const VPType& vp, ValueType ab, ValueType dem, const CLType& cl, ValueType& o, ValueType& rsq, ValueType thr, IntType maxit, IntType& jerr ) { using int_t = IntType; iz = true; while (1) { ++nlp; auto dlx = 0.0; for (int_t l = 0; l < nin; ++l) { auto k = ia(l); auto gk = x.col(k).cwiseProduct(w).dot( (y.array() + o).matrix()) / xs(k); auto ak = a(k); auto u = gk + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; auto del = a(k) - ak; rsq += del * (static_cast(2.0) * gk - del * xv(k)); auto del_scaled = del / xs(k); y -= del_scaled * x.col(k); o += del_scaled * xm(k); dlx = std::max(xv(k) * del * del, dlx); } if (dlx < thr) break; if (nlp > maxit) { jerr = -m-1; throw util::maxit_reached_error(); } } jz = false; } template inline void spelnet2( ValueType beta, const JUType& ju, 
const VPType& vp, const CLType& cl, YType& y, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr) { using int_t = IntType; using int_param_t = InternalParams; int_t ni = x.cols(); Eigen::VectorXd a(ni); a.setZero(); Eigen::VectorXd g(ni); g.setZero(); Eigen::VectorXi mm(ni); mm.setZero(); Eigen::VectorXi ix(ni); ix.setZero(); auto bta = beta; auto omb = 1.0 - beta; auto alm = 0.0; auto alf = 1.0; if (flmin < 1.0) { auto eqs = std::max(int_param_t::eps, flmin); alf = std::pow(eqs, static_cast(1.0)/(nlam - 1)); } auto rsq = 0.0; nlp = 0; int_t nin = 0; bool iz = false; auto mnl = std::min(int_param_t::mnlam, nlam); auto o = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; g(j) = std::abs( x.col(j).cwiseProduct(w).dot( (y.array() + o).matrix()) / xs(j)); } for (int_t m = 0; m < nlam; ++m) { if (int_param_t::itrace != 0) mock_setpb(m); auto alm0 = alm; if (flmin >= 1.0) { alm = ulam(m); } else if (m > 1) { alm *= alf; } else if (m == 0) { alm = int_param_t::big; } else { alm0 = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; if (vp(j) > 0.0) { alm0 = std::max(alm0, g(j) / vp(j)); } } alm0 /= std::max(bta, 1e-3); alm = alf * alm0; } auto dem = alm * omb; auto ab = alm * bta; auto rsq0 = rsq; try { bool jz = true; auto tlam = bta * (static_cast(2.0) * alm - alm0); for (int_t k = 0; k < ni; ++k) { if (ix(k) == 1) continue; if (ju[k] == 0) continue; if (g(k) > tlam * vp(k)) ix(k) = 1; } while (1) { if (iz * jz != 0) { spelnet2_do_b( iz, jz, nin, m, a, ia, w, nlp, x, y, xm, xs, xv, vp, ab, dem, cl, o, rsq, thr, maxit, jerr); } // :again: bool converged_kkt = false; while (1) { if (nlp > maxit) { jerr = -m-1; return; } ++nlp; auto dlx = 0.0; for (int_t k = 0; k < ni; ++k) { if (ix(k) == 0) 
continue; auto gk = x.col(k).cwiseProduct(w).dot( (y.array() + o).matrix()) / xs(k); auto ak = a(k); auto u = gk + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; if (mm(k) == 0) { ++nin; if (nin > nx) break; mm(k) = nin; ia(nin-1) = k; } auto del = a(k) - ak; rsq += del * (static_cast(2.0) * gk - del * xv(k)); auto del_scaled = del / xs(k); y -= del_scaled * x.col(k); o += del_scaled * xm(k); dlx = std::max(xv(k) * del * del, dlx); } if (nin > nx) throw util::max_active_reached_error(); if (dlx < thr) { bool ixx = false; for (int_t k = 0; k < ni; ++k) { if (ix(k) == 1) continue; if (ju[k] == 0) continue; g(k) = std::abs( x.col(k).cwiseProduct(w).dot( (y.array() + o).matrix()) / xs(k)); if (g(k) > ab * vp(k)) { ix(k) = 1; ixx = true; } } if (ixx) continue; converged_kkt = true; } break; } if (converged_kkt) break; if (nlp > maxit) { jerr = -m-1; return; } spelnet2_do_b( iz, jz, nin, m, a, ia, w, nlp, x, y, xm, xs, xv, vp, ab, dem, cl, o, rsq, thr, maxit, jerr); } } catch (const util::max_active_reached_error&) {} catch (const util::maxit_reached_error&) { return; } if (nin > nx) { jerr = -10000-m-1; break; } if (nin > 0) { for (int_t j = 0; j < nin; ++j) { ao(j, m) = a(ia(j)); } } kin(m) = nin; rsqo(m) = rsq; almo(m) = alm; lmu = m+1; if (lmu < mnl) continue; if (flmin >= 1.0) continue; auto me = 0; for (int_t j = 0; j < nin; ++j) { if (ao(j,m)) ++me; } if (me > ne) break; if (rsq-rsq0 < int_param_t::sml*rsq) break; if (rsq > int_param_t::rsqmax) break; } } } // namespace transl } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/translation/elnet.hpp0000644000175000017500000000647414140040573023332 0ustar nileshnilesh#pragma once #include #include // have been separately unittested #include #include #include #include namespace glmnetpp { namespace transl { template inline void elnet( bool ka, ValueType parm, XType& x, 
YType& y, WType& w, const JDType& jd, const VPType& vp, CLType& cl, IntType ne, IntType nx, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool isd, bool intr, IntType maxit, LmuType& lmu, A0Type& a0, CAType& ca, IAType& ia, NinType& nin, RsqType& rsq, AlmType& alm, IntType& nlp, IntType& jerr ) { using value_t = ValueType; using vec_t = Eigen::Matrix; using bvec_t = std::vector; if (vp.maxCoeff() <= 0) { jerr = 10000; return; } auto ni = x.cols(); vec_t vq = vp.unaryExpr([](auto x){ return std::max(x, 0.); }); vq *= ni / vq.sum(); vec_t g; // only naive version uses it vec_t xm(ni); xm.setZero(); vec_t xs(ni); xs.setZero(); vec_t xv(ni); xv.setZero(); vec_t vlam(nlam); vlam.setZero(); bvec_t ju(ni, false); Chkvars::eval(x, ju); if (jd(0) > 0) { for (int i = 1; i < jd(0) + 1; ++i) { ju[jd(i)-1] = false; } } // can't find true value in ju if (std::find_if(ju.begin(), ju.end(), [](auto x) { return x;}) == ju.end()) { jerr=7777; return; } value_t ym = 0; value_t ys = 0; // naive method if (!ka) { g.setZero(ni); Standardize::eval(x, y, w, isd, intr, ju, g, xm, xs, ym, ys, xv); } // cov method else { Standardize1::eval(x, y, w, isd, intr, ju, xm, xs, ym, ys, xv); } cl /= ys; if (isd) { for (int j = 0; j < ni; ++j) { cl.col(j) *= xs(j); } } if (flmin >= 1.0) vlam = ulam / ys; // naive method if (!ka) { elnet1(parm, ju, vq, cl, g, ne, nx, x, nlam, flmin, vlam, thr, maxit, xv, lmu, ca, ia, nin, rsq, alm, nlp, jerr); } // cov method else { elnet2(parm, ju, vq, cl, y, ne, nx, x, nlam, flmin, vlam, thr, maxit, xv, lmu, ca, ia, nin, rsq, alm, nlp, jerr); } if (jerr > 0) return; for (int k = 0; k < lmu; ++k) { alm(k) *= ys; auto nk = nin(k); for (int l = 0; l < nk; ++l) { ca(l,k) *= ys / xs(ia(l)); } a0(k)=0.0; if (intr) { for (int i = 0; i < nk; ++i) { a0(k) -= ca(i, k) * xm(ia(i)); } a0(k) += ym; } } } } // namespace transl } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/translation/spelnet.hpp0000644000175000017500000000632114140040573023664 
0ustar nileshnilesh#pragma once #include #include #include #include #include #include namespace glmnetpp { namespace transl { template inline void spelnet( bool ka, ValueType parm, XType& x, YType& y, WType& w, const JDType& jd, const VPType& vp, CLType& cl, IntType ne, IntType nx, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, bool isd, bool intr, IntType maxit, LmuType& lmu, A0Type& a0, CAType& ca, IAType& ia, NinType& nin, RsqType& rsq, AlmType& alm, IntType& nlp, IntType& jerr ) { using value_t = ValueType; using vec_t = Eigen::Matrix; using bvec_t = std::vector; if (vp.maxCoeff() <= 0) { jerr = 10000; return; } auto ni = x.cols(); vec_t vq = vp.unaryExpr([](auto x){ return std::max(x, 0.); }); vq *= ni / vq.sum(); vec_t g; // only naive version uses it vec_t xm(ni); xm.setZero(); vec_t xs(ni); xs.setZero(); vec_t xv(ni); xv.setZero(); vec_t vlam(nlam); vlam.setZero(); bvec_t ju(ni, false); SpChkvars::eval(x, ju); if (jd(0) > 0) { for (int i = 1; i < jd(0) + 1; ++i) { ju[jd(i)-1] = false; } } // can't find true value in ju if (std::find_if(ju.begin(), ju.end(), [](auto x) { return x;}) == ju.end()) { jerr=7777; return; } value_t ym = 0; value_t ys = 0; if (!ka) { g.setZero(ni); SpStandardize::eval(x,y,w,isd,intr,ju,g,xm,xs,ym,ys,xv); } else { SpStandardize1::eval(x,y,w,isd,intr,ju,xm,xs,ym,ys,xv); } cl /= ys; if (isd) { for (int j = 0; j < ni; ++j) { cl.col(j) *= xs(j); } } if (flmin >= 1.0) vlam = ulam / ys; // ka == 0 if (!ka) { spelnet1( parm,ju,vp,cl,g,w,ne,nx,x,nlam,flmin,vlam,thr,maxit, xm,xs,xv,lmu,ca,ia,nin,rsq,alm,nlp,jerr); } else { spelnet2( parm,ju,vp,cl,y,w,ne,nx,x,nlam,flmin,vlam,thr,maxit, xm,xs,xv,lmu,ca,ia,nin,rsq,alm,nlp,jerr); } if (jerr > 0) return; for (int k = 0; k < lmu; ++k) { alm(k) *= ys; auto nk = nin(k); for (int l = 0; l < nk; ++l) { ca(l,k) *= ys / xs(ia(l)); } a0(k)=0.0; if (intr) { for (int i = 0; i < nk; ++i) { a0(k) -= ca(i, k) * xm(ia(i)); } a0(k) += ym; } } } } // namespace transl } // namespace glmnetpp 
glmnet/src/glmnetpp/test/testutil/translation/spelnet1.hpp0000644000175000017500000001765514140040573023761 0ustar nileshnilesh#pragma once #include #include #include #include namespace glmnetpp { namespace transl { // Note: the FloatType is really important! // In Fortran, constants are by default single-precision, // so to test compatability, we have set the FloatType = float. // But to test against our abstracted version of elnet1, // we should use FloatType = double. // Brute-force translation of legacy elnet1 Fortran. template inline void spelnet1_do_b( bool& iz, bool& jz, IntType ni, IntType nin, IntType m, DAType& da, AType& a, const IAType& ia, IntType& nlp, GType& g, const XVType& xv, const VPType& vp, ValueType ab, ValueType dem, const JUType& ju, const CLType& cl, ValueType& rsq, const CType& c, const MMType& mm, ValueType thr, IntType maxit, IntType& jerr ) { using int_t = IntType; iz = true; for (int_t j = 0; j < nin; ++j) { da(j) = a(ia(j)); } while (1) { ++nlp; auto dlx = 0.0; for (int_t l = 0; l < nin; ++l) { auto k = ia(l); auto ak = a(k); auto u = g(k) + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; auto del = a(k) - ak; rsq += del * (static_cast(2.0) * g(k) - del * xv(k)); dlx = std::max(xv(k) * del * del, dlx); for (int_t j = 0; j < nin; ++j) { g(ia(j)) -= c(ia(j), mm(k)-1) * del; } } if (dlx < thr) break; if (nlp > maxit) { jerr = -m-1; throw std::exception(); } } for (int_t j = 0; j < nin; ++j) { da(j) = a(ia(j)) - da(j); } for (int_t j = 0; j < ni; ++j) { if (mm(j) != 0) continue; if (ju[j] != 0) { g(j) -= da.head(nin).dot(c.row(j).head(nin)); } } jz = false; } template inline void spelnet1( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, GType& g, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType 
maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr) { using int_t = IntType; using int_param_t = InternalParams; int_t ni = x.cols(); Eigen::MatrixXd c(ni, nx); c.setZero(); Eigen::VectorXd a(ni); a.setZero(); Eigen::VectorXi mm(ni); mm.setZero(); Eigen::VectorXd da(ni); da.setZero(); auto bta = beta; auto omb = 1.0 - beta; auto alm = 0.0; auto alf = 1.0; if (flmin < 1.0) { auto eqs = std::max(int_param_t::eps, flmin); alf = std::pow(eqs, static_cast(1.0)/(nlam - 1)); } auto rsq = 0.0; nlp = 0; int_t nin = 0; bool iz = false; auto mnl = std::min(int_param_t::mnlam, nlam); for (int_t m = 0; m < nlam; ++m) { if (int_param_t::itrace != 0) mock_setpb(m); if (flmin >= 1.0) { alm = ulam(m); } else if (m > 1) { alm *= alf; } else if (m == 0) { alm = int_param_t::big; } else { alm = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; if (vp(j) <= 0.0) continue; alm = std::max(alm, std::abs(g(j)) / vp(j)); } alm = alf * alm / std::max(bta, 1e-3); } auto dem = alm * omb; auto ab = alm * bta; auto rsq0 = rsq; try { bool jz = true; while (1) { if (iz * jz != 0) { spelnet1_do_b( iz, jz, ni, nin, m, da, a, ia, nlp, g, xv, vp, ab, dem, ju, cl, rsq, c, mm, thr, maxit, jerr); } ++nlp; auto dlx = 0.0; for (int_t k = 0; k < ni; ++k) { if (ju[k] == 0) continue; auto ak = a(k); auto u = g(k) + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; if (mm(k) == 0) { ++nin; if (nin > nx) break; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; if (mm(j) != 0) { c(j, nin-1) = c(k, mm(j)-1); continue; } if (j == k) { c(j, nin-1) = xv(j); continue; } auto wx_k = x.col(k).cwiseProduct(w); c(j,nin-1) = (x.col(j).dot(wx_k) - xm(j) * xm(k)) / (xs(j) * xs(k)); } mm(k) = nin; ia(nin-1) = k; } auto del = a(k) - ak; rsq += del 
* (static_cast(2.0) * g(k) - del * xv(k)); dlx = std::max(xv(k) * del * del, dlx); for (int_t j = 0; j < ni; ++j) { if (ju[j] != 0) { g(j) -= c(j, mm(k)-1) * del; } } } if (dlx < thr) break; if (nin > nx) break; if (nlp > maxit) { jerr = -m-1; return; } spelnet1_do_b( iz, jz, ni, nin, m, da, a, ia, nlp, g, xv, vp, ab, dem, ju, cl, rsq, c, mm, thr, maxit, jerr); } } catch (const std::exception&) { return; } if (nin > nx) { jerr = -10000-m-1; break; } if (nin > 0) { for (int_t j = 0; j < nin; ++j) { ao(j, m) = a(ia(j)); } } kin(m) = nin; rsqo(m) = rsq; almo(m) = alm; lmu = m+1; if (lmu < mnl) continue; if (flmin >= 1.0) continue; auto me = 0; for (int_t j = 0; j < nin; ++j) { if (ao(j,m) != 0.0) ++me; } if (me > ne) break; if (rsq-rsq0 < int_param_t::sml*rsq) break; if (rsq > int_param_t::rsqmax) break; } } } // namespace transl } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/translation/elnet1.hpp0000644000175000017500000001717414140040573023412 0ustar nileshnilesh#pragma once #include #include #include #include namespace glmnetpp { namespace transl { // Note: the FloatType is really important! // In Fortran, constants are by default single-precision, // so to test compatability, we have set the FloatType = float. // But to test against our abstracted version of elnet1, // we should use FloatType = double. // Brute-force translation of legacy elnet1 Fortran. 
template inline void elnet1_do_b( bool& iz, bool& jz, IntType ni, IntType nin, IntType m, DAType& da, AType& a, const IAType& ia, IntType& nlp, GType& g, const XVType& xv, const VPType& vp, ValueType ab, ValueType dem, const JUType& ju, const CLType& cl, ValueType& rsq, const CType& c, const MMType& mm, ValueType thr, IntType maxit, IntType& jerr ) { using int_t = IntType; iz = true; for (int_t j = 0; j < nin; ++j) { da(j) = a(ia(j)); } while (1) { ++nlp; auto dlx = 0.0; for (int_t l = 0; l < nin; ++l) { auto k = ia(l); auto ak = a(k); auto u = g(k) + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; auto del = a(k) - ak; rsq += del * (static_cast(2.0) * g(k) - del * xv(k)); dlx = std::max(xv(k) * del * del, dlx); for (int_t j = 0; j < nin; ++j) { g(ia(j)) -= c(ia(j), mm(k)-1) * del; } } if (dlx < thr) break; if (nlp > maxit) { jerr = -m-1; throw std::exception(); } } for (int_t j = 0; j < nin; ++j) { da(j) = a(ia(j)) - da(j); } for (int_t j = 0; j < ni; ++j) { if (mm(j) != 0) continue; if (ju[j] != 0) { g(j) -= da.head(nin).dot(c.row(j).head(nin)); } } jz = false; } template inline void elnet1( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, GType& g, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr) { using int_t = IntType; using int_param_t = InternalParams; int_t ni = x.cols(); Eigen::MatrixXd c(ni, nx); c.setZero(); Eigen::VectorXd a(ni); a.setZero(); Eigen::VectorXi mm(ni); mm.setZero(); Eigen::VectorXd da(ni); da.setZero(); auto bta = beta; auto omb = 1.0 - beta; auto alm = 0.0; auto alf = 1.0; if (flmin < 1.0) { auto eqs = std::max(int_param_t::eps, flmin); alf = std::pow(eqs, static_cast(1.0)/(nlam 
- 1)); } auto rsq = 0.0; nlp = 0; int_t nin = 0; bool iz = false; auto mnl = std::min(int_param_t::mnlam, nlam); for (int_t m = 0; m < nlam; ++m) { if (int_param_t::itrace != 0) mock_setpb(m); if (flmin >= 1.0) { alm = ulam(m); } else if (m > 1) { alm *= alf; } else if (m == 0) { alm = int_param_t::big; } else { alm = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; if (vp(j) <= 0.0) continue; alm = std::max(alm, std::abs(g(j)) / vp(j)); } alm = alf * alm / std::max(bta, 1e-3); } auto dem = alm * omb; auto ab = alm * bta; auto rsq0 = rsq; try { bool jz = true; while (1) { if (iz * jz != 0) { elnet1_do_b( iz, jz, ni, nin, m, da, a, ia, nlp, g, xv, vp, ab, dem, ju, cl, rsq, c, mm, thr, maxit, jerr); } ++nlp; auto dlx = 0.0; for (int_t k = 0; k < ni; ++k) { if (ju[k] == 0) continue; auto ak = a(k); auto u = g(k) + ak * xv(k); auto v = std::abs(u) - vp(k) * ab; a(k) = 0.0; if (v > 0.0) { a(k) = std::max(cl(0, k), std::min(cl(1, k), std::copysign(v,u)/(xv(k)+vp(k)*dem)) ); } if (a(k) == ak) continue; if (mm(k) == 0) { ++nin; if (nin > nx) break; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0) continue; if (mm(j) != 0) { c(j, nin-1) = c(k, mm(j)-1); continue; } if (j == k) { c(j, nin-1) = xv(j); continue; } c(j,nin-1) = x.col(j).dot(x.col(k)); } mm(k) = nin; ia(nin-1) = k; } auto del = a(k) - ak; rsq += del * (static_cast(2.0) * g(k) - del * xv(k)); dlx = std::max(xv(k) * del * del, dlx); for (int_t j = 0; j < ni; ++j) { if (ju[j] != 0) { g(j) -= c(j, mm(k)-1) * del; } } } if (dlx < thr) break; if (nin > nx) break; if (nlp > maxit) { jerr = -m-1; return; } elnet1_do_b( iz, jz, ni, nin, m, da, a, ia, nlp, g, xv, vp, ab, dem, ju, cl, rsq, c, mm, thr, maxit, jerr); } } catch (const std::exception&) { return; } if (nin > nx) { jerr = -10000-m-1; break; } if (nin > 0) { for (int_t j = 0; j < nin; ++j) { ao(j, m) = a(ia(j)); } } kin(m) = nin; rsqo(m) = rsq; almo(m) = alm; lmu = m+1; if (lmu < mnl) continue; if (flmin >= 1.0) continue; auto me = 0; for (int_t j 
= 0; j < nin; ++j) { if (ao(j,m) != 0.0) ++me; } if (me > ne) break; if (rsq-rsq0 < int_param_t::sml*rsq) break; if (rsq > int_param_t::rsqmax) break; } } } // namespace transl } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/mock_pb.hpp0000644000175000017500000000031314140040573021261 0ustar nileshnilesh#pragma once namespace glmnetpp { // TODO: this may change in the future. // Mock progress bar since our tests don't need to test this behavior. inline void mock_setpb(int) {} } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/thread.hpp0000644000175000017500000000124214140040573021120 0ustar nileshnilesh#include #ifdef GLMNETPP_HAS_PTHREAD #include #include #endif namespace glmnetpp { template void set_affinity(int i, PIDType id) { static_cast(i); static_cast(id); auto n_cpus = std::thread::hardware_concurrency(); if (n_cpus <= 1) return; #if defined(GLMNETPP_HAS_PTHREAD) && defined(__linux__) cpu_set_t mask; int status; CPU_ZERO(&mask); CPU_SET(i % n_cpus, &mask); status = pthread_setaffinity_np(id, sizeof(mask), &mask); if (status != 0) { std::cerr << "Error calling pthread_setaffinity_np: " << status << "\n"; } #endif } } // namespace glmnetpp glmnet/src/glmnetpp/test/testutil/base_fixture.hpp0000644000175000017500000000433314140040573022335 0ustar nileshnilesh#pragma once #include "gtest/gtest.h" #include namespace glmnetpp { struct base_fixture : ::testing::Test { protected: using value_t = double; using index_t = Eigen::Index; // Useful tools to test vector equality #define expect_double_eq_vec(v1, v2) \ { \ EXPECT_EQ(v1.size(), v2.size()); \ for (index_t i = 0; i < v1.size(); ++i) { \ EXPECT_DOUBLE_EQ(v1[i], v2[i]); \ } \ } #define expect_float_eq_vec(v1, v2) \ { \ EXPECT_EQ(v1.size(), v2.size()); \ for (index_t i = 0; i < v1.size(); ++i) { \ EXPECT_FLOAT_EQ( \ static_cast(v1[i]), \ static_cast(v2[i]) \ ); \ } \ } #define expect_eq_vec(v1, v2) \ { \ EXPECT_EQ(v1.size(), v2.size()); \ for (index_t i = 0; i < v1.size(); ++i) { \ 
EXPECT_EQ(v1[i], v2[i]); \ } \ } #define expect_double_eq_mat(m1, m2) \ { \ EXPECT_EQ(m1.rows(), m2.rows()); \ EXPECT_EQ(m1.cols(), m2.cols()); \ for (index_t j = 0; j < m1.cols(); ++j) { \ for (index_t i = 0; i < m1.rows(); ++i) { \ EXPECT_DOUBLE_EQ(m1(i,j), m2(i,j)); \ } \ } \ } #define expect_float_eq_mat(m1, m2) \ { \ EXPECT_EQ(m1.rows(), m2.rows()); \ EXPECT_EQ(m1.cols(), m2.cols()); \ for (index_t j = 0; j < m1.cols(); ++j) { \ for (index_t i = 0; i < m1.rows(); ++i) { \ EXPECT_FLOAT_EQ( \ static_cast(m1(i,j)), \ static_cast(m2(i,j))); \ } \ } \ } #define expect_near_vec(v1, v2, tol) \ { \ EXPECT_EQ(v1.size(), v2.size()); \ for (index_t i = 0; i < v1.size(); ++i) { \ EXPECT_NEAR(v1[i], v2[i], tol); \ } \ } #define expect_near_mat(m1, m2, tol) \ { \ EXPECT_EQ(m1.rows(), m2.rows()); \ EXPECT_EQ(m1.cols(), m2.cols()); \ for (index_t j = 0; j < m1.cols(); ++j) { \ for (index_t i = 0; i < m1.rows(); ++i) { \ EXPECT_NEAR(m1(i,j), m2(i,j), tol); \ } \ } \ } }; } // namespace glmnetpp glmnet/src/glmnetpp/test/internal_unittest.cpp0000644000175000017500000000455614140040573021555 0ustar nileshnilesh#include "gtest/gtest.h" #include #include #include namespace glmnetpp { struct InternalPack { double sml, eps, big, rsqmax, pmin, exmx; int mnlam, itrace; void get_int_parms() { glmnetpp::get_int_parms(sml, eps, big, mnlam, rsqmax, pmin, exmx, itrace); } void get_int_parms_() { ::get_int_parms_(&sml, &eps, &big, &mnlam, &rsqmax, &pmin, &exmx, &itrace); } }; struct internal_fixture : ::testing::Test { protected: void check_internal() const { InternalPack actual, expected; actual.get_int_parms(); expected.get_int_parms_(); EXPECT_DOUBLE_EQ(actual.sml, expected.sml); EXPECT_DOUBLE_EQ(actual.eps, expected.eps); EXPECT_DOUBLE_EQ(actual.big, expected.big); // due to single-precision floating-pt error, fortran returns 0.999 + O(1e-8) EXPECT_NEAR(actual.rsqmax, expected.rsqmax, 1e-7); EXPECT_DOUBLE_EQ(actual.pmin, expected.pmin); EXPECT_DOUBLE_EQ(actual.exmx, expected.exmx); 
EXPECT_EQ(actual.mnlam, expected.mnlam); EXPECT_EQ(actual.itrace, expected.itrace); } }; TEST_F(internal_fixture, get_int_parms_compat) { check_internal(); } template struct chg_fixture : internal_fixture, ::testing::WithParamInterface { protected: }; using chg_fixture_double = chg_fixture; using chg_fixture_int = chg_fixture; #define GENERATE_CHG_TEST(fixture, chg_name) \ TEST_P(fixture, chg_name##_compat) \ { \ auto arg = GetParam(); \ chg_name(arg); \ chg_name##_(&arg); \ check_internal(); \ } GENERATE_CHG_TEST(chg_fixture_double, chg_fract_dev) GENERATE_CHG_TEST(chg_fixture_double, chg_min_flmin) GENERATE_CHG_TEST(chg_fixture_double, chg_dev_max) GENERATE_CHG_TEST(chg_fixture_double, chg_big) GENERATE_CHG_TEST(chg_fixture_double, chg_min_null_prob) GENERATE_CHG_TEST(chg_fixture_double, chg_max_exp) GENERATE_CHG_TEST(chg_fixture_int, chg_min_lambdas) GENERATE_CHG_TEST(chg_fixture_int, chg_itrace) INSTANTIATE_TEST_SUITE_P( ChgDoubleSuite, chg_fixture_double, testing::Values(1., 242., 1.52, -92.2, 15.2, 23.4) ); INSTANTIATE_TEST_SUITE_P( ChgIntSuite, chg_fixture_int, testing::Values(-1, 0, 5, 2, 199, 203284, 23, -238, -52, 32) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/CMakeLists.txt0000644000175000017500000001660214140040573020031 0ustar nileshnilesh# Try to find package Threads for the use of pthreads unset(GLMNETPP_HAS_PTHREAD) find_package(Threads) if (CMAKE_USE_PTHREADS_INIT) set(GLMNETPP_HAS_PTHREAD ON) endif() if (DEFINED GLMNETPP_HAS_PTHREAD) set(GLMNETPP_HAS_PTHREAD_MACRO "-DGLMNETPP_HAS_PTHREAD") endif() ######################################################################## # Utility TEST ######################################################################## add_executable(utility_unittest ${CMAKE_CURRENT_SOURCE_DIR}/util/type_traits_unittest.cpp ) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_compile_options(utility_unittest PRIVATE -Werror -Wextra -Wpedantic) endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") 
target_compile_options(utility_unittest PRIVATE -fopenmp) target_link_libraries(utility_unittest -fopenmp) endif() target_compile_options(utility_unittest PRIVATE -g -Wall) target_include_directories(utility_unittest PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) if (GLMNETPP_ENABLE_COVERAGE) target_link_libraries(utility_unittest gcov) endif() target_link_libraries(utility_unittest ${PROJECT_NAME} GTest::gtest_main Eigen3::Eigen) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_link_libraries(utility_unittest pthread) endif() add_test(utility_unittest utility_unittest) ######################################################################## # Translation TEST ######################################################################## add_executable(translation_unittest ${CMAKE_CURRENT_SOURCE_DIR}/translation/elnet_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/translation/elnet1_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/translation/elnet2_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/translation/spelnet_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/translation/spelnet1_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/translation/spelnet2_unittest.cpp # Explicitly add Fortran source for debugging purposes ${GLMNETPP_SOURCEDIR}/legacy/glmnet5dpclean.f ${GLMNETPP_SOURCEDIR}/legacy/pb.c # only needed for fortran code to link to this ) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # DO NOT put -Werror because Fortran gives a bunch of warnings that we unfortunately cannot change. target_compile_options(translation_unittest PRIVATE -Wextra -Wpedantic) endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") target_compile_options(translation_unittest PRIVATE -fopenmp) target_link_libraries(translation_unittest -fopenmp) endif() target_compile_options(translation_unittest PRIVATE -g -Wall) target_compile_definitions(translation_unittest PRIVATE ${GLMNETPP_HAS_PTHREAD_MACRO}) # This is if we can't compile Fortran code, # but we still want to run our C++ method. 
# We enable the mocked version of the legacy Fortran code. if (GLMNETPP_MOCK_LEGACY) target_compile_definitions(translation_unittest PRIVATE -DGLMNETPP_MOCK_LEGACY) endif() target_include_directories(translation_unittest PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${R_INCLUDE_DIR} ${GLMNETPP_SOURCEDIR}) if (GLMNETPP_ENABLE_COVERAGE) target_link_libraries(translation_unittest gcov) endif() target_link_libraries(translation_unittest ${PROJECT_NAME} GTest::gtest_main Eigen3::Eigen) if (NOT GLMNETPP_MOCK_LEGACY) target_link_libraries(translation_unittest ${RLIB}) endif() if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_link_libraries(translation_unittest pthread) endif() add_test(translation_unittest translation_unittest) ######################################################################## # Elnet Path TEST ######################################################################## add_executable(elnet_path_unittest ${CMAKE_CURRENT_SOURCE_DIR}/elnet_path/gaussian_cov_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/elnet_path/gaussian_naive_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/elnet_path/sp_gaussian_cov_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/elnet_path/sp_gaussian_naive_unittest.cpp ) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_compile_options(elnet_path_unittest PRIVATE -Werror -Wextra -Wpedantic) endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") target_compile_options(elnet_path_unittest PRIVATE -fopenmp) target_link_libraries(elnet_path_unittest -fopenmp) endif() target_compile_options(elnet_path_unittest PRIVATE -g -Wall) target_compile_definitions(elnet_path_unittest PRIVATE ${GLMNETPP_HAS_PTHREAD_MACRO}) target_include_directories(elnet_path_unittest PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${GLMNETPP_SOURCEDIR}) if (GLMNETPP_ENABLE_COVERAGE) target_link_libraries(elnet_path_unittest gcov) endif() target_link_libraries(elnet_path_unittest ${PROJECT_NAME} GTest::gtest_main Eigen3::Eigen) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") 
target_link_libraries(elnet_path_unittest pthread) endif() add_test(elnet_path_unittest elnet_path_unittest) ######################################################################## # Elnet Driver TEST (Integration Test) ######################################################################## add_executable(elnet_driver_unittest ${CMAKE_CURRENT_SOURCE_DIR}/elnet_driver/gaussian_unittest.cpp ) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_compile_options(elnet_driver_unittest PRIVATE -Werror -Wextra -Wpedantic) endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") target_compile_options(elnet_driver_unittest PRIVATE -fopenmp) target_link_libraries(elnet_driver_unittest -fopenmp) endif() target_compile_options(elnet_driver_unittest PRIVATE -g -Wall) target_compile_definitions(elnet_driver_unittest PRIVATE ${GLMNETPP_HAS_PTHREAD_MACRO}) target_include_directories(elnet_driver_unittest PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${GLMNETPP_SOURCEDIR}) if (GLMNETPP_ENABLE_COVERAGE) target_link_libraries(elnet_driver_unittest gcov) endif() target_link_libraries(elnet_driver_unittest ${PROJECT_NAME} GTest::gtest_main Eigen3::Eigen) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_link_libraries(elnet_driver_unittest pthread) endif() add_test(elnet_driver_unittest elnet_driver_unittest) ######################################################################## # Core TEST ######################################################################## add_executable(core_unittest ${CMAKE_CURRENT_SOURCE_DIR}/wls_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/standardize_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/chkvars_unittest.cpp ${CMAKE_CURRENT_SOURCE_DIR}/internal_unittest.cpp ) if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_compile_options(core_unittest PRIVATE -Werror -Wextra -Wpedantic) endif() if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") target_compile_options(core_unittest PRIVATE -fopenmp) target_link_libraries(core_unittest -fopenmp) endif() 
target_compile_options(core_unittest PRIVATE -g -Wall) target_compile_definitions(core_unittest PRIVATE ${GLMNETPP_HAS_PTHREAD_MACRO}) target_include_directories(core_unittest PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${GLMNETPP_SOURCEDIR}) if (GLMNETPP_ENABLE_COVERAGE) target_link_libraries(core_unittest gcov) endif() target_link_libraries(core_unittest ${PROJECT_NAME} GTest::gtest_main Eigen3::Eigen) if (NOT GLMNETPP_MOCK_LEGACY) target_link_libraries(core_unittest legacy) endif() if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") target_link_libraries(core_unittest pthread) endif() add_test(core_unittest core_unittest) glmnet/src/glmnetpp/test/wls_unittest.cpp0000644000175000017500000001313614140040573020540 0ustar nileshnilesh#include #include #include #include #include namespace glmnetpp { struct WLSPack { // these values either really won't impact the algorithm // or just needs to be default-initialized int jerr = 0; int nlp = 0; int nino = 0; int m = 1242; double rsqc = 0; double thr = 1e-14; int iz = 1; // initialized upon construction double aint; int nx; const Eigen::MatrixXd& X, cl; const Eigen::VectorXd& vp, w; const Eigen::VectorXi& ju; Eigen::VectorXd beta, y, a, g, r, xv; Eigen::VectorXi ia ,iy, mm; double alm0; double almc; double alpha; int intr; int maxit; WLSPack(double _aint, int _nx, const Eigen::MatrixXd& _X, const Eigen::MatrixXd& _cl, const Eigen::VectorXd& _vp, const Eigen::VectorXd& _w, const Eigen::VectorXi& _ju, const Eigen::VectorXd& _beta, const Eigen::VectorXd& _y, const Eigen::VectorXd& _a, const Eigen::VectorXd& _g, const Eigen::VectorXd& _r, const Eigen::VectorXd& _xv, const Eigen::VectorXi& _ia, const Eigen::VectorXi& _iy, const Eigen::VectorXi& _mm, double _alm0, double _almc, double _alpha, int _intr, int _maxit) : aint(_aint), nx(_nx), X(_X), cl(_cl), vp(_vp), w(_w), ju(_ju) , beta(_beta), y(_y), a(_a), g(_g), r(_r), xv(_xv) , ia(_ia), iy(_iy), mm(_mm) , alm0(_alm0) , almc(_almc) , alpha(_alpha) , intr(_intr) , maxit(_maxit) {} void 
run_wls() { int n = X.rows(); int p = X.cols(); wls(alm0, almc, alpha, m, n, p, X, r, xv, w, intr, ju, vp, cl, nx, thr, maxit, a, aint, g, ia, iy, iz, mm, nino, rsqc, nlp, jerr); } void run_wls_() { int n = X.rows(); int p = X.cols(); wls_(&alm0, &almc, &alpha, &m, &n, &p, const_cast(X.data()), r.data(), const_cast(w.data()), &intr, const_cast(ju.data()), const_cast(vp.data()), const_cast(cl.data()), &nx, &thr, &maxit, a.data(), &aint, g.data(), ia.data(), iy.data(), &iz, mm.data(), &nino, &rsqc, &nlp, &jerr); } }; struct wls_fixture : base_fixture, ::testing::WithParamInterface< std::tuple > { protected: void test_wls_pack(const WLSPack& actual, const WLSPack& expected) { expect_double_eq_vec(actual.r, expected.r); expect_double_eq_vec(actual.a, expected.a); EXPECT_DOUBLE_EQ(actual.aint, expected.aint); expect_double_eq_vec(actual.g, expected.g); expect_double_eq_vec(actual.ia, expected.ia); expect_double_eq_vec(actual.iy, expected.iy); EXPECT_EQ(actual.iz, expected.iz); expect_double_eq_vec(actual.mm, expected.mm); EXPECT_EQ(actual.nino, expected.nino); EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); EXPECT_DOUBLE_EQ(actual.rsqc, expected.rsqc); } }; TEST_P(wls_fixture, wls_test) { double alm0 = 0.002; double almc = 0.001; double alpha = 0.9; int seed, n, p; bool intr, maxit; std::tie(seed, n, p, intr, maxit) = GetParam(); DataGen dgen(seed); auto X = dgen.make_X(n, p); auto cl = dgen.make_cl(p); auto vp = dgen.make_vp(p); auto weights = dgen.make_w(n); auto inclusion = dgen.make_ju(p); auto beta = dgen.make_beta(p); auto y = dgen.make_y(X, beta); double aint = 0; auto nx = dgen.make_nx(p); Eigen::VectorXd a(p); a.setZero(); Eigen::VectorXd g = X.transpose() * y; Eigen::VectorXd r(n); r.setZero(); Eigen::VectorXd xv(p); xv.setOnes(); Eigen::VectorXi ia(p); ia.setZero(); Eigen::VectorXi iy(p); iy.setZero(); Eigen::VectorXi mm(p); mm.setZero(); for (int i = 0; i < std::min(p, 10); ++i) { ia(i) = mm(i) = i+1; } WLSPack actual_pack( aint, 
nx, X, cl, vp, weights, inclusion, beta, y, a, g, r, xv, ia, iy, mm, alm0, almc, alpha, intr, maxit); WLSPack expected_pack(actual_pack); std::thread actual_thr([&]() { actual_pack.run_wls(); }); std::thread expected_thr([&]() { expected_pack.run_wls_(); }); set_affinity(0, actual_thr.native_handle()); set_affinity(1, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); test_wls_pack(actual_pack, expected_pack); } INSTANTIATE_TEST_SUITE_P( WLSSuite, wls_fixture, // combination of inputs: (seed, n, p, intr, maxit) testing::Combine( testing::Values(10, 23, 145, 241, 412, 23968, 31), testing::Values(10, 20, 30, 50), testing::Values(5, 10, 20, 30, 40), testing::Values(0, 1), testing::Values(1, 50, 100) ), // more informative test name [](const testing::TestParamInfo& info) { std::string name = std::string("seed_") + std::to_string(std::get<0>(info.param)) + '_' + "n_" + std::to_string(std::get<1>(info.param)) + '_' + "p_" + std::to_string(std::get<2>(info.param)) + '_' + "intr_" + std::to_string(std::get<3>(info.param)) + '_' + "maxit_" + std::to_string(std::get<4>(info.param)); return name; } ); } // namespace glmnetpp glmnet/src/glmnetpp/test/elnet_path/0000755000175000017500000000000014140040573017407 5ustar nileshnileshglmnet/src/glmnetpp/test/elnet_path/sp_gaussian_naive_unittest.cpp0000644000175000017500000000712314140040573025553 0ustar nileshnilesh#include #include #include #include #include #include #include #include #include namespace glmnetpp { struct SpGaussianNaivePack : GaussianPack { Eigen::VectorXd y; const Eigen::VectorXd& w; const Eigen::VectorXd& xm; const Eigen::VectorXd& xs; const Eigen::SparseMatrix& X; SpGaussianNaivePack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::VectorXd& _w, const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _xm, const Eigen::VectorXd& _xs, Eigen::VectorXd& _xv, Eigen::VectorXd& _ulam, Eigen::VectorXd& _vp, Eigen::MatrixXd& _cl, 
Eigen::VectorXi& _ju) : GaussianPack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , y(_y) , w(_w) , xm(_xm) , xs(_xs) , X(_X) {} void fit() override { using internal_t = SpElnetPointInternal< util::glm_type::gaussian, util::mode_type::naive, double, int, int>; using elnet_point_t = SpElnetPoint< util::glm_type::gaussian, util::mode_type::naive, internal_t>; using elnet_path_t = SpElnetPath< util::glm_type::gaussian, util::mode_type::naive, elnet_point_t>; elnet_path_t elnet_path; elnet_path.fit( alpha, ju, vp, cl, y, w, ne, nx, X, nlam, flmin, ulam, thr, maxit, xm, xs, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, mock_setpb, InternalParams()); } void fit_transl() override { transl::spelnet2( alpha, ju, vp, cl, y, w, ne, nx, X, nlam, flmin, ulam, thr, maxit, xm, xs, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } }; struct sp_gaussian_naive_fixture : sp_gaussian_fixture { protected: using base_t = sp_gaussian_fixture; void check_pack(const SpGaussianNaivePack& actual, const SpGaussianNaivePack& expected) { base_t::check_pack(actual, expected); expect_double_eq_vec(actual.y, expected.y); } }; TEST_P(sp_gaussian_naive_fixture, sp_gaussian_naive_test) { SpGaussianNaivePack actual( maxit, nx, ne, nlam, alpha, flmin, w, X, y, xm, xs, xv, ulam, vp, cl, ju); SpGaussianNaivePack expected(actual); run(actual, expected, 12, 13); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpGaussianNaiveSuite, sp_gaussian_naive_fixture, testing::Combine( // seed, n, p, maxit, nlam, alpha, flmin testing::Values(241, 412, 23968, 31, 87201, 746, 104), testing::Values(10, 30, 50), testing::Values(5, 20, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 4, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5) ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/elnet_path/sp_gaussian_cov_unittest.cpp0000644000175000017500000000707614140040573025247 0ustar nileshnilesh#include #include #include #include #include #include #include #include 
#include namespace glmnetpp { struct SpGaussianCovPack : GaussianPack { Eigen::VectorXd g; const Eigen::VectorXd& w; const Eigen::VectorXd& xm; const Eigen::VectorXd& xs; const Eigen::SparseMatrix& X; SpGaussianCovPack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::VectorXd& _w, const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _xm, const Eigen::VectorXd& _xs, Eigen::VectorXd& _xv, Eigen::VectorXd& _ulam, Eigen::VectorXd& _vp, Eigen::MatrixXd& _cl, Eigen::VectorXi& _ju) : GaussianPack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , g(_X.transpose() * _y) , w(_w) , xm(_xm) , xs(_xs) , X(_X) {} void fit() override { using internal_t = SpElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, double, int, int>; using elnet_point_t = SpElnetPoint< util::glm_type::gaussian, util::mode_type::cov, internal_t>; using elnet_path_t = SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, elnet_point_t>; elnet_path_t elnet_path; elnet_path.fit( alpha, ju, vp, cl, g, w, ne, nx, X, nlam, flmin, ulam, thr, maxit, xm, xs, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, mock_setpb, InternalParams()); } void fit_transl() override { transl::spelnet1( alpha, ju, vp, cl, g, w, ne, nx, X, nlam, flmin, ulam, thr, maxit, xm, xs, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } }; struct sp_gaussian_cov_fixture : sp_gaussian_fixture { protected: using base_t = sp_gaussian_fixture; void check_pack(const SpGaussianCovPack& actual, const SpGaussianCovPack& expected) { base_t::check_pack(actual, expected); expect_double_eq_vec(actual.g, expected.g); } }; TEST_P(sp_gaussian_cov_fixture, sp_gaussian_cov_test) { SpGaussianCovPack actual( maxit, nx, ne, nlam, alpha, flmin, w, X, y, xm, xs, xv, ulam, vp, cl, ju); SpGaussianCovPack expected(actual); run(actual, expected, 6, 7); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpGaussianCovSuite, sp_gaussian_cov_fixture, 
testing::Combine( // seed, n, p, maxit, nlam, alpha, flmin testing::Values(241, 412, 23968, 31, 87201, 746, 104), testing::Values(10, 30, 50), testing::Values(5, 20, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 4, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5) ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/elnet_path/gaussian_base.hpp0000644000175000017500000001147414140040573022733 0ustar nileshnilesh#pragma once #include #include #include #include #include namespace glmnetpp { struct GaussianPack { const double thr = 1e-14; const int maxit, nx, ne, nlam; const double alpha, flmin; // will be modified Eigen::MatrixXd ao; Eigen::VectorXi ia; Eigen::VectorXi kin; Eigen::VectorXd rsqo; Eigen::VectorXd almo; int nlp = 0, jerr = 0, lmu = 0; const Eigen::VectorXd& xv; const Eigen::VectorXd& ulam; const Eigen::VectorXd& vp; const Eigen::MatrixXd& cl; const Eigen::VectorXi& ju; GaussianPack(int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::VectorXd& _xv, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _ju) : maxit(_maxit), nx(_nx), ne(_ne), nlam(_nlam) , alpha(_alpha), flmin(_flmin) , ao(_nx, _nlam) , ia(_ju.size()) , kin(_nlam) , rsqo(_nlam) , almo(_nlam) , xv(_xv), ulam(_ulam), vp(_vp), cl(_cl), ju(_ju) { ao.setZero(); ia.setZero(); kin.setZero(); rsqo.setZero(); almo.setZero(); } virtual void fit() =0; virtual void fit_transl() =0; }; struct gaussian_fixture : base_fixture , testing::WithParamInterface< std::tuple > { void SetUp() override { size_t seed, n, p; std::tie(seed, n, p, maxit, nlam, alpha, flmin) = GetParam(); DataGen dgen(seed); // hehe X = dgen.make_X(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); ju = dgen.make_ju(p); vp = dgen.make_vp(p); cl = dgen.make_cl(p); nx = dgen.make_nx(p); ne = dgen.make_ne(p); ulam = dgen.make_ulam(nlam); xv.setOnes(p); ia.setZero(p); } protected: Eigen::MatrixXd X, cl; 
Eigen::VectorXd y, xv, ulam, vp; Eigen::VectorXi ju, ia; int nx, ne, maxit, nlam; double alpha, flmin; void run(GaussianPack& actual, GaussianPack& expected, int core1, int core2) const { std::thread actual_thr([&]() { actual.fit(); }); std::thread expected_thr([&]() { expected.fit_transl(); }); set_affinity(core1, actual_thr.native_handle()); set_affinity(core2, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); } void check_pack(const GaussianPack& actual, const GaussianPack& expected) { EXPECT_EQ(actual.lmu, expected.lmu); EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); expect_eq_vec(actual.kin, expected.kin); // Legacy is 1-indexed, so ia should be shifted by 1. // Only applies up to the indicies corresponding to active variables. // The best I can think of testing this is that the absolute distance is off by at most 1. expect_near_vec(actual.ia, expected.ia, 1); expect_near_mat(actual.ao, expected.ao, 1e-15); expect_near_vec(actual.rsqo, expected.rsqo, 1e-15); // This check loosens expect_near_vec. // When almo is large (>= 1), check for exact equality. // Otherwise, if too small, put a tolerance of 1e-15. 
EXPECT_EQ(actual.almo.size(), expected.almo.size()); for (int i = 0; i < actual.almo.size(); ++i) { if (actual.almo[i] < 1) { EXPECT_NEAR(actual.almo[i], expected.almo[i], 1e-15); } else { EXPECT_DOUBLE_EQ(actual.almo[i], expected.almo[i]); } } } }; struct sp_gaussian_fixture : gaussian_fixture { void SetUp() override { size_t seed, n, p; std::tie(seed, n, p, maxit, nlam, alpha, flmin) = GetParam(); DataGen dgen(seed); // hehe X = dgen.make_X_sparse(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); w = dgen.make_w(n); ju = dgen.make_ju(p); vp = dgen.make_vp(p); cl = dgen.make_cl(p); nx = dgen.make_nx(p); ne = dgen.make_ne(p); ulam = dgen.make_ulam(nlam); xv.setOnes(p); ia.setZero(p); // compute xm and xs Eigen::MatrixXd X_dense = X; xm = X_dense.transpose() * w; xs = ((X_dense.array().square().matrix().transpose() * w).array() - xm.array().square()).sqrt().matrix(); } protected: Eigen::SparseMatrix X; Eigen::VectorXd xm, xs, w; }; } // namespace glmnetpp glmnet/src/glmnetpp/test/elnet_path/gaussian_cov_unittest.cpp0000644000175000017500000000643714140040573024545 0ustar nileshnilesh#include #include #include #include #include #include #include #include namespace glmnetpp { struct GaussianCovPack : GaussianPack { Eigen::VectorXd g; const Eigen::MatrixXd& X; GaussianCovPack(int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, Eigen::VectorXd& _xv, Eigen::VectorXd& _ulam, Eigen::VectorXd& _vp, Eigen::MatrixXd& _cl, Eigen::VectorXi& _ju) : GaussianPack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , g(_X.transpose() * _y) , X(_X) {} void fit() override { using internal_t = ElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, double, int, int>; using elnet_point_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, internal_t>; using elnet_path_t = ElnetPath< util::glm_type::gaussian, util::mode_type::cov, elnet_point_t>; elnet_path_t 
elnet_path; elnet_path.fit( alpha, ju, vp, cl, g, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, mock_setpb, InternalParams()); } void fit_transl() override { transl::elnet1( alpha, ju, vp, cl, g, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } }; struct gaussian_cov_fixture : gaussian_fixture { protected: using base_t = gaussian_fixture; void check_pack(const GaussianCovPack& actual, const GaussianCovPack& expected) { base_t::check_pack(actual, expected); expect_near_vec(actual.g, expected.g, 1e-15); expect_double_eq_mat(actual.X, expected.X); } }; TEST_P(gaussian_cov_fixture, gaussian_cov_test) { GaussianCovPack actual( maxit, nx, ne, nlam, alpha, flmin, X, y, xv, ulam, vp, cl, ju); GaussianCovPack expected(actual); run(actual, expected, 0, 1); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( GaussianCovSuite, gaussian_cov_fixture, testing::Combine( // seed, n, p, maxit, nlam, alpha, flmin testing::Values(241, 412, 23968, 31, 87201, 746, 104), testing::Values(10, 30, 50), testing::Values(5, 20, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 4, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5) ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/elnet_path/gaussian_naive_unittest.cpp0000644000175000017500000000624114140040573025051 0ustar nileshnilesh#include #include #include #include #include #include #include #include namespace glmnetpp { struct GaussianNaivePack : GaussianPack { Eigen::VectorXd y; const Eigen::MatrixXd& X; GaussianNaivePack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, const Eigen::MatrixXd& _X, Eigen::VectorXd& _y, Eigen::VectorXd& _xv, Eigen::VectorXd& _ulam, Eigen::VectorXd& _vp, Eigen::MatrixXd& _cl, Eigen::VectorXi& _ju) : GaussianPack(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _xv, _ulam, _vp, _cl, _ju) , y(_y) , X(_X) {} void fit() override { using internal_t = ElnetPointInternal< 
util::glm_type::gaussian, util::mode_type::naive, double, int, int>; using elnet_point_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, internal_t>; using elnet_path_t = ElnetPath< util::glm_type::gaussian, util::mode_type::naive, elnet_point_t>; elnet_path_t elnet_path; elnet_path.fit( alpha, ju, vp, cl, y, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, mock_setpb, InternalParams()); } void fit_transl() override { transl::elnet2( alpha, ju, vp, cl, y, ne, nx, X, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr); } }; struct gaussian_naive_fixture : gaussian_fixture { protected: using base_t = gaussian_fixture; void check_pack(const GaussianNaivePack& actual, const GaussianNaivePack& expected) { base_t::check_pack(actual, expected); expect_double_eq_vec(actual.y, expected.y); expect_double_eq_mat(actual.X, expected.X); } }; TEST_P(gaussian_naive_fixture, gaussian_naive_test) { GaussianNaivePack actual( maxit, nx, ne, nlam, alpha, flmin, X, y, xv, ulam, vp, cl, ju); GaussianNaivePack expected(actual); run(actual, expected, 4, 5); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( GaussianNaiveSuite, gaussian_naive_fixture, testing::Combine( testing::Values(241, 412, 23968, 31, 87201, 746, 104), testing::Values(10, 30, 50), testing::Values(5, 20, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 4, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5) ) ); } // namespace glmnetpp glmnet/src/glmnetpp/test/standardize_unittest.cpp0000644000175000017500000002374514140040573022252 0ustar nileshnilesh#include #include #include #include #include namespace glmnetpp { struct Standardize1Pack { Eigen::MatrixXd X; Eigen::VectorXd y, w, xm, xs, xv; Eigen::VectorXi ju; bool isd, intr; double ym = 0, ys = 0; int jerr = 0; Standardize1Pack(const Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXi& _ju, bool _isd, bool _intr) : 
X(_X), y(_y), w(_w), xm(_ju.size()) , xs(_ju.size()), xv(_ju.size()), ju(_ju), isd(_isd), intr(_intr) { xm.setZero(); xs.setZero(); xv.setZero(); } void standardize() { Standardize1::eval(X, y, w, isd, intr, ju, xm, xs, ym, ys, xv); } void standardize_legacy() { int i_isd = isd; int i_intr = intr; int no = X.rows(); int ni = X.cols(); standard1_(&no, &ni, X.data(), y.data(), w.data(), &i_isd, &i_intr, ju.data(), xm.data(), xs.data(), &ym, &ys, xv.data(), &jerr); } }; struct StandardizePack : Standardize1Pack { using base_t = Standardize1Pack; Eigen::VectorXd g; StandardizePack(const Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXi& _ju, bool _isd, bool _intr) : base_t(_X, _y, _w, _ju, _isd, _intr) , g(_ju.size()) { g.setZero(); } void standardize() { Standardize::eval(X, y, w, isd, intr, ju, g, xm, xs, ym, ys, xv); } void standardize_legacy() { int i_isd = isd; int i_intr = intr; int no = X.rows(); int ni = X.cols(); standard_(&no, &ni, X.data(), y.data(), w.data(), &i_isd, &i_intr, ju.data(), g.data(), xm.data(), xs.data(), &ym, &ys, xv.data(), &jerr); } }; struct standardize_fixture : base_fixture, testing::WithParamInterface< std::tuple > { void SetUp() override { int seed, n, p; std::tie(seed, n, p, isd, intr) = GetParam(); DataGen dgen(seed); X = dgen.make_X(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); w = dgen.make_w(n); ju = dgen.make_ju(p); } protected: Eigen::MatrixXd X; Eigen::VectorXd y, w; Eigen::VectorXi ju; bool isd, intr; // double precision error: // absolute difference shouldn't be any more than this amount static constexpr double tol = 1e-14; void check_standardize1_pack(const Standardize1Pack& actual, const Standardize1Pack& expected) { expect_near_mat(actual.X, expected.X, tol); expect_near_vec(actual.y, expected.y, tol); expect_near_vec(actual.w, expected.w, tol); expect_near_vec(actual.xm, expected.xm, tol); expect_near_vec(actual.xs, expected.xs, tol); 
expect_near_vec(actual.xv, expected.xv, tol); EXPECT_NEAR(actual.ym, expected.ym, tol); EXPECT_NEAR(actual.ys, expected.ys, tol); } void check_standardize_pack(const StandardizePack& actual, const StandardizePack& expected) { check_standardize1_pack(actual, expected); expect_near_vec(actual.g, expected.g, tol); } }; TEST_P(standardize_fixture, standardize1_elnet) { Standardize1Pack actual( X, y, w, ju, isd, intr); Standardize1Pack expected(actual); actual.standardize(); expected.standardize_legacy(); check_standardize1_pack(actual, expected); } TEST_P(standardize_fixture, standardize_elnet) { StandardizePack actual( X, y, w, ju, isd, intr); StandardizePack expected(actual); actual.standardize(); expected.standardize_legacy(); check_standardize_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( StandardizeSuite, standardize_fixture, // combination of inputs: (seed, n, p, isd, intr) testing::Combine( testing::Values(10, 23, 145, 241, 412, 23968, 31), testing::Values(10, 20, 30, 50), testing::Values(10, 20, 30, 40), testing::Bool(), testing::Bool() ), // more informative test name [](const testing::TestParamInfo& info) { std::string name = std::string("seed_") + std::to_string(std::get<0>(info.param)) + '_' + "n_" + std::to_string(std::get<1>(info.param)) + '_' + "p_" + std::to_string(std::get<2>(info.param)) + '_' + "isd_" + std::to_string(std::get<3>(info.param)) + '_' + "intr_" + std::to_string(std::get<4>(info.param)); return name; } ); // ================================================================================= struct SpStandardize1Pack : Standardize1Pack { using base_t = Standardize1Pack; const Eigen::SparseMatrix X; SpStandardize1Pack( const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXi& _ju, bool _isd, bool _intr) : base_t({}, _y, _w, _ju, _isd, _intr) , X(_X) {} void standardize() { SpStandardize1::eval(X, y, w, isd, intr, ju, xm, xs, ym, ys, xv); } void standardize_legacy() { int i_isd = isd; 
int i_intr = intr; int no = X.rows(); int ni = X.cols(); auto x_inner = make_sp_inner_idx_1idx(X); auto x_outer = make_sp_outer_idx_1idx(X); spstandard1_( &no, &ni, const_cast(X.valuePtr()), x_outer.data(), x_inner.data(), y.data(), w.data(), ju.data(), &i_isd, &i_intr, xm.data(), xs.data(), &ym, &ys, xv.data(), &jerr); } }; struct SpStandardizePack : StandardizePack { using base_t = StandardizePack; const Eigen::SparseMatrix X; SpStandardizePack( const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXi& _ju, bool _isd, bool _intr) : base_t({}, _y, _w, _ju, _isd, _intr) , X(_X) {} void standardize() { SpStandardize::eval(X, y, w, isd, intr, ju, g, xm, xs, ym, ys, xv); } void standardize_legacy() { int i_isd = isd; int i_intr = intr; int no = X.rows(); int ni = X.cols(); auto x_inner = make_sp_inner_idx_1idx(X); auto x_outer = make_sp_outer_idx_1idx(X); spstandard_( &no, &ni, const_cast(X.valuePtr()), x_outer.data(), x_inner.data(), y.data(), w.data(), ju.data(), &i_isd, &i_intr, g.data(), xm.data(), xs.data(), &ym, &ys, xv.data(), &jerr); } }; struct sp_standardize_fixture : base_fixture, testing::WithParamInterface< std::tuple > { void SetUp() override { int seed, n, p; std::tie(seed, n, p, isd, intr) = GetParam(); DataGen dgen(seed); X = dgen.make_X_sparse(n, p); auto beta = dgen.make_beta(p); y = dgen.make_y(X, beta); w = dgen.make_w(n); ju = dgen.make_ju(p); } protected: Eigen::SparseMatrix X; Eigen::VectorXd y, w; Eigen::VectorXi ju; bool isd, intr; // double precision error: // absolute difference shouldn't be any more than this amount static constexpr double tol = 1e-14; void check_standardize1_pack(const Standardize1Pack& actual, const Standardize1Pack& expected) { expect_near_vec(actual.y, expected.y, tol); expect_near_vec(actual.w, expected.w, tol); expect_near_vec(actual.xm, expected.xm, tol); expect_near_vec(actual.xs, expected.xs, tol); expect_near_vec(actual.xv, expected.xv, tol); 
EXPECT_NEAR(actual.ym, expected.ym, tol); EXPECT_NEAR(actual.ys, expected.ys, tol); } void check_standardize_pack(const StandardizePack& actual, const StandardizePack& expected) { check_standardize1_pack(actual, expected); expect_near_vec(actual.g, expected.g, tol); } }; TEST_P(sp_standardize_fixture, sp_standardize1_elnet) { SpStandardize1Pack actual( X, y, w, ju, isd, intr); SpStandardize1Pack expected(actual); actual.standardize(); expected.standardize_legacy(); check_standardize1_pack(actual, expected); } TEST_P(sp_standardize_fixture, sp_standardize_elnet) { SpStandardizePack actual( X, y, w, ju, isd, intr); SpStandardizePack expected(actual); actual.standardize(); expected.standardize_legacy(); check_standardize_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpStandardizeSuite, sp_standardize_fixture, // combination of inputs: (seed, n, p, isd, intr) testing::Combine( testing::Values(10, 23, 145, 241, 412, 23968, 31), testing::Values(10, 20, 30, 50), testing::Values(10, 20, 30, 40), testing::Bool(), testing::Bool() ), // more informative test name [](const testing::TestParamInfo& info) { std::string name = std::string("seed_") + std::to_string(std::get<0>(info.param)) + '_' + "n_" + std::to_string(std::get<1>(info.param)) + '_' + "p_" + std::to_string(std::get<2>(info.param)) + '_' + "isd_" + std::to_string(std::get<3>(info.param)) + '_' + "intr_" + std::to_string(std::get<4>(info.param)); return name; } ); } // namespace glmnetpp glmnet/src/glmnetpp/test/chkvars_unittest.cpp0000644000175000017500000001170114140040573021370 0ustar nileshnilesh#include #include #include #include #include #include #include namespace glmnetpp { struct ChkvarsPack { const Eigen::MatrixXd& X; Eigen::VectorXi ju; ChkvarsPack(const Eigen::MatrixXd& _X) : X(_X), ju(X.cols()) { ju.setZero(); } void chkvars() { Chkvars::eval(X, ju); } void chkvars_legacy() { int no = X.rows(); int ni = X.cols(); chkvars_(&no, &ni, const_cast(X.data()), ju.data()); } }; struct chkvars_fixture : 
base_fixture, testing::WithParamInterface< std::tuple > { protected: auto gen_data(size_t seed, size_t n, size_t p, bool force_same_value) { std::mt19937 gen; gen.seed(seed); std::normal_distribution norm(0.,1.); Eigen::MatrixXd X; X = X.NullaryExpr(n, p, [&](auto, auto) { return norm(gen); }); if (force_same_value) { std::bernoulli_distribution bern_half(0.5); for (size_t j = 0; j < p; ++j) { if (bern_half(gen)) { X.col(j).array() = X(0, j); } } } return X; } void check_pack(const ChkvarsPack& actual, const ChkvarsPack& expected) { expect_eq_vec(actual.ju, expected.ju); } }; TEST_P(chkvars_fixture, chkvars_elnet) { int seed, n, p; bool force_same_value; std::tie(seed, n, p, force_same_value) = GetParam(); auto X = gen_data(seed, n, p, force_same_value); ChkvarsPack actual(X), expected(X); actual.chkvars(); expected.chkvars_legacy(); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( ChkvarsSuite, chkvars_fixture, testing::Combine( testing::Values(10, 23, 145, 241, 412, 23968, 31), testing::Values(10, 20, 30, 50), testing::Values(10, 20, 30, 40), testing::Bool() ), [](const testing::TestParamInfo& info) { std::string name = std::string("seed_") + std::to_string(std::get<0>(info.param)) + '_' + "n_" + std::to_string(std::get<1>(info.param)) + '_' + "p_" + std::to_string(std::get<2>(info.param)) + '_' + "force_" + std::to_string(std::get<3>(info.param)); return name; } ); // ======================================================== struct SpChkvarsPack { const Eigen::SparseMatrix& X; Eigen::VectorXi ju; SpChkvarsPack(const Eigen::SparseMatrix& _X) : X(_X), ju(X.cols()) { ju.setZero(); } void chkvars() { SpChkvars::eval(X, ju); } void chkvars_legacy() { int no = X.rows(); int ni = X.cols(); spchkvars_(&no, &ni, const_cast(X.valuePtr()), const_cast(X.outerIndexPtr()), ju.data()); } }; struct sp_chkvars_fixture : base_fixture, testing::WithParamInterface< std::tuple > { protected: auto gen_data(size_t seed, size_t n, size_t p, bool force_same_value) { DataGen 
dgen(seed); auto X = dgen.make_X_sparse(n, p); if (force_same_value) { std::mt19937 gen(seed); std::bernoulli_distribution bern_half(0.5); for (int j = 0; j < X.cols(); ++j) { if (bern_half(gen)) { auto t = X.coeff(0,j); for (int i = 0; i < X.rows(); ++i) { X.coeffRef(i, j) = t; } } } } X.makeCompressed(); return X; } void check_pack(const SpChkvarsPack& actual, const SpChkvarsPack& expected) { expect_eq_vec(actual.ju, expected.ju); } }; TEST_P(sp_chkvars_fixture, sp_chkvars_elnet) { int seed, n, p; bool force_same_value; std::tie(seed, n, p, force_same_value) = GetParam(); auto X = gen_data(seed, n, p, force_same_value); SpChkvarsPack actual(X), expected(X); actual.chkvars(); expected.chkvars_legacy(); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpChkvarsSuite, sp_chkvars_fixture, testing::Combine( testing::Values(10, 23, 145, 241, 412, 23968, 31), testing::Values(10, 20, 30, 50), testing::Values(10, 20, 30, 40), testing::Bool() ), [](const testing::TestParamInfo& info) { std::string name = std::string("seed_") + std::to_string(std::get<0>(info.param)) + '_' + "n_" + std::to_string(std::get<1>(info.param)) + '_' + "p_" + std::to_string(std::get<2>(info.param)) + '_' + "force_" + std::to_string(std::get<3>(info.param)); return name; } ); } // namespace glmnetpp glmnet/src/glmnetpp/test/util/0000755000175000017500000000000014140040573016241 5ustar nileshnileshglmnet/src/glmnetpp/test/util/type_traits_unittest.cpp0000644000175000017500000000335614140040573023262 0ustar nileshnilesh#include "gtest/gtest.h" #include #include #include namespace glmnetpp { namespace util { // Pair is expected to be a tuple // where T is the type to test and bool is true if T is expected to be dense. 
template struct is_dense_fixture : ::testing::Test {}; using list_t = ::testing::Types< std::tuple, std::tuple, std::tuple, std::tuple, std::false_type>, std::tuple>, std::false_type>, std::tuple>, std::false_type>, std::tuple, std::false_type>, std::tuple, std::tuple, std::tuple, std::tuple, std::tuple, std::tuple, std::tuple, std::tuple >; TYPED_TEST_SUITE(is_dense_fixture, list_t,); template struct CheckDense { static_assert(is_dense::value, "Dense check failed."); }; template struct CheckDense { static_assert(!is_dense::value, "Sparse check failed."); }; TYPED_TEST(is_dense_fixture, test_double) { using T = std::tuple_element_t<0, TypeParam>; using is_dense_type = std::tuple_element_t<1, TypeParam>; CheckDense::value, T> dummy; static_cast(dummy); } } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/test/elnet_driver/0000755000175000017500000000000014140040573017746 5ustar nileshnileshglmnet/src/glmnetpp/test/elnet_driver/gaussian_unittest.cpp0000644000175000017500000002577614140040573024244 0ustar nileshnilesh#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include namespace glmnetpp { struct GaussianDriverPackBase { const double thr = 1e-14; const int maxit, nx, ne, nlam; const double alpha, flmin; const bool isd, intr, ka; Eigen::VectorXd y; Eigen::VectorXd w; Eigen::MatrixXd cl; Eigen::MatrixXd ca; Eigen::VectorXd a0; Eigen::VectorXi ia; Eigen::VectorXi nin; Eigen::VectorXd rsq; Eigen::VectorXd alm; int nlp = 0, jerr = 0, lmu = 0; const Eigen::VectorXd& ulam; const Eigen::VectorXd& vp; const Eigen::VectorXi& jd; GaussianDriverPackBase( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, bool _isd, bool _intr, bool _ka, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _jd) : maxit(_maxit), nx(_nx), ne(_ne), nlam(_nlam), 
alpha(_alpha) , flmin(_flmin), isd(_isd), intr(_intr), ka(_ka) , y(_y) , w(_w) , cl(_cl) , ca(_nx, _nlam) , a0(nlam) , ia(_nx) , nin(_nlam) , rsq(_nlam) , alm(_nlam) , ulam(_ulam) , vp(_vp) , jd(_jd) { ca.setZero(); a0.setZero(); ia.setZero(); nin.setZero(); rsq.setZero(); alm.setZero(); } }; struct GaussianDriverPack : GaussianDriverPackBase { using base_t = GaussianDriverPackBase; Eigen::MatrixXd X; GaussianDriverPack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, bool _isd, bool _intr, bool _ka, const Eigen::MatrixXd& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _jd) : base_t(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _isd, _intr, _ka, _y, _w, _ulam, _vp, _cl, _jd) , X(_X) {} void fit() { using elnet_driver_t = ElnetDriver< util::glm_type::gaussian>; elnet_driver_t elnet_driver; elnet_driver.fit( ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr, mock_setpb, InternalParams()); } void fit_transl() { transl::elnet( ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr); } }; struct gaussian_driver_fixture_base : base_fixture , testing::WithParamInterface< std::tuple > { protected: static constexpr double tol = 1e-15; void check_pack(const GaussianDriverPackBase& actual, const GaussianDriverPackBase& expected) { expect_double_eq_vec(actual.w, expected.w); expect_double_eq_mat(actual.cl, expected.cl); expect_near_vec(actual.a0, expected.a0, tol); expect_near_mat(actual.ca, expected.ca, tol); expect_near_vec(actual.ia, expected.ia, 1); expect_eq_vec(actual.nin, expected.nin); expect_near_vec(actual.rsq, expected.rsq, tol); expect_double_eq_vec(actual.alm, expected.alm); EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); EXPECT_EQ(actual.lmu, expected.lmu); 
expect_near_vec(actual.y, expected.y, tol); } }; struct gaussian_driver_fixture : gaussian_driver_fixture_base { protected: using base_t = gaussian_driver_fixture_base; using base_t::tol; void check_pack(const GaussianDriverPack& actual, const GaussianDriverPack& expected) { base_t::check_pack(actual, expected); expect_double_eq_mat(actual.X, expected.X); } }; TEST_P(gaussian_driver_fixture, gaussian_driver_test) { int seed, n, p, maxit, nlam; double alpha, flmin; bool isd, intr, ka; std::tie(seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka) = GetParam(); DataGen dgen(seed); auto X = dgen.make_X(n, p); auto beta = dgen.make_beta(p); auto y = dgen.make_y(X, beta); auto w = dgen.make_w(n); auto cl = dgen.make_cl(p); auto vp = dgen.make_vp(p); auto nx = dgen.make_nx(p); int ne = dgen.make_ne(p); auto ulam = dgen.make_ulam(nlam); auto jd = dgen.make_jd(p); GaussianDriverPack actual( maxit, nx, ne, nlam, alpha, flmin, isd, intr, ka, X, y, w, ulam, vp, cl, jd); GaussianDriverPack expected(actual); std::thread actual_thr([&]() { actual.fit(); }); std::thread expected_thr([&]() { expected.fit_transl(); }); set_affinity(14, actual_thr.native_handle()); set_affinity(15, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( GaussianDriverSuite, gaussian_driver_fixture, testing::Combine( // seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka testing::Values(241, 412, 23968), testing::Values(10, 30, 50), testing::Values(5, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5), testing::Bool(), testing::Bool(), testing::Bool() ) ); // ======================================================= struct SpGaussianDriverPack : GaussianDriverPackBase { using base_t = GaussianDriverPackBase; Eigen::SparseMatrix X; SpGaussianDriverPack( int _maxit, int _nx, int _ne, int _nlam, double _alpha, double _flmin, bool _isd, bool _intr, bool _ka, 
const Eigen::SparseMatrix& _X, const Eigen::VectorXd& _y, const Eigen::VectorXd& _w, const Eigen::VectorXd& _ulam, const Eigen::VectorXd& _vp, const Eigen::MatrixXd& _cl, const Eigen::VectorXi& _jd) : base_t(_maxit, _nx, _ne, _nlam, _alpha, _flmin, _isd, _intr, _ka, _y, _w, _ulam, _vp, _cl, _jd) , X(_X) {} void fit() { using elnet_driver_t = ElnetDriver< util::glm_type::gaussian>; elnet_driver_t elnet_driver; elnet_driver.fit( ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr, mock_setpb, InternalParams()); } void fit_transl() { transl::spelnet( ka, alpha, X, y, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, lmu, a0, ca, ia, nin, rsq, alm, nlp, jerr); } }; struct sp_gaussian_driver_fixture : gaussian_driver_fixture_base { protected: using base_t = gaussian_driver_fixture_base; using base_t::tol; void check_pack(const SpGaussianDriverPack& actual, const SpGaussianDriverPack& expected) { Eigen::MatrixXd actual_X_dense = actual.X; Eigen::MatrixXd expected_X_dense = expected.X; expect_double_eq_mat(actual_X_dense, expected_X_dense); expect_double_eq_vec(actual.w, expected.w); expect_double_eq_mat(actual.cl, expected.cl); expect_near_vec(actual.a0, expected.a0, tol); expect_near_mat(actual.ca, expected.ca, 1e-14); expect_near_vec(actual.ia, expected.ia, 1); expect_eq_vec(actual.nin, expected.nin); expect_near_vec(actual.rsq, expected.rsq, 1e-14); // This check loosens expect_near_vec. // When alm is large (>= 1), check for exact equality. // Otherwise, if too small, put a tolerance of 1e-15. 
EXPECT_EQ(actual.alm.size(), expected.alm.size()); for (int i = 0; i < actual.alm.size(); ++i) { if (actual.alm[i] < 1) { EXPECT_NEAR(actual.alm[i], expected.alm[i], 1e-15); } else { EXPECT_DOUBLE_EQ(actual.alm[i], expected.alm[i]); } } EXPECT_EQ(actual.nlp, expected.nlp); EXPECT_EQ(actual.jerr, expected.jerr); EXPECT_EQ(actual.lmu, expected.lmu); expect_near_vec(actual.y, expected.y, 1e-14); } }; TEST_P(sp_gaussian_driver_fixture, sp_gaussian_driver_test) { int seed, n, p, maxit, nlam; double alpha, flmin; bool isd, intr, ka; std::tie(seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka) = GetParam(); DataGen dgen(seed); auto X = dgen.make_X_sparse(n, p); auto beta = dgen.make_beta(p); auto y = dgen.make_y(X, beta); auto w = dgen.make_w(n); auto jd = dgen.make_jd(p); auto vp = dgen.make_vp(p); auto cl = dgen.make_cl(p); auto nx = dgen.make_nx(p); int ne = dgen.make_ne(p); auto ulam = dgen.make_ulam(nlam); SpGaussianDriverPack actual( maxit, nx, ne, nlam, alpha, flmin, isd, intr, ka, X, y, w, ulam, vp, cl, jd); SpGaussianDriverPack expected(actual); std::thread actual_thr([&]() { actual.fit(); }); std::thread expected_thr([&]() { expected.fit_transl(); }); set_affinity(14, actual_thr.native_handle()); set_affinity(15, expected_thr.native_handle()); actual_thr.join(); expected_thr.join(); check_pack(actual, expected); } INSTANTIATE_TEST_SUITE_P( SpGaussianDriverSuite, sp_gaussian_driver_fixture, testing::Combine( // seed, n, p, maxit, nlam, alpha, flmin, isd, intr, ka testing::Values(241, 412, 23968), testing::Values(10, 30, 50), testing::Values(5, 40, 60), testing::Values(1, 50, 100), testing::Values(1, 10), testing::Values(0.0, 0.5, 1.0), testing::Values(0.5, 1.0, 1.5), testing::Bool(), testing::Bool(), testing::Bool() ) ); } // namespace glmnetpp glmnet/src/glmnetpp/docs/0000755000175000017500000000000014140040573015235 5ustar nileshnileshglmnet/src/glmnetpp/docs/figs/0000755000175000017500000000000014140040573016165 5ustar 
nileshnileshglmnet/src/glmnetpp/docs/figs/gaussian_benchmark_fig.png0000644000175000017500000012151514140040573023351 0ustar nileshnileshPNG  IHDRXr9tEXtSoftwareMatplotlib version3.4.3, https://matplotlib.org/̞D pHYsaa?iIDATxw|TṲLB:%B*QPQ*ֲ ߟ+ qq_Y)t&JB 2dH!e$1f=s&9\1SV_7 ,5Xk$H`_#F ~ ,5Xk$H`_#U4gY,]M*Ĕ'v]*ذaٳWgoV.]dX4lذ S6mAi]r.B*..N}zcƌꫯJzz74n8曲l+?x۰a.R7wܡYf믯q ukz饗#_.2y5r4j(͝;Ww^yiذaڵkh@sرC/:^z9@d4)g6L߾}Mpp9tC=zx\tcѣ:thV^m$W_}ս-77t$%%y+L֭Mff{ۻk$EըΪ*y֬YS{1!!!fm/6̙3}زό$3|4eƎÖ p8Ǎ1̟?H2K,mX@Ot鷓 Zg?f]wf5qDZJ$9-^X7|"""eǏ0vu 8Pڱc$髯ҨQԦMSNzt:rvڹ >\]vxc׮]k'ݮxxtX,'[n0`/_^>I,66V\s{[˖-u 7諯R~~WhssJ8'<<\111^ h|ѡC?^qiӦ²*,,\Z|Ν[w}y $+:uYϕ}I:{|9b۶mڻwϟ뮻νg)Sv˖-Zv  Iխ[7=3/$I?.*{޽j߾o_|q ,@UqG h:v[nEf?^n IN8q*v2"vݽ-_:k7n$ij۶Aɓ'.H3gԯ>}H*p|;{PR:]7-ҕW^ &%%:Ij׮FfS>}|8f8fs&P{$&G}ӧWdRFyyylu-ܢm߾d֭zꩧpx+=cǎرc}tСz!OtEnfaԥK2wU999:zÇW %khs8ǓG h:v쨛odyӫt␐2@UnZ*dy7%ߜl?loFZgM\s5ojڴi2224tPEDDSNd/rRVVYgԲeKwJ̾T[nV}Wohڴi駟_ ^-[fTܷ%&#Ω\MG z)}z5}Ւ%Kp8<#]z{$SZvnwmذc[U묉_;w38pݏN;wԟjp(44KVUV_~Q%IW_}/ qu?_|^dرc?} qNx?@2{l#ɬYc]f3L=re˖iv1bDWg}H2K,?wwyX,棏>2s1m믿nx ӯ_?ӧO**MMN[oe^z%mze<&&&J/M۶mɓ;cz-s@j*w9IgϞE3/ILL4vlܸ***26aaafԩѣ 7k4~99g8$s8;X@SQ`g1&Lj`W0xg=Gyř`s盅 [+̐!Cn7-[4&M2Lbo^i{N;v 0 ,0?ӦM?,Zk1lٲŌ1„(3n8Z\-+~3~ԩ&&&\r%?G9IfҤI?6]tq1Ƥ'͛P3trV(qqΙ)Ωq%@X)5*11QuSj۶mѣk5Y,M4IoZqN54iC7n|xݒ%K䕠4<9F`o&@cE?ً,5F`_.\.>pY,_74a""oM#1_#X4NSn$f4 8cX4b*##MA)QQQB458.cX4b%]VJnj1QZZ$u>n 10wWXV edZN;k޼SBBB$Iiiijժ &bT1CZqJn(Y"44-J kvP}8.cX~`LwaKC5!6p?aWK`O>^^,U_742$RJ.]c2z-:w_mE P ɓ'M=@SE}2|x a=@'r1wj堶8d*ȥJV?pNZt.8qBQQQ_|}0Zh1#|lwZr jdӵ%5ur ڷ;$IU_GV4od$K otrLL|"Ʃ:X>V2}gH RCI`|8 C1}eZzTthذa4ydhB#GԖ-[tW(,,L[tXş};<+..N7t$I%\"Ibѭ>w'xB *Sw>}sϹ_{:sdս{w;^z'@cBSs$|lé&Di`X-:9mY94sIcw=&Iݷ2-pX?۠BX71)(GMn(((H?Owׯ֮] ȑ#*3<_|Q۷oK/~Z|A jW1NMba ,t*'!J͂Կ]~ޗvMqx۝)p_(wk}Wj-բ7i~ _ [Թ,n{nBvtE$^P~K/JHHΝ;յk2~;v[o?_YYY SLL$UVC֣GGsO?-84h:w,Izgk$uA۶m̙35a„jԌbq1N͐QB~M9)8%IC:7$nOqHu,<@A6nMս"g}7Ѐ 0|ƍZd܏ݻK7[NW]uڵkp :T\v7NsΕT~qIgM8ѣm/BM1N0ˇve",Xm"풤 ;.}L.;c7e4Wk H ?ۏi 
/$ЦmύyYfYYYꪫ/)׺u2۲5rH9R|Zld9ڋ;V=~3f]YGfAS3$|d>QL},Ȧ9ږP϶>laYKwՊ]d˻Ю-kkdQZͺ</X,jO?Oo^goǏkJHH$]֣LPP$|p||O>DԪU+IRllڴi~%85)>Tz6w<5FXtm$MvCt[hmdӊ]t5)(˦I&)==]cǎ՚5kg-ZHv[Yv/z=$&&b믿ѣG4gܸq7oϟ_&:uMzK;w͛5{l<hqm<)3%'|$?K`}ve)*4P]ҥZ\>P͂lZqeTM6m?tjĈի&O(YeÙ-[jΜ9?=\M>]Gmjԩz_wu:~rrrtW{;{iի9sC^;hqbjr_Fp(22Rsfs%ILQA};ROj[.{UO;WK^[cYzsu5%&g/Ҁh͹|Z$i޽СvR*6ގK|PqW]8MF_oꑼajB~ٟ֕e{t,@Z4ӸA5h}r En [ŖƆ,;>>Ţ ;:X3rފǯ讠;+*4Pdh{?)#zwKM_'O.Ţɓ'WZn޽vzo~X UŸzu4CF:{ԧwVL m9wW+=$(oXk̙֬3ջwJ˭\Rcǎĉ~z]}պꫵe˖zjic )I`m:)vf$IO:Gku:BaڞY?XV_&4n8o/\>9=߿~zjmdXVlVz)?&*D[6H?v[X]~m˝X[]c5jGNY?)͑/X&MҨQ4|]jUr#GԪU*<&??_QJֿ.{`wS(>F#yoztd:;OVa쿒:ҮiYqOJ$r|. 1kPg5мy/h͚5U*XubccZ1ӦMԩSkp0CR vA>YȥUtE&*NסE3}vWƾ~;1Viն YPPV>-[*((ȫSQ}ѣZ :A8F#ցjŲuv)S衇rv8JHHiӁ+ZDRZ=G֑d~=aAgXz9gk?iJ9X 1r~hlV:t蠔>|A)j׮Vu@]#_uUkݺuJKKSۜN/_~[<ȑ#ێ9 `6.͇*a@qq]7 Z(evItYWߏI|t_I;'+Ib5X͛[1 RvTTT$@fS@@@}S8B:֥^͛7{lԽ{w=ceWN'Ovo[x꺹5,e)4Ȧ.ZO%[%S.4ƶud>S fk̟4A2bQ``}!iZjpѬY35o\={$?^SLqj…z믿vZw}F6ZgH٬gJ,c2e$Is'Fo~Db#$uiTGIN-*UJIIq2dΝYfO>ϵ`wl<{׿*?1ZV=iYuزb//UN.BúUex5Xudnv)Yti%__? MU {M;hŮca1u=ÚZ?]ڜ"Ezs.Â靃uWkanJ1X綉u@=ip#B8$I}⣪|X]4KR-/_vI tNkIE7 ;O|Nj?iD hH`գ):7 R|tHTUtI۾ޔ ]~[ɽhH`գ.}5= TV~6ZC˛ zyᯒZEؽ~o:W[u},PH`գ%Vtj.Iaq냕tDb#u^75綁1FYE^X4f$U\4:X{Vzv^[Ȉn u,8@o ;PvSY+Ad귣ْ{u'Pv~vd^mk{޺d{Ю-[mhŮn$I]ń*YPo:~)s{fɒF#r{M3oKR~K?X%;|,e$IUbGa;}\Fvo!nh6͸yF"u϶#n"Xdé;W%JL?}k՞#Y-r9ϗի .:-ܒf/!UO6b%w"5e׸oInN[ո.h}ڨe4iz}鰯V=+tN-ԹUzq=-‚uNW9^z\H[9ûԸ`52zZj) h 61W꺰ssmOq]>m}|nS.!Ijv բWEX{Pp<1:XƘjފߔSۨvA{/?جMnNH~Q~Nu@ j`vQ͢C.w,]gI;KwUH`5@}#Bm;8ke;jΣ Y롅b]%I,ܡ7ǭUA Y5csIg_2z_%I+y:o?{px=:$SFwsVuA֏gI`c8rR!)tMݤK:+ϑ$zy!I, d!5ҕW,LV~^v$K(*4\IߖыN ?E*Lƒ_/O[f=:CuznzꞒ~ث$~VeXܣ[+%3WM.-5^X9+[@ р]p*U:X.ڡBqݴƁu}dHN֔/6jJXe*3н}ˡL}!IғΑbIn+El=F9IbH`5`qvujLH~+e &Iݷ$D ꭱdZ/?6uhH`5pgitXnlZmכ)jWyTH ֌3ԻwoEDD(""BIII]a9sbx]֭ڵkNG֭[+<&""B)))ž5SsY-oDzڷ;۱l5o{uu΍լ[SPUѽS~IUW]+R]tQ׮]/*,,L?SX,Źbߊ׹7I˺*V5toƟ=Mw}Ny$o~*tj޼yVRRR岲֒|9GCvA[i >lMrqז} iΣヵ- _-4yf)88Xw}K{֭}}Wr4dZo_7 8EFF8.ilqhjeLÇWN4s*O?Jk&p C}N(͹}"Xo EގKs?mL 暹\.o+t:yfnݺ[oB1X!%9CZ9n%L˗k߾}ڼyLKjܸqkʔ)=[o_t7k;|4r#靃35?Dv@w iJIIQddzE.$%''j=w;qN*::Z ʕ+^PS綉м4CLmB 
E7*$P@EaZeTEX6jjwI{o86ŵ"E'=T˝sTXp8]FEN_RAǿS2#:kk`ƨ1ߍέµaڞrR9)ԉSfDN2Ko)PvSꭟ`x$N:$V诐 [P3NH<+7R~SN;tqs3QE}wIu@Y$Z Ѐ*/r*3Н:SNr)TfnNd'J?cY:U5Vd(/XHN2y2ANgIQI"#Txf*t:UVd)(lZd;:x_n.@C g6 UZ8Gu:=T,=e_G~:6 Gv?l,2$P6_@E1UL N'ΒLptRc ._ ,~*AdJJ XO%JJReNow'OW6e8n7& lR111ެhBl QȐ*cQvS'Ϝ:Sӯ3s 2RvS:[FɭȒ$W ܗN)sƜytQFNG+LrG$9E'<J6dgq\PUV RN`>^#F]wݥ?P Ea PB5.ɼSK[FyJ8*Yd^$ɑW$G^ӫ~NE* 9jVvQ 5 B9(r6:3+/UPj .5rGg*3Qs\_Xo^|EuIW]ux iF!!!JOOז-[b 1B _u[mPGV"CjV .9r O:}$Wg*)ҳ ]mwr,QRwy61UetFR̵$<.tzG%JUmDRP#yTMXk֬գGr8P~iZb , Y<,XÂu\^ӝ:3UfReJ>z2_GOVo}/{EJ'X>*4P5\'ikA-PyǕ]죚jxõ:S0vyIʦM y,x=3G$٬$b}^"##_7@c[[(g-29a)ѡAY-ExtRmYl2$I(I XtF2*٬yTq qI.Cus9NX, PhPDUoaE2Kx.x+1RV~tD/-f)QЪQyǝ9k nA_|>Ӿ}dѼytzTPcE@EZ.#Gn{ZcFNNdH&嬇`TX˗/דO>)I/eQFF> $4h6E͂,H=jq9233#IZpZjԨQڵkN&k ZJZpF!I:qvN&kS'Oq),,L6l⩅zix-ujРAJNNe]&xpWǎ /x4hbhFSXӧOWnnnʮ^Z׿js:4AJ`m۶Mڵӽޫ:z{_QQ6mڤwyGC ј1c^i?P7nonIC6Mɑ$OwqnVFjc7*r\ڴi߯\hB}U-Q}r8Tff"""|Єy;.!1qZ۷*ڭ5Xk$H`y={n-ZH$/MXǏյkW]yJII$M8Q?N&k ?O PrrBCCnj V3fwފPDDcϟݻnW^oj%[ˊޥK߿kZn֮]w=znZn+Wjر8q֯_ZW_}lR?x-1Dzz\UW]+R]tQ׮]/*,,L?S|M]~zGu9Wo׸/^K`]tEݯ-\.^y]r%5tj޼yVRRReVZÇ{l9rVZUar8ƀ84FުW^ѥ^kת@uVV]7oVRR/R{neSSS-66V?m4M:Zmhs@cX={Ν;uj5\׫SNժ[nڰaV^{G&LжmۼTM2EǁV7/k#$)22RO>d RΝ%I К5koj̙eȑ#ێ9 ֺ\ qhӦM&Cu\/w_RR;M<ٽmk j:vX}ENJL2EW\qڵk'OjܹZt-Z$I?~ڶmiӦI|A :TFyiڵ5kXzry<4?^ݺuӥ^5khѢE$IJIIq2dΝYfO>ϵ`[]Y1EDDhvp8LEDD9 v\BAmctRoUHXozXBzR``x[@֧~oVv]K.bqX,$P#^K`=䓚:uqY^&k3k٦ &>Vu$/N!t:zWh"ݻ"oN&k ͛7_~-[x+;P^K`-Y[UnVX\s̙]s5/js*4QJ`EFF׷Jj={{9=#={z S*++mʨuv]KVk`ڵYX8$N]P'ժU+oTxX]j=ry@j= K$H`_ִit+<<\ZW_;vTz̜9sdX<vZ w e˖iҤI駟xbjĈήﯧ.gZp9sUVZn. X,3effJbbb*-D%$$hںuk}4uF`r4yd]pٳgu_{Vff^{5 2D[nU|||wv8u~F#5i$mٲEͫ\RRƏ}jС/ԲeK͜9ӦMSddPw91cQO_}/_:T믿^O+Ʉeff*""V á%96)/t%N6oެ+ mSqh.5i$͝;W_}Õ*ITHH$ij۶M&Iz4x`uYzW~q>X3f̐$ 6cٳu뭷Je^ĉ;h 0@+WԹ[_@5Smaxj]XM\AM߭rRy:S89JNс9J..W?:h1:SRsuDqPF /zjqhr t =SRIgX-jP%Ą(!&P%D(YP=H`BK3rT'NOKN.8kIP)~ajx ,z`ѓ+WaPBTI*!:T!3dCuTRt*GO*,w *>DUO XUWԡ(&9*=jZGAU2/&8Qa6i~u)NQ#8)U:Au*Iuđ:7 :5/TDUڬГƅh21:SWSSgYBT͂lJ =5/Tun6QШHO>#Iu =GJZPNR,(i~hP .u/~:Q9:]p:b#KAU<ݯy\]6֡+$_1|SdU,?Ez.^*A6*D@[t^A ԻBH95/T*GO*UV4vI? 
>^WS_鎼JZ֑!e֟*y",XV5$@9]F8Zfӣ"H`CyN:STѡjeWZ=m~r\:T@'y?I~KHIn !fzg\zcN}jv_WL2Odhe?3ܙb WHoc$UL F_M7@$E\E6ho[(j/U'2LJo(o.ut$X=Ž*9=Gr[6s'VVa1e [\tt~}LA6R( HVhE3<$P'飉udb) bQ8_#F ~ ,5Xk$H`_#F ~ ,5Xk$H`#Ir8>n hJ⑒s?mCKɓ'%I >n @'O*22+H9?4Ʊo}׀\.>|XX,5p(!!APDD[7n 7n 'OM6ZkqN5~7>K~7 ~ZmcF`IZJ]~QwBݴoW%sj))YM nZM"k$H`yIpp}Y)~呂~呂~7~WWS}bb%MM:q_c ,5Xk$H`y_Wo^v] ?&شit+<<\ZW_;vx6l,(QF)44TZң>Jw>uݽ?//O&MRkVG񨣡Yڷo_E&MxuUWM6X,Z`~cynZ!!!>|vQ&==]ƍSDD4qDeeeyٴi."v%$$W^U~S^ԬY3iFǏÇ=(gdeR%[o-ӧ/ܣLcޒ]X,zWeg~/]TWpp:w9su1N+vH9eڼyLPPy֭[͝wȋ#G|ݴ9r={ٲeٰa+MvLVVСC͝wiRRR܏L"ӳgO3|p~z7ߘ-Z)SKUϚ=zxѣw}IHH0}Yv3믿UVxԑh{9Z1f„ /SzzGv1MII1X,fϞ=2 zW37jz!m6l6pzo}#i{خ1318L$`fҤINӴiL6͇򞴴4#,[̽mСoXV6c a벹5Ϛ>}/##绷m߾H2V24>4:u2.8z.řW_}ս-##O?c̶mی$fwbCcywLttG{1ӭ[:QՔaH2woKLL4?S1 &L0GrGm~ylk,olzq1cƘ#Gu|GS11NSލ118)TPPuimVUÇתU|2̔$xlOԢE SSLQNN{ߪUԫW/ƺ9RC[n]ԦMuQƍSrr$iݺu*,,ݻwWv׹}ǺeX.m޽JMM4hy.3|pYV^]/VPPȑ#c8qzS;X,>}t5o\ӫ1両{ҥjժu{GǏwk ȑ#׿'אYj*:J44=bb8Qg^Eqg# ݱct:=~$)66VZ=.K'O\={tMJLLT6mi&=cڱc IRjjjI>4h ͙3GݺuSJJN.H[lQjj|ƺ| (##Cz{[cg*igy(}}[j? @111e:tP}u~oc=c*""½P+Wjʔ)JIIo!a/5\:hϞ=z'tWhժUlMz 5\㱽!_>2C .1N#!!!!iכ8bҤIڲe~wuy^Ժuk]zڳg:uT+w4ho~aw]qjӦ{[c(P7p11cǾzw iڴi zō7~ޫW/[:uҥKu饗e׸qd=7]gPbbxQ11N~s ahB6*GQ\\Zwk-YD4h$iݒrߓ} ATTvݻw+..N(S:7>߿_twTZ1^vV{4EEEJOOo?%xbo&3h i߾}nKرZhsX$XB;v8pwEY]QF )?q5111N&)F4`}wm.K}|ز3ӗ_~0lذAԺukIRRR6oDZCsϭv{[VV٣֭[k ;vPrr:7>Ϟ=[ZҨQ*-uq}V^q}322n:w^.&%%i*,,tYxu!) vڥ?j޼YٰaV{yC<Ǐ{\7] >}_}fywRRG%eg}Uk{8kךb z+73fΜ9f۶m殻2QQQ7$s4K.hNN1Ƙݻw{ά]ݻ|Wcǎ/vQr#F 6 -[mK{ҥK޽{͏?hnZhaҌ1ŷ'm׮ڵkMRRIJJr\tvڙ{c{c'O4ׯ7ׯ7oa֯_MTTꫯ̦Mѣ˽t~ի?`tqጌ kne3o<[V?Ǜ 6xܑdʕ̆ ̞={lZliƏ>GCɓ'#._7 Vq Xiƌn L!_#f1_7#H`_#F ~ ,5Xk$H`_#F ~ ,9sbhڵn hH`MLIbuС2 ={z\}nfuEEÆ l~~{1iF!!!4h/^\nٕ+W /Thh(++Vu6$?^ 0@X,n4k{8 8YYYzgu+&&FEsuDkuz3f諯RBB+-{뭷7иqof+?QnÆ K7xCwqf͚믿u64|{=Y,uzj1αcsiӧ4\@2{l#Cy:tѣWΕlN1Ƙ=zC[nFyWrssMNLRRG+´ndffFYhQꬪn͚55:[RSSMNN1ƘI&. 
!Wyyy&%%c̚5k$3{l6 h4QOsl6u]mv]'NԪUtIŋu7+""]v ?jY['N;vH+5JmڴQpp:u^N+猍UHHW*nM V֭5zh۷]}oV}n׹瞫/+mkh܈qqJ'88Xqqq^ h|ѡC?^qiӦ²*,,HӧOiBBB_f{^^{+*[:j5q-( @۷o/w=[ꩧ//33cǎ~q`zƏ_;w.sK]Jۧ8v >khzqnbA h:v쨛o eyUPPpֺBBBY6nZ*d8}ɷ%ߞl?loGZgM\s5ojڴi2224tPEDDSNd/rRVVYgԲe*&OJ ,ТEOkڴiկ_jt:u*QPPfq*WwzJ^~r_s5u>D߾}d9IW^/I={T@@֮]n]@6lV:kWΝ3(22#(^t?/B_|{޽{k%:u꤇~X?vڥ}_.{nc<ܹs$B=p@ևkhqnbA :uꤛoY3gTbb<4]w^{5͚5˽f~~fϞAÇ?O?pIG},]ծ~i9M2E{$(Iwق;e!rrrdZkgH?e(>|X_~{Sá?P}uůu}M1Nx?$H*[G};vGj>˵|rIѣG^xAt4hzM2Eiiiܹ>۷O=|5d :Twu<_]#F_.W:[SN7e2335i$o֐!C &bG}앨G}$IZv$Dr-Æ Ӳe=;wK/ 7ܠs=W/uxev'j͚5￯#Gh25]: "!u#Io222w?|X nہ_zs?iCKRxx71""ǭMPBB;>-j#""_t?Oj㰈; ,5Xk@t:UXXf@R``l6@@?:!@#fQjj222|8-@SC2!@#VصjJ$L|IZn0qH`H9Nw`׼ys_7HԪU+PM8cq*Y"44-J kvP}8.cX4r ?\jSSׄh ɓ'M=@S5@tR]r%:q™ۿ aXcX(((^S/ Ʃ:3l0w}PPP~GM>]ԯ_?]V .ԑ#Gt 7Tx|aaymܸQ ,о}\BBJvءe7n~gٳǽm֭ڴin&I'|gyF/o߮^zIO?>jԌb985BB}fQws#TK.zW$I/맗^zɽWBBvܩ]9ow?رz-RXX{}V<և(GӧΝ~ZRq07h uY_5\#IСmۦ3gj„ 3_8RbaK p?߸q,Y0{ai֭UW]v)<<\C$%''Wƍܹs%駟jܸqlٳG'Nh /Pa@FS3 is#}rj֬yVV*eʵnݺ̶l9R#G'|-[*99Y#GTAAA1vX=c_h̘1vIһ[f }5AS3$hB,Kڷo_5}t%$$H֮]Q?NҺ5tP}'e]VZIbccզMoo,@#98L!~oҤIJOOرcfٳG-mVnp֮];/~7y2X,uQ77n͛ NiӦ魷Ν;yf͞=[ow:-b#^6m?tjĈի&O(YeÙ-[jΜ9?=\M>]Gmjԩz_wu:~rrrtW{;{iի9sC^;hqbjr_Fp(22Rus<ݻW:tnusPJeq q!_u0 ~_'O.Ţɓ'WZn޽vzo~: 5kh̙ݻwV\cjĉZ~j]}ڲeK=;y:fL`eeeiܸqzw]i7|S_~}Qs9zտx CT{}V9L`M4IFZvժUeʍ9RVyu.pitǿhl_7 Јpa5iq ,QHH[rr (F/queW ŋe<ӦMԩS8[[Z`6nXyf2!!Anq2PcI[96*Dߓ֑Y)];cr iP;xuO~5fQjj222|r?"##k:>qj֥^͛7{lԽ{w=cf𒒒wy$/^ `: ヵrȡ͂ā$:BoOw~VsW's0~aY,nZZRaao%K2q*={zlk֬7o>~xmVӦM$=:t^u5Jڵk5k֬zo9]ozޛ}p@ulVscomStIV>h1luP1NXJNNVJJ!C4w\͚5K} Ie?7?ۏ((&m#+,E4tڑz[ ?j ,_<̳1m+բ<@{ \﫵zoF.P0Иy;.8*j4X ѡ\}$k{W)y%IAVj>3ᱬ|똖:E*y7SbL5UbLڷhV>o7hxH`Փm#+]2Zq4ar!unItr +_Ժ'b1-yT[;<,*tejLm:Y{UbB.7UbPk^MTm ej .BSCO-آWPteuv>c~;;GXqxN_Wkg#Y6B$U-y =GvՊ]Grq/ߥU.RuiAԽ?^<1x(SW$8x-jSjͺ[9qmL(O8_r6qi_U{xTx ԅ[B#CU I*/^g+[2r_Ү,J+ݭ6Qv%4sծ&@b7kwWҽ:ϗwePV:廎'TTj^`բuqKKl)[#bWtpF{*Rg+U-ÃszV[Q5N @e#F`5a絏ҟ>ۨwm:97E]Z-5c>jq YO%IjϘuΜ}s['ud/V=}j\1Q秷Nc}xάsٞgs3\RbVYX-fj-me궞qVK Z=,go-< ;-K]Gv$hHI.-ծy[,b#슍k`23s KMG<=zklqd^rh!ZdGlҫlZ&Vz{ɹ2-$H,3Tlٲp#H`5_M@8uQ'@R2r 
:p"#[(GkE*~!"K}ůUT*Ut}p_,_҄3)Y>w,eW1e)kyqR89<+9Fg/=.:?w.S~^rt.k3\g?iTy8\ey\Eɝ.#,SP<7wk&*WIrlVlVUi4=F?jVkh'_7 klƆ)׸Jjrplg&*JڹK9+NڹKU+۾Ru:˶DGy FQ=m܉9˙ GJޫ|4g"x&IZ+8ߙo ,V(1g$ʌ+;dZe ?$W1ΊRqg$X%{q|%xC{usXxZh͢&ēTJFU6UA]%?b*ŲF<]h& Xj(=Uifwf+u& ,R ~ ,5Xk$H`_#F ~%f̘޽{+""BJJJҿ ϙ3GaKn5}tuE}=z֯_=z{LDDv~mX꫹c~ꪫ<^⋚1c~ XEqqq<3BXԼy󔝝 eee)11Q =znZ@]Xyf%%%)//Oaaa/u[[nzջwoeff^Ӑ!CuVǗ{L~~ݯG/G`uM6lիu=h„ ڶm[e4~xWC_|-[j̙?m4EFF uzE#1g3|puԩҤTi_駟o&&"##kT/rybq:ڼy +88[91֔)StW]v:yΝKjѢEǫm۶6m$չsgeddW_uwK`iJIIQddzE.$%''j=t׉'tw*55U0`V\YhXXu0q qI.@ Xk$H`y.JMMUNNZloV&#N<3fhСPu9e˖JLLԝwީ5kxhjz7Ծ}{͞=[Çׂ aܹSVҳ>"1B_~vv5khѣGo]4{lXB]t)*駟V\ppڜ MT݅phھ}{]MX7p~mIRnn;

}7cǎԻwoeeeո꫚8qb}cƌQvvko߾V"##Pۘk#=F^HOOWpppt:7oTnUViFUV/^K`]tEݯ-\.^y]r%ժk Sppn}:s-XmJMM|9@c@oU+K/ڵkUPP?ںu?VnݺiÆ  &hٲe&kڴi:uW'91={jΝ 5zheggkթSjΝ;k6m7|ܲqqq:rǶ#G(..LLj"F`IRdd|IoV)Ir\/w_RR;M<ٽm%I5^ ȫ C_רQ4o<]Vff#^K`-\PǏױcX,r:U'--MǏWJJ"##ջwo-ZH]v$)99YV陏C ܹsSO'P.]`;OY1uE#F3B 5֓O>SK P^4h̘1$U^6M0A}$yq +EweqwQ_dC0?H@ V ?T)HbaRiU52vKT[X;E~j PP# b~첛lM9s'y✳EЅD-z4l0IRYYYغ_EM6EkW@w\ )ShٲeД)Sl믷E+ *333*B+Ztϟ\K.Vv駟ӧvXf~ES] .b\}.yzp 6mH3YPPjeddDF]P'joKgϞ5kh֭_)d 6LFB.!43͞=[+Wƍտϧ{O{39 bZzUQQ!ITn$I3fUW]I5zh 8PUUUZp9|0f@t8.Zx$iܸqa˗.]_tQN/5sLUTTG1bmۦo 蛸wH Z]P'hoM{`hXp4,8 FG#`hXp4,8 FG#`hXp4,8 FG#`hXp4X%%%[Mb{o-аY:SC t] fϞ5khӦMӧOmTYYRyyy{^edd=Ag9nK=yض)ssAUۃťzu'Έ>?[f+Okˇǵy|vInF1zj0y)kz)k tAu ~j}_ ~M >0( U3|R~c풒oZ/b{{[o;|53g]xi\M1k}8Vk؋7xJ0! e̓3Opycxva&i)p-P~! 铩JMĺkhr5K&槣]LRژƒFBޣ6B/p`mO mz޸,BR! 6XJgE.'D.:̸A97('vɭ`45[6$$ ,`,t:_x|/̷-6=cF:,;{-e/LA ?Xq+VZގM!Z3B, ךyVl}mE(WJ05ޏA$]Yg^rY.mdB4\,@BsJ" A~yfmYhf/H!h)%}(@3C/llZehq Ѻ{ckh~Y3H0鬳HuoBB3b]ʇvYgR.l>?̤`.|XCC_F3=YgT,C$*TK3MhXÅhWxcG4Ѿe-/ۼHZiۘhInmy({ m,}.۵zYg^h*$c=Xr)%Ӱ(4D = -->] @Ǻ3ȹ"`hXp4,8ZR;f&IqO@WGI{Q'hoC%ԩSѩSH9.qY{/~}gJOO庬}Ԩ@|222Cb܌+`܌+`ԩSϗ=P\88fq3q3XkoXn}edd8懣31qw-ka3sگ++Yb] Z3&p4,8Vx^=zJb܌+`܌+`]k_UW>uqw1Kqw q;nG ,8 F%?jjԨQڹsgtJJJt뭷*==]999}222x@Ok;Pjj _֦]__sjȐ!޽5c }ga3`64nI[iĉamm%E]w\ZpaM{>|^e˖u'>{qqqqB%|KYH9-mŊbK,fΜiYYYVYY] &ҥKk7o߾v`c̙3رcGuuup}CC <؊lϞ=vZٳ͛7/C$O>tMacσ~a+(( 6خ]lvm̎?6כ$۴i%\]~_믿nlʕa,X`j*{w۹sm&NhC۷[oeiӦWWW[nnM>W^nݺ /YlqWUUYQQ꫶~+--#Gڈ#ѯ_??~@߃x}g'N ɓ'$|Yx;fK,1e cV4~Gf>۷Ϟ{9x$Yii#3g]s5,1A[^^-\0ʼ^+ffo>dmx s\~[==w\4hPD:7sNdG .ׯ=쳭n{iu2s}[?On {Sڄ :zH1ER<qQ4ơ 5uS]]vޭ2ۭ"ưgS]]-I[E={5o<={6TC Qnnnpل TSSs:~8| 0@ӧOѣG%IwV}}}<_۷opu̡|r? 
.OĹuaUTToffF6YYY׾lSTT$ۭ;vی3F)))6&LPyyNMTWWr)+++l tWjذaZpa):͛7+''G ҬY_u?=-|7?fEwiii>mX 5Nbqqq–158YH9I@;q|>_$j1U~=#5x}{ׯjܹ*//믿.I= sQFiٲe4h;~ZwqTQQxqͭZJUUUq 38B7'''l}RR߿>z!kܹ6m222hֶm4o<;vL-8qLСCI&TKK/tM2%ly2dnf]s5ڼy,YDӧOWjjjx֎Y@$888h' :O!l={eebԫ={֬YM6O>m5j$ߓx뮻NT^^TUU&t}Gћo|v8ׁ~{Ǐohhɓ'g P9rDׯHF}ǒwܡ ={\'|K[oK3߭6 P4J5Nd8888iDN)))1b6l\aưg4{l\R7nlqe${$[TXX{/cq7vH:tz#F(999lu<.]u]mKĹ߿淦F;vߪ*޽;fƍP[nU}}}5hРrܚ@aw曺+/޽{v㸛O_\'|1bzѶN0l6z8GY"5555NasŊ+ڲel߾}CYVVV]ɬY,336ogϞ53m׮]va[z 0ƌG:Ǐo{uY^á{1ۼy>|YQQӎ?nfOڷo_۸qڵ 0}<9Y߾}mܹaiO:e{={$[hٳ'I4 ,,[zv=DaÆَ;߶k6#,77׾[YYXbѻmnӧݻ7=$۶mg}k˗[^lƌq:uq+--ÇۛoiÇkΟ?Gw@uuŋ[l}cYt~>^'>yG~tQq/55555N|7uN8(yo߾b#G۷ǺKMRҥKѣ6f6k'xªۤI[nֳgO{Ǭ>#4SN޽{[JJ]uU6uT;x`ps?;v,l6&Ö'\oڴ)}gf3_rssڝw_شi+ ~`N k;׿uzvUWق :k5Çi&33۽{5233-55n{gŠ ٳgm֫W/KNN~̙3[<;^nݺYUUUq/v2M6-b)))6`Hd8y ECCC*; j3\ff_-3q,8 FG#`hXp4,8 @7n,Id͚5Kϗq.5' ,^zIIIIڹs~iѢEӟn 5pźƍǏo~3׾}b;CI8 `a|1@Pp ,8D;^o߾]^{<Oz~8 ѣzGU^^W^yE=̙n 5Hu ̘1CΝȑ#x4g=C@Pp ,dŋc\BG#b 5G#`hXp4,8 FG#`hXp4,8M['c IENDB`glmnet/src/glmnetpp/docs/data/0000755000175000017500000000000014140040573016146 5ustar nileshnileshglmnet/src/glmnetpp/docs/data/gaussian_benchmark.csv0000644000175000017500000000320014140040573022502 0ustar nileshnilesh,p,ka,sp,glmnet,glmnetpp,relative 0,32,1,0,0.00133818,0.00034166700000000003,3.916620569150664 1,64,1,0,0.009778110000000001,0.00242633,4.030000041214509 2,128,1,0,0.0033386500000000003,0.0008830820000000001,3.7806794838984374 3,256,1,0,0.103276,0.026508700000000003,3.8959285064903217 4,512,1,0,0.0244327,0.0064630700000000004,3.780355156295692 5,1024,1,0,0.0775025,0.020471600000000003,3.7858545497176572 6,2048,1,0,0.230163,0.0616297,3.7346117212967123 7,32,0,0,0.00038210100000000003,0.000100932,3.7857270241350616 8,64,0,0,0.0010737700000000001,0.000250609,4.284642610600578 9,128,0,0,0.0015930100000000002,0.00045602200000000005,3.493274447285438 10,256,0,0,0.0364775,0.0085952,4.24393847728965 11,512,0,0,0.026987900000000002,0.007884440000000001,3.42293174911598 12,1024,0,0,0.12407900000000001,0.0375855,3.301246491333094 
13,2048,0,0,0.374,0.109894,3.4032795239048537 14,32,1,1,0.0041264100000000005,0.0020586000000000003,2.0044739143106964 15,64,1,1,0.0016474900000000002,0.000897911,1.8348032266004093 16,128,1,1,0.00561619,0.00296602,1.89351049554622 17,256,1,1,0.040640300000000004,0.0216837,1.8742327185858503 18,512,1,1,0.0211299,0.0113767,1.8572960524580941 19,1024,1,1,0.0611751,0.0332487,1.8399245684793692 20,2048,1,1,1.28555,0.739626,1.7381081789985748 21,32,0,1,0.00118464,0.000603914,1.9616038045152124 22,64,0,1,0.0018147900000000002,0.000967264,1.876209597379826 23,128,0,1,0.00446886,0.00232363,1.9232235769033794 24,256,0,1,0.06593360000000001,0.033328300000000005,1.9783067243153716 25,512,0,1,0.050835200000000004,0.02614,1.9447283856159145 26,1024,0,1,0.282744,0.145406,1.9445139815413393 27,2048,0,1,2.53844,1.06691,2.379244734794875 glmnet/src/glmnetpp/include/0000755000175000017500000000000014140040573015730 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/0000755000175000017500000000000014140040573020577 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point.hpp0000644000175000017500000000115514140040573023632 0ustar nileshnilesh#pragma once #include #include #include #include #include #include #include #include #include #include glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/0000755000175000017500000000000014140304132023111 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_naive.hpp0000644000175000017500000000124214140040573027325 0ustar nileshnilesh#pragma once #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointInternalPolicy> : ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointInternalPolicy>; public: using base_t::base_t; }; } // namespace glmnetpp 
glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_cov.hpp0000644000175000017500000000457314140040573026322 0ustar nileshnilesh#pragma once #include #include #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, ElnetPointInternalPolicy> : ElnetPointGaussianBase< ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointGaussianBase< ElnetPoint::cov, ElnetPointInternalPolicy> >; using typename base_t::update_type; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using base_t::partial_fit; using base_t::update; public: using base_t::base_t; void partial_fit(index_t m, value_t ab, value_t dem) { this->compress_active(); base_t::partial_fit(m, ab, dem); // update gradient to leave invariant this->update_compressed_active(); this->update_grad_compressed_active(); } template void update(index_t k, value_t ab, value_t dem) { value_t beta_diff = 0; auto state = base_t::template update(k, ab, dem, beta_diff); if (state == state_t::continue_) return; // update gradient util::if_else( [=]() { std::for_each( this->all_begin(), this->all_end(), [=](auto j) { if (!this->is_excluded(j)) { this->update_grad(j, k, beta_diff); } }); }, [=]() { std::for_each( this->active_begin(), this->active_end(), [=](auto j) { this->update_grad(j, k, beta_diff); }); }); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/sp_gaussian_cov.hpp0000644000175000017500000000123414140040573027013 0ustar nileshnilesh#pragma once #include namespace glmnetpp { template struct SpElnetPoint< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointInternalPolicy> : ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointInternalPolicy> { private: using base_t = ElnetPoint< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointInternalPolicy>; public: using 
base_t::base_t; }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/decl.hpp0000644000175000017500000000073514140040573024544 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template mode , class ElnetPointInternalPolicy=ElnetPointInternal > struct ElnetPoint; template mode , class SpElnetPointInternalPolicy=SpElnetPointInternal > struct SpElnetPoint; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/0000755000175000017500000000000014140040573024733 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_naive.hpp0000644000175000017500000000601514140040573031144 0ustar nileshnilesh#pragma once #include #include #include namespace glmnetpp { template struct SpElnetPointInternal< util::glm_type::gaussian, util::mode_type::naive, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template SpElnetPointInternal( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, YType& y, const WType& w, const XType& X, const XMType& xm, const XSType& xs, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , X_(X.rows(), X.cols(), X.nonZeros(), X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(), X.innerNonZeroPtr()) , y_(y.data(), y.size()) , w_(w.data(), w.size()) , xm_(xm.data(), xm.size()) , xs_(xs.data(), xs.size()) { base_t::compute_abs_grad([this](index_t k) { return compute_grad(k); }); } void update_beta(index_t k, value_t ab, value_t dem) { base_t::update_beta(k, ab, dem, compute_grad(k)); } void update_resid(index_t k, value_t beta_diff) { auto beta_diff_scaled = beta_diff / xs_(k); y_ -= beta_diff_scaled * X_.col(k); o_ += beta_diff_scaled * xm_(k); } 
bool check_kkt(value_t ab) { return base_t::check_kkt(ab, [this](index_t k) { return compute_grad(k); }); } private: value_t compute_grad(index_t k) const { return X_.col(k).cwiseProduct(w_).dot( (y_.array() + o_).matrix() ) / xs_(k); } using typename base_t::vec_t; using typename base_t::mat_t; using spmat_t = Eigen::SparseMatrix; value_t o_ = 0.0; // mean shift correction when updating gradient Eigen::Map X_; // data matrix (sparse) Eigen::Map y_; // unscaled residual vector Eigen::Map w_; // weights for each column of X_ Eigen::Map xm_; // col-wise mean Eigen::Map xs_; // col-wise stddev // (may not be actually the stddev of X, but something passed by user) }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_cov.hpp0000644000175000017500000000452014140040573030126 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { /* * Dense elastic-net point solver for Gaussian covariance method. */ template struct ElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternal(value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, const XType& X, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, g, xv, vp, cl, ju) , X_(X.data(), X.rows(), X.cols()) {} void update_active(index_t k) { base_t::update_active(k); for (index_t j = 0; j < X_.cols(); ++j) { if (this->is_excluded(j)) continue; // Note: j == k case is after the is_active(j) check in legacy code. // Since base_t::update_active adds mm_(k), // it will now be considered active, // so we have to first check this case. 
if (j == k) { c_(j, nin_-1) = xv_(j); continue; } if (this->is_active(j)) { c_(j, nin_-1) = c_(k, mm_(j)-1); continue; } c_(j, nin_-1) = X_.col(j).dot(X_.col(k)); } } private: using typename base_t::mat_t; using base_t::xv_; using base_t::c_; using base_t::nin_; using base_t::mm_; Eigen::Map X_; // data matrix }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/sp_gaussian_cov.hpp0000644000175000017500000000574014140040573030635 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct SpElnetPointInternal< util::glm_type::gaussian, util::mode_type::cov, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianCovBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template SpElnetPointInternal( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, const WType& w, const XType& X, const XMType& xm, const XSType& xs, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, g, xv, vp, cl, ju) , w_(w.data(), w.size()) , X_(X.rows(), X.cols(), X.nonZeros(), X.outerIndexPtr(), X.innerIndexPtr(), X.valuePtr(), X.innerNonZeroPtr()) , xm_(xm.data(), xm.size()) , xs_(xs.data(), xs.size()) {} void update_active(index_t k) { base_t::update_active(k); for (index_t j = 0; j < X_.cols(); ++j) { if (this->is_excluded(j)) continue; // Note: j == k case is after the is_active(j) check in legacy code. // Since base_t::update_active adds mm_(k), // it will now be considered active, // so we have to first check this case. 
if (j == k) { c_(j, nin_-1) = xv_(j); continue; } if (this->is_active(j)) { c_(j, nin_-1) = c_(k, mm_(j)-1); continue; } auto wx_k = X_.col(k).cwiseProduct(w_); c_(j, nin_-1) = (X_.col(j).dot(wx_k) - xm_(j) * xm_(k)) / (xs_(j) * xs_(k)); } } private: using typename base_t::vec_t; using spmat_t = Eigen::SparseMatrix; using base_t::c_; using base_t::xv_; using base_t::nin_; using base_t::mm_; Eigen::Map w_; // weights for each column of X_ Eigen::Map X_; // data matrix (sparse) Eigen::Map xm_; // col-wise mean Eigen::Map xs_; // col-wise stddev // (may not be actually the stddev of X, but something passed by user) }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/decl.hpp0000644000175000017500000000074014140040573026354 0ustar nileshnilesh#pragma once #include namespace glmnetpp { template mode , class ValueType = double , class IndexType = int , class BoolType = bool> struct ElnetPointInternal; template mode , class ValueType = double , class IndexType = int , class BoolType = bool> struct SpElnetPointInternal; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_naive.hpp0000644000175000017500000000446414140040573030450 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct ElnetPointInternal< util::glm_type::gaussian, util::mode_type::naive, ValueType, IndexType, BoolType> : ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianNaiveBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternal(value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, YType& y, const XType& X, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , X_(X.data(), X.rows(), X.cols()) , y_(y.data(), y.size()) { base_t::compute_abs_grad([this](index_t k) { 
return compute_grad(k); }); } void update_beta(index_t k, value_t ab, value_t dem) { base_t::update_beta(k, ab, dem, compute_grad(k)); } void update_resid(index_t k, value_t beta_diff) { y_ -= beta_diff * X_.col(k); } bool check_kkt(value_t ab) { return base_t::check_kkt(ab, [this](index_t k) { return compute_grad(k); }); } private: value_t compute_grad(index_t k) const { return y_.dot(X_.col(k)); } using typename base_t::vec_t; using typename base_t::mat_t; Eigen::Map X_; // data matrix Eigen::Map y_; // scaled residual vector // Note: this is slightly different from sparse version residual vector. // Sparse one will not be scaled by sqrt(weights), but this one will. }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/internal/gaussian_base.hpp0000644000175000017500000002723214140040573030256 0ustar nileshnilesh#pragma once #include #include #include #include #include #include #include #include namespace glmnetpp { /* * Base class for internal implementation of Gaussian elastic-net point solver. 
* This contains all the common interface and members across all versions of gaussian: * - dense gaussian cov * - dense gaussian naive * - sparse gaussian cov * - sparse gaussian naive */ template struct ElnetPointInternalGaussianBase { using value_t = ValueType; using index_t = IndexType; using bool_t = BoolType; template ElnetPointInternalGaussianBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : thr_(thr) , maxit_(maxit) , nx_(nx) , a_(xv.size()) , mm_(xv.size()) , nlp_(nlp) , ia_(ia.data(), ia.size()) , xv_(xv.data(), xv.size()) , vp_(vp.data(), vp.size()) , cl_(cl.data(), cl.rows(), cl.cols()) , ju_(util::init_bvec::eval(ju)) { a_.setZero(); mm_.setZero(); } bool is_warm() const { return !jz_; } bool is_warm_ever() const { return iz_; } void set_warm() { jz_ = false; } void reset_warm() { jz_ = true; } void set_warm_ever() { iz_ = true; } index_t& passes() { return nlp_; } void coord_desc_reset() { dlx_ = 0.0; } auto active_begin() const { return util::one_to_zero_iterator(ia_.data()); } auto active_end() const { return util::one_to_zero_iterator(ia_.data() + nin_); } constexpr auto all_begin() const { return util::counting_iterator(0); } auto all_end() const { return util::counting_iterator(a_.size()); } bool has_converged() const { return dlx_ < thr_; } bool has_reached_max_passes() const { return nlp_ > maxit_; } bool has_reached_max_active() const { return nin_ > nx_; } bool is_active(index_t j) const { return mm_(j) != 0; } template constexpr void coord_desc( Iter begin, Iter end, UpdatePolicy update_pol, SkipPolicy skip_pol) const { for (; begin != end; ++begin) { auto k = *begin; if (skip_pol(k)) continue; update_pol(k); } } auto beta(index_t k) const { return a_(k); } void update_beta(index_t k, value_t ab, value_t dem, value_t gk) { auto ak = a_(k); auto u = gk + ak * xv_(k); auto v = std::abs(u) - vp_(k) * ab; a_(k) = 0.0; if (v > 0.0) { a_(k) = 
std::max(cl_(0,k), std::min(cl_(1,k), std::copysign(v,u) / (xv_(k) + vp_(k) * dem)) ); } } void update_active(index_t k) { ++nin_; if (has_reached_max_active()) { throw util::max_active_reached_error(); } mm_(k) = nin_; ia_(nin_-1) = k+1; } void update_rsq(index_t k, value_t beta_diff, value_t gk) { rsq_ += beta_diff * (2.0 * gk - beta_diff * xv_(k)); } void update_dlx(index_t k, value_t beta_diff) { dlx_ = std::max(xv_(k) * beta_diff * beta_diff, dlx_); } auto rsq() const { return rsq_; } auto n_active() const { return nin_; } protected: using vec_t = Eigen::Matrix; using mat_t = Eigen::Matrix; using ivec_t = Eigen::Matrix; using bvec_t = util::bvec_t; using ju_t = typename std::conditional< std::is_same::value, const bvec_t&, Eigen::Map >::type; // internal non-captures bool iz_ = false; // true if a partial fit was done with a previous lambda (warm ever) bool jz_ = true; // true if requires a partial fit for current lambda (not warm) value_t dlx_ = 0.0; // measures convergence of each coord-desc value_t rsq_ = 0.0; // R^2 const value_t thr_; // threshold for convergence const index_t maxit_; // max number of passes index_t nin_ = 0; // number of active variables const index_t nx_; // max number of active variables vec_t a_; // uncompressed beta ivec_t mm_; // index k is j if feature k is the jth feature active // captures index_t& nlp_; // number of passes Eigen::Map ia_; // active set (important that it's 1-indexed!) Eigen::Map xv_; // variance of columns of x Eigen::Map vp_; // penalties on features Eigen::Map cl_; // limits on each feature (2 x nvars) ju_t ju_; // exclusion type }; /* * Base class for internal implementation of Gaussian covariance method. 
* This contains all the common interface and members for gaussian covariance methods: * - dense gaussian cov * - sparse gaussian cov */ template struct ElnetPointInternalGaussianCovBase : ElnetPointInternalGaussianBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternalGaussianCovBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, GType& g, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , da_(g.size()) , g_(g.data(), g.size()) , c_(g.size(), nx) {} bool is_excluded(index_t j) const { return ju_[j] == 0; } constexpr void initialize(value_t, value_t, value_t) const { return; } template constexpr bool initial_fit(InitialFitIntType f) const { bool converged = false, kkt_passed = false; std::tie(converged, kkt_passed) = f(); return kkt_passed; } void update_beta(index_t k, value_t ab, value_t dem) { base_t::update_beta(k, ab, dem, g_(k)); } void compress_active() { for (index_t j = 0; j < nin_; ++j) { da_(j) = a_(ia_(j)-1); } } void update_compressed_active() { for (index_t j = 0; j < nin_; ++j) { da_(j) -= a_(ia_(j)-1); } } void update_grad_compressed_active() { for (index_t j = 0; j < a_.size(); ++j) { if (this->is_active(j)) continue; if (!is_excluded(j)) { g_(j) += da_.head(nin_).dot(c_.row(j).head(nin_)); } } } void update_rsq(index_t k, value_t beta_diff) { base_t::update_rsq(k, beta_diff, g_(k)); } void update_grad(index_t j, index_t k, value_t beta_diff) { g_(j) -= c_(j, mm_(k)-1) * beta_diff; } constexpr bool check_kkt(value_t) const { return true; } protected: using typename base_t::vec_t; using typename base_t::mat_t; using base_t::ju_; using base_t::nin_; using base_t::a_; using base_t::ia_; using base_t::mm_; using base_t::xv_; vec_t da_; // compressed beta Eigen::Map g_; // gradient mat_t c_; 
// X^TX cache but of dimension (nvars, max_nvars) }; /* * Base class for internal implementation of Gaussian naive method. * This contains all the common interface and members for gaussian naive methods: * - dense gaussian naive * - sparse gaussian naive */ template struct ElnetPointInternalGaussianNaiveBase : ElnetPointInternalGaussianBase< ValueType, IndexType, BoolType> { private: using base_t = ElnetPointInternalGaussianBase< ValueType, IndexType, BoolType>; public: using typename base_t::value_t; using typename base_t::index_t; template ElnetPointInternalGaussianNaiveBase( value_t thr, index_t maxit, index_t nx, index_t& nlp, IAType& ia, const XVType& xv, const VPType& vp, const CLType& cl, const JUType& ju) : base_t(thr, maxit, nx, nlp, ia, xv, vp, cl, ju) , g_(ju.size()) , ix_(ju.size(), false) { g_.setZero(); } bool is_excluded(index_t j) const { return !ix_[j]; } void initialize(value_t beta, value_t alm, value_t alm0) { auto tlam = beta * (2.0 * alm - alm0); for (index_t k = 0; k < g_.size(); ++k) { if (!is_excluded(k) || ju_[k] == 0) continue; if (g_(k) > tlam * vp_(k)) ix_[k] = true; } } template constexpr bool initial_fit(InitialFitIntType f) const { // Keep doing initial fit until either doesn't converge or // converged and kkt passed. 
while (1) { if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } bool converged = false, kkt_passed = false; std::tie(converged, kkt_passed) = f(); if (!converged) break; if (kkt_passed) return true; } return false; } void update_beta(index_t k, value_t ab, value_t dem, value_t gk) { gk_cache_ = gk; base_t::update_beta(k, ab, dem, gk_cache_); } void update_rsq(index_t k, value_t beta_diff) { base_t::update_rsq(k, beta_diff, gk_cache_); } template bool check_kkt(value_t ab, GradFType grad_f) { bool kkt_passed = true; for (index_t k = 0; k < g_.size(); ++k) { if (!is_excluded(k) || ju_[k] == 0) continue; g_(k) = std::abs(grad_f(k)); if (g_(k) > ab * vp_(k)) { ix_[k] = true; kkt_passed = false; } } return kkt_passed; } const auto& abs_grad() const { return g_; } protected: template void compute_abs_grad(GradFType grad_f) { for (index_t j = 0; j < g_.size(); ++j) { if (ju_[j] == 0) continue; g_(j) = std::abs(grad_f(j)); } } using typename base_t::vec_t; using typename base_t::mat_t; using base_t::ju_; using base_t::vp_; value_t gk_cache_ = 0.0; // caches gradient at k when updating beta vec_t g_; // cwise-absolute gradient std::vector ix_; // strong set indicators }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_naive.hpp0000644000175000017500000000312414140040573026624 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, ElnetPointInternalPolicy> : ElnetPointGaussianBase< ElnetPoint< util::glm_type::gaussian, util::mode_type::naive, ElnetPointInternalPolicy> > { private: using base_t = ElnetPointGaussianBase< ElnetPoint::naive, ElnetPointInternalPolicy> >; using typename base_t::update_type; using typename base_t::value_t; using typename base_t::index_t; using typename base_t::state_t; using base_t::partial_fit; using base_t::update; public: using base_t::base_t; using base_t::abs_grad; void partial_fit(index_t 
m, value_t ab, value_t dem) { base_t::partial_fit(m, ab, dem); // For some reason, naive version does this extra check. if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } } template void update(index_t k, value_t ab, value_t dem) { value_t beta_diff = 0; auto state = base_t::template update(k, ab, dem, beta_diff); if (state == state_t::continue_) return; this->update_resid(k, beta_diff); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/gaussian_base.hpp0000644000175000017500000000721514140040573026441 0ustar nileshnilesh#pragma once #include #include #include namespace glmnetpp { template struct ElnetPointGaussianBase : protected details::traits::internal_t { private: using derived_t = ElnetPointGaussianDerived; using internal_t = typename details::traits::internal_t; derived_t& self() { return static_cast(*this); } const derived_t& self() const { return static_cast(*this); } protected: using typename internal_t::value_t; using typename internal_t::index_t; public: using internal_t::internal_t; using internal_t::beta; using internal_t::rsq; using internal_t::n_active; template void fit(const PointConfigPack& pack) { auto m = pack.m; auto ab = pack.ab; auto dem = pack.dem; auto alm0 = pack.alm0; auto alm = pack.alm; auto beta = pack.beta; this->reset_warm(); this->initialize(beta, alm, alm0); if (this->is_warm_ever()) { self().partial_fit(m, ab, dem); } while (1) { auto initial_fit_internal = [=]() -> std::pair{ ++this->passes(); this->coord_desc_reset(); this->coord_desc( this->all_begin(), this->all_end(), [=](auto k) { self().template update(k, ab, dem); }, [=](auto k) { return this->is_excluded(k); } ); if (this->has_converged()) { return {true, this->check_kkt(ab)}; } if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } return {false, false}; }; bool converged_kkt = this->initial_fit(initial_fit_internal); if (converged_kkt) return; self().partial_fit(m, ab, dem); } } protected: 
using state_t = util::control_flow; void partial_fit(index_t m, value_t ab, value_t dem) { this->set_warm_ever(); // fit on partial subset while (1) { ++this->passes(); this->coord_desc_reset(); this->coord_desc( this->active_begin(), this->active_end(), [=](auto k) { self().template update(k, ab, dem); }, [](auto) { return false; } // no skip ); if (this->has_converged()) break; if (this->has_reached_max_passes()) { throw util::maxit_reached_error(); } } this->set_warm(); } enum class update_type { full, partial }; template state_t update(index_t k, value_t ab, value_t dem, value_t& diff) { auto ak = this->beta(k); this->update_beta(k, ab, dem); if (this->beta(k) == ak) return state_t::continue_; // update active set stuff if full util::if_else( [=]() { if (!this->is_active(k)) { this->update_active(k); } }, []() {}); diff = this->beta(k) - ak; this->update_rsq(k, diff); this->update_dlx(k, diff); return state_t::noop_; } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_point/traits.hpp0000644000175000017500000000155614140040573025145 0ustar nileshnilesh#pragma once #include #include #include namespace glmnetpp { namespace details { template struct traits; template m , class ElnetPointInternalType> struct traits > { static constexpr util::glm_type glm = g; static constexpr util::mode_type mode = m; using internal_t = ElnetPointInternalType; }; template m , class ElnetPointInternalType> struct traits > { static constexpr util::glm_type glm = g; static constexpr util::mode_type mode = m; using internal_t = ElnetPointInternalType; }; } // namespace details } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/0000755000175000017500000000000014140304132022714 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_naive.hpp0000644000175000017500000001302014140040573027125 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct SpElnetPath< 
util::glm_type::gaussian, util::mode_type::naive, SpElnetPointPolicy> : private ElnetPathGaussianBase , ElnetPathBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointPolicy> > { private: using gaussian_base_t = ElnetPathGaussianBase; using base_t = ElnetPathBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::naive, SpElnetPointPolicy> >; using elnet_point_t = SpElnetPointPolicy; template struct FitPack : gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType> { using base_t = gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; YType& y; const WType& w; const XMType& xm; const XSType& xs; FitPack( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param, YType& _y, const WType& _w, const XMType& _xm, const XSType& _xs ) : base_t{beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param} , y(_y) , w(_w) , xm(_xm) , xs(_xs) {} }; public: using gaussian_base_t::initialize_path; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param) const 
{ FitPack< ValueType , JUType , VPType , CLType , YType , WType , IntType , XType , ULamType , XMType , XSType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack (beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param, y, w, xm, xs); base_t::fit(pack); } template auto get_elnet_point(PackType&& pack) const { return elnet_point_t( pack.thr, pack.maxit, pack.nx, pack.nlp, pack.ia, pack.y, pack.w, pack.x, pack.xm, pack.xs, pack.xv, pack.vp, pack.cl, pack.ju); } template auto initialize_point( IntType m, PackType&& pack, PathConfigPackType&& path_pack, ElnetPointType&& elnet_point) const { return gaussian_base_t::initialize_point(m, pack, path_pack, elnet_point.abs_grad()); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_cov.hpp0000644000175000017500000001165014140040573026117 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct ElnetPath< util::glm_type::gaussian, util::mode_type::cov, ElnetPointPolicy> : private ElnetPathGaussianBase , ElnetPathBase< ElnetPath< util::glm_type::gaussian, util::mode_type::cov, ElnetPointPolicy> > { private: using gaussian_base_t = ElnetPathGaussianBase; using base_t = ElnetPathBase< ElnetPath::cov, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; template struct FitPack : gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType> { using base_t = gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; GType& g; FitPack( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType 
maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param, GType& _g ) : base_t{beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param} , g(_g) {} }; public: using gaussian_base_t::initialize_path; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, GType& g, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param) const { FitPack< ValueType , JUType , VPType , CLType , GType , IntType , XType , ULamType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack (beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param, g); base_t::fit(pack); } template auto get_elnet_point(PackType&& pack) const { return elnet_point_t( pack.thr, pack.maxit, pack.nx, pack.nlp, pack.ia, pack.g, pack.x, pack.xv, pack.vp, pack.cl, pack.ju); } template auto initialize_point( IntType m, PackType&& pack, PathConfigPackType&& path_pack, ElnetPointType&&) const { return gaussian_base_t::initialize_point(m, pack, path_pack, pack.g); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/sp_gaussian_cov.hpp0000644000175000017500000001275514140040573026630 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointPolicy> : private ElnetPathGaussianBase , ElnetPathBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointPolicy> > { private: using gaussian_base_t = 
ElnetPathGaussianBase; using base_t = ElnetPathBase< SpElnetPath< util::glm_type::gaussian, util::mode_type::cov, SpElnetPointPolicy> >; using elnet_point_t = SpElnetPointPolicy; template struct FitPack : gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType> { using base_t = gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; GType& g; const WType& w; const XMType& xm; const XSType& xs; FitPack( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param, GType& _g, const WType& _w, const XMType& _xm, const XSType& _xs ) : base_t{beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param} , g(_g) , w(_w) , xm(_xm) , xs(_xs) {} }; public: using gaussian_base_t::initialize_path; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, GType& g, const WType& w, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XMType& xm, const XSType& xs, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param) const { FitPack< ValueType , JUType , VPType , CLType , GType , WType , IntType , XType , ULamType , XMType , XSType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack (beta, ju, vp, cl, ne, nx, x, nlam, 
flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param, g, w, xm, xs); base_t::fit(pack); } template auto get_elnet_point(PackType&& pack) const { return elnet_point_t( pack.thr, pack.maxit, pack.nx, pack.nlp, pack.ia, pack.g, pack.w, pack.x, pack.xm, pack.xs, pack.xv, pack.vp, pack.cl, pack.ju); } template auto initialize_point( IntType m, PackType&& pack, PathConfigPackType&& path_pack, ElnetPointType&&) const { return gaussian_base_t::initialize_point(m, pack, path_pack, pack.g); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/decl.hpp0000644000175000017500000000101214140040573024334 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template mode , class ElnetPointPolicy=ElnetPoint > struct ElnetPath; template mode , class SpElnetPointPolicy=SpElnetPoint > struct SpElnetPath; template struct ElnetPathBase; struct ElnetPathGaussianBase; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_naive.hpp0000644000175000017500000001213214140040573026426 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct ElnetPath< util::glm_type::gaussian, util::mode_type::naive, ElnetPointPolicy> : private ElnetPathGaussianBase , ElnetPathBase< ElnetPath< util::glm_type::gaussian, util::mode_type::naive, ElnetPointPolicy> > { private: using gaussian_base_t = ElnetPathGaussianBase; using base_t = ElnetPathBase< ElnetPath::naive, ElnetPointPolicy> >; using elnet_point_t = ElnetPointPolicy; template struct FitPack : gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType> { using base_t = gaussian_base_t::template FitPack< ValueType, JUType, VPType, CLType, IntType, XType, ULamType, XVType, AOType, IAType, KinType, RSQOType, ALMOType, SetpbFType, IntParamType>; YType& y; FitPack( ValueType beta, const JUType& ju, 
const VPType& vp, const CLType& cl, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param, YType& _y ) : base_t{beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param} , y(_y) {} }; public: using gaussian_base_t::initialize_path; template void fit( ValueType beta, const JUType& ju, const VPType& vp, const CLType& cl, YType& y, IntType ne, IntType nx, const XType& x, IntType nlam, ValueType flmin, const ULamType& ulam, ValueType thr, IntType maxit, const XVType& xv, IntType& lmu, AOType& ao, IAType& ia, KinType& kin, RSQOType& rsqo, ALMOType& almo, IntType& nlp, IntType& jerr, SetpbFType setpb_f, IntParamType int_param) const { FitPack< ValueType , JUType , VPType , CLType , YType , IntType , XType , ULamType , XVType , AOType , IAType , KinType , RSQOType , ALMOType , SetpbFType , IntParamType> pack (beta, ju, vp, cl, ne, nx, x, nlam, flmin, ulam, thr, maxit, xv, lmu, ao, ia, kin, rsqo, almo, nlp, jerr, setpb_f, int_param, y); base_t::fit(pack); } template elnet_point_t get_elnet_point(PackType&& pack) const { return elnet_point_t( pack.thr, pack.maxit, pack.nx, pack.nlp, pack.ia, pack.y, pack.x, pack.xv, pack.vp, pack.cl, pack.ju); } template auto initialize_point( IntType m, PackType&& pack, PathConfigPackType&& path_pack, ElnetPointType&& elnet_point) const { return gaussian_base_t::initialize_point(m, pack, path_pack, elnet_point.abs_grad()); } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/gaussian_base.hpp0000644000175000017500000001052214140040573026237 0ustar nileshnilesh#pragma once #include #include #include namespace glmnetpp { struct ElnetPathGaussianBase { private: template struct PathConfigPack { using value_t = 
ValueType; using int_t = IntType; value_t omb; value_t alm; value_t alf; int_t ni; int_t mnl; value_t sml0; value_t rsqmax0; }; template struct PointConfigPack { using value_t = ValueType; using int_t = IntType; int_t m; value_t ab; value_t dem; value_t alm0; value_t alm; value_t beta; }; protected: template struct FitPack { using value_t = ValueType; using int_t = IntType; value_t beta; const JUType& ju; const VPType& vp; const CLType& cl; int_t ne; int_t nx; const XType& x; int_t nlam; value_t flmin; const ULamType& ulam; value_t thr; int_t maxit; const XVType& xv; int_t& lmu; AOType& ao; IAType& ia; KinType& kin; RSQOType& rsqo; ALMOType& almo; int_t& nlp; int_t& jerr; SetpbFType setpb_f; IntParamType int_param; }; template auto initialize_path(PackType&& pack) const { using pack_t = std::decay_t; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; auto& x = pack.x; auto beta = pack.beta; auto eps = pack.int_param.eps; auto mnlam = pack.int_param.mnlam; auto sml = pack.int_param.sml; auto rsqmax = pack.int_param.rsqmax; int_t ni = x.cols(); value_t omb = 1.0 - beta; value_t alm = 0.0; value_t alf = 1.0; if (pack.flmin < 1.0) { auto eqs = std::max(eps, pack.flmin); alf = std::pow(eqs, 1.0 / (pack.nlam - 1.)); } pack.nlp = 0; auto mnl = std::min(mnlam, pack.nlam); using pack_config_t = PathConfigPack; return pack_config_t{omb, alm, alf, ni, mnl, sml, rsqmax}; } template auto initialize_point( IntType m, PackType&& pack, PathConfigPackType&& path_pack, const GType& g) const { using pack_t = std::decay_t; using value_t = typename pack_t::value_t; using int_t = typename pack_t::int_t; auto flmin = pack.flmin; auto beta = pack.beta; auto ni = path_pack.ni; const auto& ulam = pack.ulam; const auto& ju = pack.ju; const auto& vp = pack.vp; auto& alm = path_pack.alm; auto alf = path_pack.alf; auto omb = path_pack.omb; auto setpb_f = pack.setpb_f; auto big = pack.int_param.big; auto itrace = pack.int_param.itrace; if (itrace) setpb_f(m); auto 
alm0 = alm; if (flmin >= 1.0) { alm = ulam(m); } else if (m > 1) { alm *= alf; } else if (m == 0) { alm = big; } else { alm0 = 0.0; for (int_t j = 0; j < ni; ++j) { if (ju[j] == 0 || (vp(j) <= 0.0)) continue; // TODO: for gaussian_naive, std::abs is redundant // profile to see if it's worth changing alm0 = std::max(alm0, std::abs(g(j)) / vp(j)); } alm0 /= std::max(beta, 1e-3); alm = alm0 * alf; } auto dem = alm * omb; auto ab = alm * beta; using point_config_pack_t = PointConfigPack; return point_config_pack_t{m, ab, dem, alm0, alm, beta}; } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/elnet_path/base.hpp0000644000175000017500000000444714140040573024356 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { template struct ElnetPathBase { private: using derived_t = ElnetPathDerived; derived_t& self() { return static_cast(*this); } const derived_t& self() const { return static_cast(*this); } protected: template void fit(PackType&& pack) const { using pack_t = std::decay_t; using int_t = typename pack_t::int_t; auto path_config_pack = self().initialize_path(pack); const auto& alm = path_config_pack.alm; const auto& mnl = path_config_pack.mnl; const auto& sml0 = path_config_pack.sml0; const auto& rsqmax0 = path_config_pack.rsqmax0; const auto& flmin = pack.flmin; const auto& ne = pack.ne; auto& lmu = pack.lmu; auto& ao = pack.ao; const auto& ia = pack.ia; auto& kin = pack.kin; auto& rsqo = pack.rsqo; auto& almo = pack.almo; auto& jerr = pack.jerr; auto&& elnet_point = self().get_elnet_point(pack); for (int_t m = 0; m < pack.nlam; ++m) { auto point_config_pack = self().initialize_point(m, pack, path_config_pack, elnet_point); auto rsq0 = elnet_point.rsq(); try { elnet_point.fit(point_config_pack); } catch (const util::elnet_error& e) { jerr = e.err_code(m); return; } auto n_active = elnet_point.n_active(); if (n_active > 0) { for (int_t j = 0; j < n_active; ++j) { ao(j, m) = elnet_point.beta(ia(j)-1); } } auto rsq = 
elnet_point.rsq(); kin(m) = n_active; rsqo(m) = rsq; almo(m) = alm; lmu = m + 1; if (lmu < mnl || flmin >= 1.0) continue; int_t me = 0; for (int_t j = 0; j < n_active; ++j) { if (ao(j, m)) ++me; } if ((me > ne) || (rsq - rsq0 < sml0 * rsq) || (rsq > rsqmax0)) break; } } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/chkvars.hpp0000644000175000017500000000271314140040573022754 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { struct Chkvars { template static void eval(const XType& X, JUType& ju) { using index_t = typename std::decay_t::Index; for (index_t j = 0; j < X.cols(); ++j) { ju[j] = false; auto t = X.coeff(0,j); auto x_j_rest = X.col(j).tail(X.rows()-1); ju[j] = (x_j_rest.array() != t).any(); } } }; struct SpChkvars { template static void eval(const XType& X, JUType& ju) { using index_t = typename std::decay_t::Index; for (index_t j = 0; j < X.cols(); ++j) { ju[j] = false; auto begin_j = X.outerIndexPtr()[j]; auto end_j = X.outerIndexPtr()[j+1]; auto n_j = begin_j - end_j; if (n_j == 0) continue; if (n_j < X.rows()) { for (auto i = begin_j; i < end_j; ++i) { if (X.valuePtr()[i] == 0) continue; ju[j] = true; break; } } else { auto t = X.valuePtr()[begin_j]; for (auto i = begin_j + 1; i < end_j; ++i) { if (X.valuePtr()[i] == t) continue; ju[j] = true; break; } } } } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/internal.hpp0000644000175000017500000000141714140040573023127 0ustar nileshnilesh#pragma once namespace glmnetpp { struct InternalParams { static double sml; static double eps; static double big; static int mnlam; static double rsqmax; static double pmin; static double exmx; static int itrace; }; void get_int_parms(double& sml, double& eps, double& big, int& mnlam, double& rsqmax, double& pmin, double& exmx, int& itrace); void chg_fract_dev(double arg); void chg_min_flmin(double arg); void chg_dev_max(double arg); void chg_big(double arg); void chg_min_lambdas(int irg); void 
chg_min_null_prob(double arg); void chg_max_exp(double arg); void chg_itrace(int irg); } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/standardize.hpp0000644000175000017500000001343314140040573023624 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { struct Standardize1 { template static void eval( XType& x, YType& y, WType& w, bool isd, bool intr, const JUType& ju, XMType& xm, XSType& xs, ValueType& ym, ValueType& ys, XVType& xv) { using value_t = typename std::decay_t::Scalar; using vec_t = typename Eigen::Matrix; auto ni = x.cols(); w /= w.sum(); vec_t v = w.array().sqrt().matrix(); // without intercept if (!intr) { ym = 0.0; y.array() *= v.array(); // trevor changed 3/24/2020 ys = y.norm(); y /= ys; for (int j = 0; j < ni; ++j) { if (!ju[j]) continue; xm(j) = 0.0; auto x_j = x.col(j); x_j.array() *= v.array(); xv(j) = x_j.squaredNorm(); if (isd) { auto xbq = x_j.dot(v); xbq *= xbq; auto vc = xv(j) - xbq; xs(j) = std::sqrt(vc); x_j /= xs(j); xv(j) = 1.0 + xbq/vc; } else { xs(j) = 1.0; } } } // with intercept else { for (int j = 0; j < ni; ++j) { if (!ju[j]) continue; auto x_j = x.col(j); xm(j) = x_j.dot(w); x_j.array() = v.array() * (x_j.array() - xm(j)); xv(j) = x_j.squaredNorm(); if (isd) xs(j) = std::sqrt(xv(j)); } if (!isd) { xs.setOnes(); } else { for (int j = 0; j < ni; ++j) { if (!ju[j]) continue; auto x_j = x.col(j); x_j /= xs(j); } xv.setOnes(); } ym = y.dot(w); y.array() = v.array() * (y.array() - ym); ys = y.norm(); y /= ys; } } }; struct Standardize { template static void eval( XType& x, YType& y, WType& w, bool isd, bool intr, const JUType& ju, GType& g, XMType& xm, XSType& xs, ValueType& ym, ValueType& ys, XVType& xv) { auto ni = x.cols(); Standardize1::eval(x, y, w, isd, intr, ju, xm, xs, ym, ys, xv); g.setZero(); for (int j = 0; j < ni; ++j) { if (ju[j]) g(j) = x.col(j).dot(y); } } }; struct SpStandardize1 { template static void eval( const XType& x, YType& y, WType& w, bool isd, bool intr, const JUType& ju, 
XMType& xm, XSType& xs, ValueType& ym, ValueType& ys, XVType& xv) { auto ni = x.cols(); w /= w.sum(); // without intercept if (!intr) { ym = 0.0; // trevor changed 3/24/2020 ys = std::sqrt(y.array().square().matrix().dot(w)); y /= ys; for (int j = 0; j < ni; ++j) { if (!ju[j]) continue; xm(j) = 0.0; auto x_j = x.col(j); xv(j) = x_j.cwiseProduct(x_j).dot(w); if (isd) { auto xbq = x_j.dot(w); xbq *= xbq; auto vc = xv(j) - xbq; xs(j) = std::sqrt(vc); xv(j) = 1.0 + xbq/vc; } else { xs(j) = 1.0; } } } // with intercept else { for (int j = 0; j < ni; ++j) { if (!ju[j]) continue; auto x_j = x.col(j); xm(j) = x_j.dot(w); xv(j) = x_j.cwiseProduct(x_j).dot(w) - xm(j) * xm(j); if (isd) xs(j) = std::sqrt(xv(j)); } if (!isd) { xs.setOnes(); } else { xv.setOnes(); } ym = y.dot(w); y.array() -= ym; ys = std::sqrt(y.array().square().matrix().dot(w)); y /= ys; } } }; struct SpStandardize { template static void eval( const XType& x, YType& y, WType& w, bool isd, bool intr, const JUType& ju, GType& g, XMType& xm, XSType& xs, ValueType& ym, ValueType& ys, XVType& xv) { auto ni = x.cols(); SpStandardize1::eval(x, y, w, isd, intr, ju, xm, xs, ym, ys, xv); g.setZero(); for (int j = 0; j < ni; ++j) { if (ju[j]) { g(j) = x.col(j).dot( (y.array() * w.array()).matrix() ) / xs(j); } } } }; } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/wls.hpp0000644000175000017500000004023414140040573022120 0ustar nileshnilesh#include #include #include #include #include #include #include namespace glmnetpp { namespace details { // Non-dense version. template struct Square { static auto eval(const XType& x) { return x.cwiseProduct(x); } }; // Dense version. template struct Square::value>::type> { static auto eval(const XType& x) { return x.array().square().matrix(); } }; // Some helper utilities that need to be distinguished // between sparse and dense matrices. 
template inline auto square(const XType& x) { return Square::eval(x); } template struct Dot { static auto eval(const XType& x, const RType& r, ValueType r_sum) { auto d = x.dot(r); d -= r_sum * x.mean(); d /= x.sd(); return d; } }; template struct Dot::value>::type > { static auto eval(const XType& x, const RType& r, ValueType r_sum) { return x.dot(r); } }; // Compute the dot product between r and x vectors. // If x is sparse, it must support dot(r, r_sum), // where r_sum is the sum of all entries of r. // // @param r_sum sum of all entries in r. It is more efficient to supply // this argument to compute the dot product for sparse x. // It is ignored if x is dense. template inline auto dot( const XType& x, const RType& r, ValueType r_sum) { return Dot::eval(x, r, r_sum); } template struct ComputeXV { static auto eval(const XType& x, const VType& v, ValueType v_sum) { auto xv = square(x).dot(v); xv -= 2 * x.mean() * x.dot(v); return (xv + v_sum * x.mean() * x.mean()) / (x.sd() * x.sd()); } }; template struct ComputeXV::value>::type > { static auto eval(const XType& x, const VType& v, ValueType v_sum) { return square(x).dot(v); } }; // Compute xv temporary helper. 
template inline auto compute_xv(const XType& x, const VType& v, ValueType v_sum) { return ComputeXV::eval(x,v,v_sum); } template struct UpdateR { static auto eval(RType& r, const XType& x, const VType& v, ValueType d) { auto d_scaled = d / x.sd(); r -= d_scaled * x.cwiseProduct(v); r += (d_scaled * x.mean()) * v; } }; template struct UpdateR::value>::type > { static auto eval(RType& r, const XType& x, const VType& v, ValueType d) { r -= (d * v.array() * x.array()).matrix(); } }; // Update residual vector template inline void update_r(RType& r, const XType& x, const VType& v, ValueType d) { using r_t = typename std::decay::type; UpdateR::eval(r, x, v, d); } template struct UpdateRSum { static_assert(!do_update, "Not a valid parameter list for updating r_sum."); static void eval(ValueType& r_sum, const RType& r) {} }; template struct UpdateRSum { static void eval(ValueType&, const RType&) {} }; template struct UpdateRSum::value>::type > { static void eval(ValueType& r_sum, const RType& r) { r_sum = r.sum(); } }; template struct UpdateRSum::value>::type > { static void eval(ValueType& r_sum, const RType& r) { r_sum = r; } }; template inline void update_r_sum(ValueType& r_sum, const RType& r) { using value_t = typename std::decay::type; UpdateRSum::eval(r_sum, r); } template struct GetUpdateRSumVal { template static auto eval(ValueType diff, const RType&) { return diff; } }; template <> struct GetUpdateRSumVal { template static auto eval(ValueType, const RType& r) { return r; } }; // Update intercept term (and other invariant quantities) template inline void update_intercept( IntType intr, ValueType xmz, const VType& v, RType& r, ValueType& r_sum, ValueType& aint, ValueType& rsqc, ValueType& dlx) { double d = 0.0; if (intr != 0) { // if we haven't been updating r_sum, // we must at least update here for the next if-block. 
update_r_sum(r_sum, r); d = r_sum / xmz; } if (d != 0.0) { aint += d; auto diff = r_sum - d * xmz; rsqc += d * (r_sum + diff); dlx = std::max(dlx, xmz * d * d); r -= d * v; auto&& update_val = GetUpdateRSumVal::eval(diff, r); update_r_sum(r_sum, update_val); } } template struct AddActive { template static void eval(IntType k, AddActiveFType add_active_f) { add_active_f(k); } }; template <> struct AddActive { template static void eval(IntType, AddActiveFType) {} }; template inline void coord_desc( Iter begin, Iter end, const XType& x, const VType& v, RType& r, ValueType& r_sum, AType& a, const XVType& xv, const VPType& vp, ValueType ab, ValueType dem, const CLType& cl, ValueType& rsqc, ValueType& dlx, SkipFType skip_f, AddActiveFType add_active_f) { for (; begin != end; ++begin) { auto k = *begin; if (skip_f(k)) continue; // check if ST threshold for descent is met // if it goes to 0, set a(k) = 0.0 // if not, set a(k) to the post-gradient descent value // u is the kth partial residual auto gk = details::dot(x.col(k), r, r_sum); auto ak = a(k); auto u = gk + ak * xv(k); auto au = std::abs(u) - vp(k) * ab; if (au < 0.0) { a(k) = 0.0; } else { a(k) = std::max(cl(0,k), std::min(cl(1,k), std::copysign(au, u) / (xv(k) + vp(k) * dem) ) ); } // if the update didn't change the coefficient value, go to // the next feature/variable if (a(k) == ak) continue; AddActive::eval(k, add_active_f); // update residual r, rsqc, and dlx (diff exp from wls) auto d = a(k) - ak; rsqc += d * (2.0 * gk - d * xv(k)); details::update_r(r, x.col(k), v, d); details::update_r_sum(r_sum, r); dlx = std::max(xv(k) * d * d, dlx); } } // Partially fit WLS on the active set. 
template inline void wls_partial_fit( IntType& iz, IntType& jz, IntType& nlp, IntType& nino, IntType maxit, IntType intr, ValueType thr, ValueType xmz, IAType& ia, RType& r, ValueType& r_sum, const XMapType& x, const VType& v, AType& a, ValueType& aint, const XVType& xv, const VPType& vp, const CLType& cl, ValueType& rsqc, ValueType ab, ValueType dem, IntType& jerr) { using value_t = ValueType; iz = 1; while (1) { ++nlp; value_t dlx = 0.0; coord_desc( ia.data(), ia.data() + nino, x, v, r, r_sum, a, xv, vp, ab, dem, cl, rsqc, dlx, [](auto) { return false; }, [](auto) {} ); // updating of intercept term update_intercept( intr, xmz, v, r, r_sum, aint, rsqc, dlx); if (dlx < thr) break; if (nlp > maxit) { throw util::maxit_reached_error(); } } // set jz = 0 so that we have to go into :again: tag // to check KKT conditions. jz = 0; } } // namespace details /* * Experimental C++ implementation of WLS * * alm0: previous lambda value * almc: current lambda value * alpha: alpha * m: current lambda iteration no. * no: no of observations * ni: no of variables * x: x matrix * r: weighted residual! v * (y - yhat) * v: weights * intr: 1 if fitting intercept, 0 otherwise * ju: ju(k) = 1 if feature k is included for consideration in model * vp: relative penalties on features (sum to ni) * cl: coordinate limits * nx: limit for max no. of variables ever to be nonzero * thr: threshold for dlx * maxit: max no. of passes over the data for all lambda values * a, aint: warm start for coefs, intercept * g: abs(dot_product(r,x(:,j))) * ia: mapping nino to k * iy: ever-active set (for compatibility with sparse version) * iz: flag for loop. 0 for first lambda, 1 subsequently * mm: mapping k to nino * nino: no. of features that have ever been nonzero * rsqc: R^2 * nlp: no. 
of passes over the data * jerr: error code */ template inline void wls( ValueType alm0, ValueType almc, ValueType alpha, IntType m, IntType no, IntType ni, const XMapType& x, RType& r, XVType& xv, const VType& v, IntType intr, const JUType& ju, const VPType& vp, const CLType& cl, IntType nx, ValueType thr, IntType maxit, AType& a, ValueType& aint, GType& g, IAType& ia, IYType& iy, IntType& iz, MMType& mm, IntType& nino, ValueType& rsqc, IntType& nlp, IntType& jerr) { using value_t = ValueType; using int_t = IntType; // compute xmz (for intercept later) const value_t xmz = v.sum(); // only update r_sum if X is not dense. constexpr bool do_update_r_sum = !util::is_dense::value; value_t r_sum = 0.0; details::update_r_sum(r_sum, r); // compute g initialization for (int_t j = 0; j < ni; ++j) { if (ju(j) == 0) continue; g(j) = std::abs(details::dot(x.col(j), r, r_sum)); } // compute xv // Note: weight v may have changed from previous call to wls, // so we must update existing xv values. for (int_t j = 0; j < ni; ++j) { if (iy(j) > 0) { xv(j) = details::compute_xv(x.col(j), v, xmz); } } // ab: lambda * alpha, dem: lambda * (1 - alpha) value_t ab = almc * alpha; value_t dem = almc * (1.0 - alpha); // strong rules: iy(k) = 1 if we don't discard feature k value_t tlam = alpha * (2.0 * almc - alm0); for (int_t k = 0; k < ni; ++k) { if (iy(k) == 1 || ju(k) == 0) continue; if (g(k) > tlam * vp(k)) { iy(k) = 1; xv(k) = details::compute_xv(x.col(k), v, xmz); } } int_t jz = 1; try { if (iz*jz != 0) { details::wls_partial_fit( iz, jz, nlp, nino, maxit, intr, thr, xmz, ia, r, r_sum, x, v, a, aint, xv, vp, cl, rsqc, ab, dem, jerr); } while (1) { bool converged = false; while (1) { // :again: ++nlp; value_t dlx = 0.0; details::coord_desc( util::counting_iterator(0), util::counting_iterator(ni), x, v, r, r_sum, a, xv, vp, ab, dem, cl, rsqc, dlx, [&](auto k) { return iy(k) == 0; }, [&, nx](auto k) { // if coef for feature k was previously 0, we now have a // new non-zero coef. 
update nino, mm(k) and ia(nino). // This is only needed if we are _not_ iterating over active set // because mm(k) will always be > 0 if k is in active set, by def. if (mm(k) == 0) { ++nino; if (nino > nx) throw util::max_active_reached_error(); mm(k) = nino; // Note: ia is never used outside this function call, // so it is OK to store indices in 0-index. ia(nino-1) = k; } } ); // updating of intercept term details::update_intercept( intr, xmz, v, r, r_sum, aint, rsqc, dlx); // in wls, this leads to KKT checks. here, we exit // the loop instead. if (dlx < thr) { bool ixx = false; for (int_t k = 0; k < ni; ++k) { if (iy(k) == 1 || ju(k) == 0) continue; g(k) = std::abs(details::dot(x.col(k), r, r_sum)); if (g(k) > ab * vp(k)) { iy(k) = 1; xv(k) = details::compute_xv(x.col(k), v, xmz); ixx = true; } } if (!ixx) { converged = true; break; } } else break; } // end :again: while if (converged) break; // if we've gone over max iterations, return w error if (nlp > maxit) { throw util::maxit_reached_error(); } // this is like the :b: loop in wls (M) details::wls_partial_fit( iz, jz, nlp, nino, maxit, intr, thr, xmz, ia, r, r_sum, x, v, a, aint, xv, vp, cl, rsqc, ab, dem ,jerr); } // end outer while } catch (const util::elnet_error& e) { jerr = e.err_code(m); // m is 1-indexed; err_code expects 0-indexed } } } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/0000755000175000017500000000000014140271007021552 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/util/iterator/0000755000175000017500000000000014140040573023405 5ustar nileshnileshglmnet/src/glmnetpp/include/glmnetpp_bits/util/iterator/one_to_zero_iterator.hpp0000644000175000017500000000315314140040573030353 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { namespace util { // forward declaration template struct one_to_zero_iterator; template inline constexpr bool operator==(const one_to_zero_iterator& it1, const one_to_zero_iterator& it2) { return it1.curr_ == 
it2.curr_; } template inline constexpr bool operator!=(const one_to_zero_iterator& it1, const one_to_zero_iterator& it2) { return it1.curr_ != it2.curr_; } template struct one_to_zero_iterator { using difference_type = int32_t; using value_type = IntType; using pointer = value_type*; using reference = value_type&; using iterator_category = std::bidirectional_iterator_tag; one_to_zero_iterator(const value_type* begin) : curr_(begin) {} one_to_zero_iterator& operator++() { ++curr_; return *this; } one_to_zero_iterator& operator--() { --curr_; return *this; } one_to_zero_iterator operator++(int) { auto tmp = *this; ++curr_; return tmp; } one_to_zero_iterator operator--(int) { auto tmp = *this; --curr_; return tmp; } value_type operator*() { return *curr_ - 1; } friend constexpr bool operator==<>(const one_to_zero_iterator&, const one_to_zero_iterator&); friend constexpr bool operator!=<>(const one_to_zero_iterator&, const one_to_zero_iterator&); private: const value_type* curr_; }; } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/iterator/counting_iterator.hpp0000644000175000017500000000304714140040573027661 0ustar nileshnilesh#pragma once #include #include namespace glmnetpp { namespace util { // forward declaration template struct counting_iterator; template inline constexpr bool operator==(const counting_iterator& it1, const counting_iterator& it2) { return it1.curr_ == it2.curr_; } template inline constexpr bool operator!=(const counting_iterator& it1, const counting_iterator& it2) { return it1.curr_ != it2.curr_; } template struct counting_iterator { using difference_type = int32_t; using value_type = IntType; using pointer = value_type*; using reference = IntType&; using iterator_category = std::bidirectional_iterator_tag; counting_iterator(value_type begin) : curr_(begin) {} counting_iterator& operator++() { ++curr_; return *this; } counting_iterator& operator--() { --curr_; return *this; } counting_iterator 
operator++(int) { auto tmp = *this; ++curr_; return tmp; } counting_iterator operator--(int) { auto tmp = *this; --curr_; return tmp; } reference operator*() { return curr_; } friend constexpr bool operator==<>(const counting_iterator&, const counting_iterator&); friend constexpr bool operator!=<>(const counting_iterator&, const counting_iterator&); private: value_type curr_; }; } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/types.hpp0000644000175000017500000000150314140040573023430 0ustar nileshnilesh#pragma once namespace glmnetpp { namespace util { // Types of glm supported. enum class glm_type { gaussian, binomial }; // A struct that defines the enum class of mode types // for a specific glm. template struct Mode; // Specializations template <> struct Mode { enum class type { naive, cov }; }; template <> struct Mode { enum class type { two_class, multi_class }; }; // Helper alias to get the mode enum class type for each glm. template using mode_type = typename Mode::type; // Represents a state of a function as a way for the caller to do control flow. enum class control_flow { noop_, continue_, break_, return_ }; } // namespace util } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/mapped_sparse_matrix_wrapper.hpp0000644000175000017500000000411614140040573030236 0ustar nileshnilesh#pragma once #include namespace glmnetpp { // A wrapper class to represent a standardized sparse (column) vector. template struct MappedSparseVectorStandardized : public ColVecType { private: using base_t = ColVecType; using value_t = typename base_t::Scalar; public: MappedSparseVectorStandardized( const base_t& c, value_t mean, value_t sd) : base_t(c) , mean_(mean) , sd_(sd) {} value_t mean() const { return mean_; } value_t sd() const { return sd_; } private: const value_t mean_; const value_t sd_; }; // A wrapper class to represent a standardized sparse matrix. 
template struct MappedSparseMatrixWrapper : public SparseMatMapType { private: using base_t = SparseMatMapType; using value_t = typename SparseMatMapType::Scalar; using vec_t = Eigen::Matrix; using map_vec_t = Eigen::Map; using index_t = typename SparseMatMapType::Index; public: MappedSparseMatrixWrapper( const base_t& m, const map_vec_t& mean, const map_vec_t& sd) : base_t(m) , mean_(mean) , sd_(sd) {} auto col(index_t j) const { auto col_j = base_t::col(j); using col_t = std::decay_t; return MappedSparseVectorStandardized( col_j, mean_(j), sd_(j)); } auto col(index_t j) { auto col_j = base_t::col(j); using col_t = std::decay_t; return MappedSparseVectorStandardized( col_j, mean_(j), sd_(j)); } private: const map_vec_t mean_; const map_vec_t sd_; }; template inline auto make_mapped_sparse_matrix_wrapper( const SparseMatMapType& m, const MeanType& mean, const SDType& sd) { return MappedSparseMatrixWrapper(m, mean, sd); } } // namespace glmnetpp glmnet/src/glmnetpp/include/glmnetpp_bits/util/type_traits.hpp0000644000175000017500000000367414140040573024646 0ustar nileshnilesh#pragma once #include #include #include namespace glmnetpp { namespace util { namespace details { // Dummy class that defines a conversion from F to this class. template