## ---- lavaan/R/lav_samplestats_robust.R ----

# 'robust' mean and (co)variance matrix using Huber weights
#
# see Yuan & Hayashi (2010). Fitting Data to Model: SEM Diagnosis using two
# scatter plots. Psychological Methods, 15(4), 335-351
#
# this function is based on the 'robmusig' function from K.H. Yuan's website:
# https://www.nd.edu/~kyuan/SEMdiagnosis
# see file CFA.r lines 46--96
#
lav_cov_huber <- function(Y = NULL, prob = 0.95, max.it = 200L, tol = 1e-07) {
  Y <- as.matrix(Y)
  NAMES <- colnames(Y)
  Y <- unname(Y)
  N <- nrow(Y)
  P <- ncol(Y)

  # tuning parameters for Huber's weight
  chip <- qchisq(prob, P)
  ck <- sqrt(chip)
  cbeta <- (P * pchisq(chip, P + 2L) + chip * (1 - prob)) / P

  # initial values
  this.mu <- colMeans(Y, na.rm = TRUE)
  this.sigma <- cov(Y, use = "pairwise.complete.obs")

  for (i in seq_len(max.it)) {
    # store old
    old.mu <- this.mu
    old.sigma <- this.sigma

    # squared Mahalanobis distance
    inv.sigma <- solve(this.sigma)
    Y.c <- t(t(Y) - this.mu)
    mdist2 <- rowSums((Y.c %*% inv.sigma) * Y.c)
    mdist <- sqrt(mdist2)

    # Huber weights
    wt <- ifelse(mdist <= ck, 1, ck / mdist)

    # weighted mean
    this.mu <- apply(Y, 2L, weighted.mean, w = wt, na.rm = TRUE)

    # weighted cov
    Y.c <- t(t(Y) - this.mu)
    this.sigma <- crossprod(Y.c * wt) / (N * cbeta)
    # question: why N, and not sum(wt)?

    # check progress
    diff.mu <- abs(this.mu - old.mu)
    diff.sigma <- abs(this.sigma - old.sigma)
    crit <- max(c(max(diff.mu), max(diff.sigma)))
    if (crit < tol) {
      break
    }
    if (i == max.it) {
      lav_msg_warn(gettext(
        "maximum number of iterations has been reached, without convergence."))
    }
  }

  names(this.mu) <- NAMES
  colnames(this.sigma) <- rownames(this.sigma) <- NAMES

  res <- list(Mu = this.mu, Sigma = this.sigma, niter = i, wt = wt)
  res
}
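## Illustration only (not part of the package sources): a minimal sketch of
## calling the internal estimator above on the bundled Holzinger-Swineford
## data. The `:::` access and the chosen columns are assumptions.
# Y <- lavaan::HolzingerSwineford1939[, paste0("x", 1:9)]
# out <- lavaan:::lav_cov_huber(Y, prob = 0.95)
# out$Mu     # Huber-weighted mean vector
# out$Sigma  # Huber-weighted covariance matrix
# out$niter  # number of iterations until convergence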
## ---- lavaan/R/lav_integrate.R ----

# routines for numerical integration

# integrate (-infty to +infty) a product of univariate Gaussian densities
# with given means (mus) and standard deviations (sds) (or variances, vars)
lav_integration_gaussian_product <- function(mus = NULL, sds = NULL,
                                             vars = NULL) {
  n <- length(mus)
  if (is.null(vars)) {
    vars <- sds^2
  }

  # variance product
  var.prod <- 1 / sum(1 / vars)

  # mean product
  mu.prod <- sum(mus / vars) * var.prod

  # normalization constant
  const <- 1 / sqrt((2 * pi)^(n - 1)) * sqrt(var.prod) * sqrt(1 / prod(vars)) *
    exp(-0.5 * (sum(mus^2 / vars) - mu.prod^2 / var.prod))

  const
}

# return Gauss-Hermite quadrature rule for given order (n)
# return list: x = nodes, w = quadrature weights
#
# As noted by Wilf (1962, chapter 2, ex 9), the nodes are given by
# the eigenvalues of the Jacobi matrix; weights are given by the squares of the
# first components of the (normalized) eigenvectors, multiplied by sqrt(pi)
#
# (This is NOT identical to Golub & Welsch, 1968: as they used a specific
# method tailored for tridiagonal symmetric matrices)
#
# TODO: look at https://github.com/ajt60gaibb/FastGaussQuadrature.jl/blob/master/src/gausshermite.jl
# featuring the work of Ignace Bogaert (UGent)
#
# approximation of the integral of 'f(x) * exp(-x*x)' from -inf to +inf
# by sum( f(x_i) * w_i )
#
# CHECK: sum(w_i) should be always sqrt(pi) = 1.772454
lav_integration_gauss_hermite_xw <- function(n = 21L, revert = FALSE) {
  # force n to be an integer
  n <- as.integer(n)
  stopifnot(n > 0L)

  if (n == 1L) {
    x <- 0
    w <- sqrt(pi)
  } else {
    # construct symmetric, tridiagonal Jacobi matrix
    # diagonal = 0, -1/+1 diagonal is sqrt(1:(n-1)/2)
    u <- sqrt(seq.int(n - 1L) / 2) # upper diagonal of J
    Jn <- matrix(0, n, n)
    didx <- lav_matrix_diag_idx(n)
    Jn[(didx + 1)[-n]] <- u
    # Jn[(didx-1)[-1]] <- u # only lower matrix is used anyway

    # eigen decomposition
    # FIXME: use specialized function for tridiagonal symmetric matrix
    ev <- eigen(Jn, symmetric = TRUE)
    x <- ev$values
    tmp <- ev$vectors[1L, ]
    w <- sqrt(pi) * tmp * tmp
  }

  # revert? (minus to plus)
  if (revert) {
    x <- -x
  }

  list(x = x, w = w)
}

# generate GH points + weights
lav_integration_gauss_hermite <- function(n = 21L, dnorm = FALSE,
                                          mean = 0, sd = 1,
                                          ndim = 1L,
                                          revert = TRUE,
                                          prune = FALSE) {
  XW <- lav_integration_gauss_hermite_xw(n = n, revert = revert)

  # dnorm kernel?
  if (dnorm) {
    # scale/shift x
    x <- XW$x * sqrt(2) * sd + mean
    # scale w
    w <- XW$w / sqrt(pi)
  } else {
    x <- XW$x
    w <- XW$w
  }

  if (ndim > 1L) {
    # cartesian product
    x <- as.matrix(expand.grid(rep(list(x), ndim), KEEP.OUT.ATTRS = FALSE))
    w <- as.matrix(expand.grid(rep(list(w), ndim), KEEP.OUT.ATTRS = FALSE))
    w <- apply(w, 1, prod)
  } else {
    x <- as.matrix(x)
    w <- as.matrix(w)
  }

  # prune?
  if (is.logical(prune) && prune) {
    # always divide by N=21
    lower.limit <- XW$w[1] * XW$w[floor((n + 1) / 2)] / 21
    keep.idx <- which(w > lower.limit)
    w <- w[keep.idx]
    x <- x[keep.idx, , drop = FALSE]
  } else if (is.numeric(prune) && prune > 0) {
    lower.limit <- quantile(w, probs = prune)
    keep.idx <- which(w > lower.limit)
    w <- w[keep.idx]
    x <- x[keep.idx, , drop = FALSE]
  }

  list(x = x, w = w)
}

# backwards compatibility
lav_integration_gauss_hermite_dnorm <- function(n = 21L, mean = 0, sd = 1,
                                                ndim = 1L,
                                                revert = TRUE,
                                                prune = FALSE) {
  lav_integration_gauss_hermite(
    n = n, dnorm = TRUE, mean = mean, sd = sd,
    ndim = ndim, revert = revert, prune = prune
  )
}

# plot 2-dim
# out <- lavaan:::lav_integration_gauss_hermite_dnorm(n = 20, ndim = 2)
# plot(out$x, cex = -10/log(out$w), col = "darkgrey", pch=19)

# integrand g(x) has the form g(x) = f(x) dnorm(x, m, s^2)
lav_integration_f_dnorm <- function(func = NULL, # often ly.prod
                                    dnorm.mean = 0, # dnorm mean
                                    dnorm.sd = 1, # dnorm sd
                                    XW = NULL, # GH points
                                    n = 21L, # number of nodes
                                    adaptive = FALSE, # adaptive?
                                    iterative = FALSE, # iterative?
                                    max.iter = 20L, # max iterations
                                    ...) { # optional args for 'f'
  # create GH rule
  if (is.null(XW)) {
    XW <- lav_integration_gauss_hermite_xw(n = n, revert = TRUE)
  }

  if (!adaptive) {
    w.star <- XW$w / sqrt(pi)
    x.star <- dnorm.sd * (sqrt(2) * XW$x) + dnorm.mean
    out <- sum(func(x.star, ...) * w.star)
  } else {
    # Naylor & Smith (1982, 1988)
    if (iterative) {
      mu.est <- 0
      sd.est <- 1
      for (i in 1:max.iter) {
        w.star <- sqrt(2) * sd.est *
          dnorm(sqrt(2) * sd.est * XW$x + mu.est, dnorm.mean, dnorm.sd) *
          exp(XW$x^2) * XW$w
        x.star <- sqrt(2) * sd.est * XW$x + mu.est
        LIK <- sum(func(x.star, ...) * w.star)

        # update mu
        mu.est <- sum(x.star * (func(x.star, ...) * w.star) / LIK)

        # update sd
        var.est <- sum(x.star^2 * (func(x.star, ...) * w.star) / LIK) -
          mu.est^2
        sd.est <- sqrt(var.est)

        if (lav_verbose()) {
          cat(
            "i = ", i, "LIK = ", LIK, "mu.est = ", mu.est,
            "sd.est = ", sd.est, "\n"
          )
        }
      }
      out <- LIK

    # Liu and Pierce (1994)
    } else {
      # integrand g(x) = func(x) * dnorm(x; m, s^2)
      log.g <- function(x, ...) { ## FIXME: should we take the log right away?
        log(func(x, ...) * dnorm(x, mean = dnorm.mean, sd = dnorm.sd))
      }

      # find mu hat and sd hat
      mu.est <- optimize(
        f = log.g, interval = c(-10, 10), maximum = TRUE,
        tol = .Machine$double.eps, ...
      )$maximum
      H <- as.numeric(numDeriv::hessian(func = log.g, x = mu.est, ...))
      sd.est <- sqrt(1 / -H)

      w.star <- sqrt(2) * sd.est *
        dnorm(sd.est * (sqrt(2) * XW$x) + mu.est, dnorm.mean, dnorm.sd) *
        exp(XW$x^2) * XW$w
      x.star <- sd.est * (sqrt(2) * XW$x) + mu.est

      out <- sum(func(x.star, ...) * w.star)
    }
  }

  out
}

# integrand g(z) has the form g(z) = f(sz+m) dnorm(z, 0, 1)
lav_integration_f_dnorm_z <- function(func = NULL, # often ly.prod
                                      f.mean = 0, # f mean
                                      f.sd = 1, # f sd
                                      XW = NULL, # GH points
                                      n = 21L, # number of nodes
                                      adaptive = FALSE, # adaptive?
                                      iterative = FALSE, # iterative?
                                      max.iter = 20L, # max iterations
                                      ...) { # optional args for 'f'
  # create GH rule
  if (is.null(XW)) {
    XW <- lav_integration_gauss_hermite_xw(n = n, revert = TRUE)
  }

  if (!adaptive) {
    w.star <- XW$w / sqrt(pi)
    x.star <- sqrt(2) * XW$x
    out <- sum(func(f.sd * x.star + f.mean, ...) * w.star)
  } else {
    # Naylor & Smith (1982, 1988)
    if (iterative) {
      mu.est <- 0
      sd.est <- 1
      for (i in 1:max.iter) {
        w.star <- sqrt(2) * sd.est *
          dnorm(sd.est * sqrt(2) * XW$x + mu.est, 0, 1) *
          exp(XW$x^2) * XW$w
        x.star <- sd.est * (sqrt(2) * XW$x) + mu.est
        LIK <- sum(func(f.sd * x.star + f.mean, ...) * w.star)

        # update mu
        mu.est <- sum(x.star *
          (func(f.sd * x.star + f.mean, ...) * w.star) / LIK)

        # update sd
        var.est <- sum(x.star^2 *
          (func(f.sd * x.star + f.mean, ...) * w.star) / LIK) - mu.est^2
        sd.est <- sqrt(var.est)

        if (lav_verbose()) {
          cat(
            "i = ", i, "LIK = ", LIK, "mu.est = ", mu.est,
            "sd.est = ", sd.est, "\n"
          )
        }
      }
      out <- LIK

    # Liu and Pierce (1994)
    } else {
      # integrand g(x) = func(x) * dnorm(x; m, s^2)
      log.gz <- function(x, ...) { ## FIXME: should we take the log right away?
        log(func(f.sd * x + f.mean, ...) * dnorm(x, mean = 0, sd = 1))
      }

      # find mu hat and sd hat
      mu.est <- optimize(
        f = log.gz, interval = c(-10, 10), maximum = TRUE,
        tol = .Machine$double.eps, ...
      )$maximum
      H <- as.numeric(numDeriv::hessian(func = log.gz, x = mu.est, ...))
      sd.est <- sqrt(1 / -H)

      w.star <- sqrt(2) * sd.est *
        dnorm(sd.est * (sqrt(2) * XW$x) + mu.est, 0, 1) *
        exp(XW$x^2) * XW$w
      x.star <- sd.est * (sqrt(2) * XW$x) + mu.est

      out <- sum(func(f.sd * x.star + f.mean, ...) * w.star)
    }
  }

  out
}
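## Illustration only (not part of the package sources), in the same spirit as
## the commented 'plot 2-dim' example above: two quick checks of the rules.
# XW <- lavaan:::lav_integration_gauss_hermite_xw(n = 21L)
# sum(XW$w)  # should be sqrt(pi) = 1.772454 (see CHECK above)
# # E[X^2] for X ~ N(0, 1) via the dnorm kernel; should be close to 1
# gh <- lavaan:::lav_integration_gauss_hermite(n = 21L, dnorm = TRUE)
# sum(gh$x^2 * gh$w)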
## ---- lavaan/R/lav_syntax_parser.R ----

# New version of parser, written by Luc De Wilde in September/October 2023

# ----------------------- ldw_create_enum ------------------------------------
# function to create an Enumerable-like structure in R
# usage example: mycolors <- ldw_create_enum(c("black", "white",
#                  "orange", "green", "red", "blue"))
#                xyz <- mycolors$red
# values are by default 1L, ..., number of names, but can be user specified
# -----------------------------------------------------------------------------
ldw_create_enum <- function(names, values = seq_along(names)) {
  stopifnot(identical(unique(names), names), is.character(names))
  stopifnot(length(names) == length(values))
  res <- as.list(setNames(values, names))
  res$enum.names <- names
  res$enum.values <- values
  res$enum.size <- length(values)
  res <- as.environment(res)
  lockEnvironment(res, bindings = TRUE)
  res
}

# ------------------------ ldw_parse_sublist ----------------------------------
# function to create a list with only some indexes for all members
# -----------------------------------------------------------------------------
ldw_parse_sublist <- function(inlist, indexes) {
  for (j in seq_along(inlist)) {
    inlist[[j]] <- inlist[[j]][indexes]
  }
  inlist
}

# ------------------------ ldw_txtloc -----------------------------------------
# function which translates a position in the model source string to a
# user friendly locator (=[1L]) and the line with position (=[2L])
# -----------------------------------------------------------------------------
ldw_txtloc <- function(modelsrc, position) {
  txt <- c("", "")
  if (nchar(modelsrc) >= position && position > 0) {
    newlines <- gregexpr("\n", paste0(modelsrc, "\n"), fixed = TRUE)[[1]]
    lijn <- which(newlines >= position)[1]
    if (lijn == 1L) {
      pos <- position
      lijnchar <- substr(modelsrc, 1L, newlines[1])
    } else {
      pos <- position - newlines[lijn - 1L]
      lijnchar <- substr(modelsrc, newlines[lijn - 1L] + 1L, newlines[lijn])
    }
    if (nchar(lijnchar) == 1L) {
      lijnchar <- ""
    } else {
      lijnchar <- substr(lijnchar, 1L, nchar(lijnchar) - 1)
    }
    # adapt line number when first line blank :
    if (grepl("^[ \t]*\n", modelsrc)) lijn <- lijn - 1L
    txt <- c(
      gettextf(" at line %1$s, pos %2$s", lijn, pos),
      paste(lijnchar, "\n", strrep(" ", pos - 1L), "^\n", sep = "")
    )
  }
  txt
}

# ------------------------ ldw_parse_step1 ------------------------------------
# function to split the model source in tokens.
# Returns a list with tokens with their attributes
#   elem.pos : position in source
#   elem.type : type of token (cf. definition of types
#               in ldw_parse_model_string)
#   elem.text : the text of the token
#   elem.formula.number : sequence number of the 'logical' formula
#                         where the token occurs
# the function returns the stored tokens in a list
# -----------------------------------------------------------------------------
ldw_parse_step1 <- function(modelsrc, types) {
  nmax <- nchar(modelsrc)
  elem.pos <- vector("integer", nmax)
  elem.type <- elem.pos
  elem.text <- vector("character", nmax)
  elem.i <- 1L
  modelsrcw <- paste0(modelsrc, "\n") # working model, must end
  # with a newline for tests via regexpr
  stringliterals <- gregexpr("\"[^\"]*?[\"\n]", modelsrcw)[[1L]]
  if (stringliterals[1L] > -1L) {
    stringliteral.lengths <- attr(stringliterals, "match.length")
    for (i in seq_along(stringliterals)) {
      pfpos <- stringliterals[i]
      pflen <- stringliteral.lengths[i]
      substr(modelsrcw, pfpos + 1L, pfpos + pflen - 2L) <-
        strrep(" ", pflen - 2L)
      elem.pos[elem.i] <- pfpos
      elem.text[elem.i] <- substr(modelsrc, pfpos + 1L, pfpos + pflen - 2L)
      elem.type[elem.i] <- types$stringliteral
      elem.i <- elem.i + 1L
    }
  }
  comments <- gregexpr("[#!].*?\n", modelsrcw)[[1L]]
  if (comments[1] > -1L) {
    comment.lengths <- attr(comments, "match.length")
    for (i in seq_along(comments)) {
      substr(modelsrcw, comments[i], comments[i] + comment.lengths[i] - 1L) <-
        strrep(" ", comment.lengths[i] - 1L)
      # check for stringliterals in comment
      str.in.comment <- (elem.pos > comments[i] &
        elem.pos < comments[i] + comment.lengths[i])
      if (any(str.in.comment)) {
        elem.type[str.in.comment] <- 0
      }
    }
  }
  modelsrcw <- gsub("\t", " ", modelsrcw)
  newlines <- gregexpr("[;\n]", modelsrcw)[[1L]]
  if (newlines[1L] > -1L) {
    for (i in seq_along(newlines)) {
      pfpos <- newlines[i]
      substr(modelsrcw, pfpos, pfpos) <- "\n"
      elem.pos[elem.i] <- pfpos
      elem.text[elem.i] <- "\n"
      elem.type[elem.i] <- types$newline
      elem.i <- elem.i + 1L
    }
  }
  # --------------------- handling spaces in operators ------------------------
  if (grepl("= +~", modelsrcw)) {
    waar <- regexpr("= +~", modelsrcw)[1]
    modelsrcw <- gsub("=( +)~", "=~\\1", modelsrcw)
    tl <- ldw_txtloc(modelsrc, waar)
    lav_msg_warn(gettext("splitting of '=~' deprecated"), tl[1L],
      footer = tl[2L]
    )
  }
  if (grepl("[^=~]~ +~", modelsrcw)) {
    waar <- regexpr("[^=~]~ +~", modelsrcw)[1]
    modelsrcw <- gsub("([^=~])~( +)~", "\\1~~\\2", modelsrcw)
    tl <- ldw_txtloc(modelsrc, waar + 1L)
    lav_msg_warn(gettext("splitting of '~~' deprecated"), tl[1L],
      footer = tl[2L]
    )
  }
  # ----------------------------------------------------------------------------
  lavops <- gregexpr("=~|<~|~\\*~|~~|~|==|<|>|:=|:|\\||%", modelsrcw)[[1]]
  if (lavops[1L] > -1L) {
    lavop.lengths <- attr(lavops, "match.length")
    for (i in seq_along(lavops)) {
      pfpos <- lavops[i]
      pflen <- lavop.lengths[i]
      elem.pos[elem.i] <- pfpos
      elem.text[elem.i] <- substr(modelsrcw, pfpos, pfpos + pflen - 1L)
      elem.type[elem.i] <- types$lavaanoperator
      substr(modelsrcw, pfpos, pfpos + pflen - 1L) <- strrep(" ", pflen)
      elem.i <- elem.i + 1L
    }
  }
  symbols <- gregexpr("[,()/*?^']", modelsrcw)[[1L]] # f1=~x2 + 0.5 ? x3
  symbols1 <- gregexpr("[-+][^.0-9]", modelsrcw)[[1L]] # f1=~x2+x3
  symbols2 <- gregexpr("[._0-9a-df-zA-DF-Z)] *[-+][.0-9]",
    modelsrcw)[[1L]] # f1=~x2+2*x3, len-2 !
  symbols3 <- gregexpr("[^.0-9][eE] *[-+][.0-9]",
    modelsrcw)[[1L]] # f1=~xe+2*x3, len-2 !
  if (symbols1[1L] > -1L) {
    if (symbols[1L] == -1L) {
      symbols <- symbols1
    } else {
      symbols <- c(symbols, symbols1)
    }
  }
  if (symbols2[1L] > -1L) {
    symbols2.lengths <- attr(symbols2, "match.length")
    symbols2 <- symbols2 + symbols2.lengths - 2L
    if (symbols[1L] == -1L) {
      symbols <- symbols2
    } else {
      symbols <- c(symbols, symbols2)
    }
  }
  if (symbols3[1L] > -1L) {
    symbols3.lengths <- attr(symbols3, "match.length")
    symbols3 <- symbols3 + symbols3.lengths - 2L
    if (symbols[1L] == -1L) {
      symbols <- symbols3
    } else {
      symbols <- c(symbols, symbols3)
    }
  }
  if (symbols[1L] > -1L) {
    for (i in seq_along(symbols)) {
      pfpos <- symbols[i]
      substr(modelsrcw, pfpos, pfpos) <- " "
      elem.pos[elem.i] <- pfpos
      elem.text[elem.i] <- substr(modelsrc, pfpos, pfpos)
      elem.type[elem.i] <- types$symbol
      elem.i <- elem.i + 1L
    }
  }
  numliterals <- gregexpr(
    "([ \n][-+][.0-9]|[ \n]\\.[0-9]|[ \n][0-9])[-+\\.0-9eE]*",
    paste0(" ", modelsrcw)
  )[[1]]
  if (numliterals[1L] > -1L) {
    numliteral.lengths <- attr(numliterals, "match.length") - 1L
    for (i in seq_along(numliterals)) {
      pfpos <- numliterals[i]
      pflen <- numliteral.lengths[i]
      substr(modelsrcw, pfpos, pfpos + pflen - 1L) <- strrep(" ", pflen)
      elem.pos[elem.i] <- pfpos
      elem.text[elem.i] <- substr(modelsrc, pfpos, pfpos + pflen - 1L)
      elem.type[elem.i] <- types$numliteral
      elem.i <- elem.i + 1L
    }
  }
  identifiers <- gregexpr(
    "[ \n][_.a-zA-Z][._a-zA-Z0-9]*",
    paste0(" ", modelsrcw)
  )[[1]]
  identifier.lengths <- attr(identifiers, "match.length") - 1L
  for (i in seq_along(identifiers)) {
    pfpos <- identifiers[i]
    pflen <- identifier.lengths[i]
    substr(modelsrcw, pfpos, pfpos + pflen - 1L) <- strrep(" ", pflen)
    elem.pos[elem.i] <- pfpos
    elem.text[elem.i] <- substr(modelsrc, pfpos, pfpos + pflen - 1L)
    elem.type[elem.i] <- types$identifier
    elem.i <- elem.i + 1L
  }
  # check for uninterpreted chars
  wrong <- regexpr("[^\"\n ]", modelsrcw)
  if (wrong != -1L) {
    tl <- ldw_txtloc(modelsrc, wrong)
    lav_msg_stop(gettext("unexpected character"), tl[1L],
      footer = tl[2L]
    )
  }
  # remove unused elements from vectors
  elements <- which(elem.type > 0L)
  elem.pos <- elem.pos[elements]
  elem.type <- elem.type[elements]
  elem.text <- elem.text[elements]
  # order tokens
  token.order <- order(elem.pos)
  elem.pos <- elem.pos[token.order]
  elem.type <- elem.type[token.order]
  elem.text <- elem.text[token.order]
  # concatenate identifiers with only spaces in between - LDW 22/4/2024
  elem.i <- length(elem.pos)
  concatenated <- FALSE
  while (elem.i > 1L) {
    if (any(elem.type[elem.i] == c(types$identifier, types$numliteral)) &&
        elem.type[elem.i - 1L] == types$identifier) {
      spaces.between <- elem.pos[elem.i] - elem.pos[elem.i - 1L] -
        length(elem.text[elem.i - 1L])
      elem.text[elem.i - 1L] <- paste0(
        elem.text[elem.i - 1L],
        strrep(" ", spaces.between), elem.text[elem.i]
      )
      elem.type[elem.i] <- 0L
      concatenated <- TRUE
    }
    elem.i <- elem.i - 1L
  }
  if (concatenated) {
    # remove items with type 0
    elements <- which(elem.type > 0L)
    elem.pos <- elem.pos[elements]
    elem.type <- elem.type[elements]
    elem.text <- elem.text[elements]
  }
  # to set formula number
  elem.formula.number <- rep(0L, length(elem.type))
  frm.number <- 1L
  frm.hasefa <- FALSE
  frm.lastplus <- FALSE
  frm.incremented <- FALSE
  for (i in seq_along(elem.type)) {
    elem.formula.number[i] <- frm.number
    if (elem.type[i] == types$identifier && elem.text[i] == "efa") {
      frm.hasefa <- TRUE
    }
    if (any(elem.text[i] == c("+", "*", "=~"))) {
      if (frm.incremented) {
        frm.number <- frm.number - 1L
        elem.formula.number[i] <- frm.number
        frm.incremented <- FALSE
      }
      frm.lastplus <- TRUE
    } else {
      if (any(elem.type[i] == c(
        types$stringliteral, types$identifier, types$numliteral,
        types$stringliteral, types$symbol
      ))) {
        frm.lastplus <- FALSE
      }
      if (i > 1 && elem.type[i] != types$newline &&
          elem.type[i - 1L] == types$lavaanoperator) {
        frm.hasefa <- FALSE
      }
    }
    if (elem.type[i] == types$newline) {
      if (i > 1 && elem.type[i - 1L] != types$newline) { # ignore multiple nl's
        if (!frm.hasefa && !frm.lastplus) {
          frm.number <- frm.number + 1L
          frm.incremented <- TRUE
        } else {
          frm.hasefa <- FALSE
        }
      }
    } else {
      frm.incremented <- FALSE
    }
  }
  return(list(
    elem.pos = elem.pos, elem.type = elem.type,
    elem.text = elem.text, elem.formula.number = elem.formula.number
  ))
}

# ------------------------ ldw_parse_step2 ------------------------------------
# function to group the modellist tokens in 'mono' formulas.
# mono means that the terms (for formulas other than blocks and constraints)
# are split into separate formulas, e.g.
#     a1 + a2 =~ b1 + b2 becomes
#   / a1 =~ b1
#   | a1 =~ b2
#   | a2 =~ b1
#   \ a2 =~ b2
# newlines are removed
# the function returns a list of formulas
# -----------------------------------------------------------------------------
ldw_parse_step2 <- function(modellist, modelsrc, types) {
  real.operators <- c("=~", "<~", "~*~", "~~", "~", "|", "%")
  welke <- modellist$elem.type != types$newline
  formula.numbers <- unique(modellist$elem.formula.number[welke])
  formulas <- lapply(formula.numbers, function(s) {
    welkenu <- modellist$elem.formula.number == s & welke
    list(
      elem.pos = modellist$elem.pos[welkenu],
      elem.type = modellist$elem.type[welkenu],
      elem.text = modellist$elem.text[welkenu]
    )
  })
  maxnum <- length(formula.numbers) + sum(modellist$elem.text == "+")
  outval <- vector(mode = "list", length = maxnum)
  realnum <- 0L
  for (i in seq_along(formulas)) {
    formul1 <- formulas[[i]]
    opi <- which(formul1$elem.type == types$lavaanoperator)
    nelem <- length(formul1$elem.type)
    if (length(opi) == 0L) {
      tl <- ldw_txtloc(modelsrc, formul1$elem.pos[1])
      lav_msg_stop(gettext("formula without valid operator"), tl[1L],
        footer = tl[2L]
      )
    }
    if (length(opi) > 1L) opi <- opi[1] # only first operator taken
    if (any(formul1$elem.text[opi] == real.operators) &&
        sum(formul1$elem.text == "+") > 0) {
      # check + symbols outside parentheses in left and right hand side
      lhplusjes <- integer(0)
      openparentheses <- 0L
      for (jj in seq.int(1L, opi - 1L)) {
        if (formul1$elem.text[jj] == "(") {
          openparentheses <- openparentheses + 1L
          next
        }
        if (formul1$elem.text[jj] == ")") {
          openparentheses <- openparentheses - 1L
          next
        }
        if (formul1$elem.text[jj] == "+" && openparentheses == 0L) {
          lhplusjes <- c(lhplusjes, jj)
        }
      }
      lhplusjes <- c(lhplusjes, opi)
      plusjes <- integer(0)
      openparentheses <- 0L
      for (jj in seq.int(opi + 1L, nelem)) {
        if (formul1$elem.text[jj] == "(") {
          openparentheses <- openparentheses + 1L
          next
        }
        if (formul1$elem.text[jj] == ")") {
          openparentheses <- openparentheses - 1L
          next
        }
        if (formul1$elem.text[jj] == "+" && openparentheses == 0L) {
          plusjes <- c(plusjes, jj)
        }
      }
      plusjes <- c(plusjes, nelem + 1)
      # splitting lhs and rhs on '+' signs
      for (j in seq_along(lhplusjes)) {
        j0 <- 1L
        if (j > 1L) j0 <- lhplusjes[j - 1L] + 1L
        j1 <- lhplusjes[j] - 1L
        if (j1 < j0) next # skip empty parts
        for (k in seq_along(plusjes)) {
          k0 <- opi + 1L
          k1 <- plusjes[k] - 1L
          if (k > 1L) k0 <- plusjes[k - 1L] + 1L
          if (k1 < k0) next # skip empty parts
          welke <- c(seq.int(j0, j1), opi, seq.int(k0, k1))
          realnum <- realnum + 1L
          outval[[realnum]] <- ldw_parse_sublist(formul1, welke)
        }
      }
    } else {
      realnum <- realnum + 1L
      outval[[realnum]] <- formul1
    }
  }
  outval[seq_len(realnum)]
}

# ------------------------ ldw_parse_check_valid_name -------------------------
# checks if an element of the elem.text member in a list is a valid r-name
# -----------------------------------------------------------------------------
ldw_parse_check_valid_name <- function(formul1, ind, modelsrc) {
  # allow spaces, LDW 22/4/2024
  testitem <- gsub(" ", "_", formul1$elem.text[ind], fixed = TRUE)
  if (make.names(testitem) != testitem) {
    tl <- ldw_txtloc(modelsrc, formul1$elem.pos[ind])
    lav_msg_stop(
      gettext("identifier is either a reserved word (in R) or contains an illegal character"),
      tl[1L],
      footer = tl[2L]
    )
  }
  return(invisible(NULL))
}

# ------------------------ ldw_num_modifier -----------------------------------
# function for transforming a string with numeric values separated by
# semicolons into a numeric vector (used in ldw_parse_get_modifier)
# -----------------------------------------------------------------------------
ldw_num_modifier <- function(txt) { # help function
  vapply(strsplit(txt, ";")[[1]], function(x) {
    if (x == "NA") NA_real_ else as.numeric(x)
  }, 1.0, USE.NAMES = FALSE)
}

# ------------------------ ldw_unpaste ----------------------------------------
# function for transforming a string with string values separated by
# semicolons into a vector (used in ldw_parse_get_modifier)
# -----------------------------------------------------------------------------
ldw_unpaste <- function(text) {
  out <- strsplit(text, ";(NA;)*")[[1]]
  if (grepl(";$", text)) out <- c(out, "")
  out
}

# ------------------------ ldw_evaluate_r_expression --------------------------
# help function to evaluate the value of an r expression formed by the elements
# with index 'from' to 'to' of a formula 'formul1'
# returns "_error_" if evaluation failed
# used only in ldw_parse_get_modifier
# -----------------------------------------------------------------------------
ldw_evaluate_r_expression <- function(formul1, from, to, types) {
  strings <- vapply(seq.int(from, to), function(x) {
    if (formul1$elem.type[x] == types$stringliteral) {
      paste0('"', formul1$elem.text[x], '"')
    } else {
      formul1$elem.text[x]
    }
  }, "")
  txt <- paste(strings, collapse = "")
  result <- try(eval(parse(text = txt), envir = NULL, enclos = baseenv()),
    silent = TRUE
  )
  if (inherits(result, "try-error")) {
    return("_error_")
  }
  return(result)
}

# ------------------------ ldw_adapt_vector_type ------------------------------
# help function to dynamically adapt the type of a vector in a c(...) sequence
# used only in ldw_parse_get_modifier
# -----------------------------------------------------------------------------
ldw_adapt_vector_type <- function(typenu, typetoadd, texttoadd, types) {
  if (texttoadd != "NA") {
    if (typenu == 0) {
      typenu <- typetoadd
    } else {
      if (typenu != typetoadd) typenu <- types$stringliteral
    }
  } else if (typenu == 0) {
    typenu <- types$numliteral
  }
  return(typenu)
}

# ------------------------ ldw_parse_get_modifier -----------------------------
# The function takes a list with tokens belonging to a single 'mono' lavaan
# formula as input. The other arguments are:
#   lhs : check for lhs or rhs modifier
#   opi : index of the lavaan operator in the list-items
#   modelsrc : the model source string (for error messages and warnings)
#   types : the types of tokens
#   rme : index of last element of modifier in formula (*)
#   rmeprev : index of first element of modifier in formula - 1L (*)
# The function returns the modifier detected as element of a list
# with name the modifier type (efa, fixed, start, label, lower, upper, prior or
# rv) and value an array of values (length > 1 if vector via c(...)) for the
# modifier value.
# (*) if rme > rmeprev the rhs is limited to the elements with index
# rmeprev+1:rme, this is to support multiple modifiers for the same element.
# An error message is produced when no modifier can be determined.
# -----------------------------------------------------------------------------
ldw_parse_get_modifier <- function(formul1, lhs, opi, modelsrc, types,
                                   rme = 0L, rmeprev = 0L) {
  if (rme > rmeprev) {
    welke <- c(seq.int(1L, opi), seq.int(rmeprev + 1L, rme), length(formul1))
    formul1 <- ldw_parse_sublist(formul1, welke)
  }
  nelem <- length(formul1$elem.type)
  # remove unnecessary parentheses (one element between parentheses, previous
  # no identifier)
  check.more <- TRUE
  while (check.more && nelem > 4L) {
    check.more <- FALSE
    for (par.i in seq.int(3L, nelem - 1L)) {
      if (formul1$elem.text[par.i - 1L] == "(" &&
          formul1$elem.text[par.i + 1L] == ")" &&
          formul1$elem.type[par.i - 2L] != types$identifier) {
        formul1$elem.type[par.i - 1L] <- 0L
        formul1$elem.type[par.i + 1L] <- 0L
        check.more <- TRUE
      }
    }
    if (check.more) {
      formul1 <- ldw_parse_sublist(formul1, which(formul1$elem.type > 0))
      nelem <- length(formul1$elem.type)
    }
  }
  if (lhs) { # modifier on left hand side
    # only 1 possibility : efa ( expression-resulting-in-char ) *
    #                      identifier operator ... (rhs) ...
    if (formul1$elem.text[1L] == "efa" && formul1$elem.text[2L] == "(" &&
        formul1$elem.text[opi - 3L] == ")" &&
        formul1$elem.text[opi - 2L] == "*") {
      temp <- ldw_evaluate_r_expression(formul1, 3L, opi - 4L, types)
      if (is.character(temp) && temp[1] != "_error_") {
        return(list(efa = temp))
      }
    }
    tl <- ldw_txtloc(modelsrc, formul1$elem.pos[1L])
    lav_msg_stop(gettext("invalid left hand side modifier"), tl[1L],
      footer = tl[2L]
    )
  } else { # modifier on right hand side
    # check for vectors c(...), start(...), fixed(...), ...
    for (j in (opi + 1L):(nelem - 2L)) {
      if (formul1$elem.text[j + 1L] == "(") {
        if (formul1$elem.text[j] == "c") {
          vector.type <- 0
          labnu <- j + 2L
          lab <- formul1$elem.text[labnu]
          vector.type <- ldw_adapt_vector_type(
            vector.type,
            formul1$elem.type[labnu], formul1$elem.text[labnu], types
          )
          while (formul1$elem.text[labnu + 1L] == ",") {
            labnu <- labnu + 2L
            lab <- c(lab, formul1$elem.text[labnu])
            vector.type <- ldw_adapt_vector_type(
              vector.type,
              formul1$elem.type[labnu], formul1$elem.text[labnu], types
            )
          }
          if (vector.type == 0) vector.type <- types$stringliteral
          if (formul1$elem.text[labnu + 1L] == ")") {
            formul1$elem.type[seq.int(j, labnu)] <- 0
            formul1$elem.type[labnu + 1L] <- vector.type
            formul1$elem.text[labnu + 1L] <- paste(lab, collapse = ";")
            formul1 <- ldw_parse_sublist(formul1, which(formul1$elem.type > 0))
            nelem <- length(formul1$elem.type)
            break
          } else {
            tl <- ldw_txtloc(modelsrc, formul1$elem.pos[j])
            lav_msg_stop(gettext("invalid vector specification"), tl[1L],
              footer = tl[2L]
            )
          }
        }
        if (j + 3L < nelem && formul1$elem.text[j + 3L] == "," &&
            any(formul1$elem.text[j] == c(
              "start", "fixed", "label", "upp", "lower", "rv", "prior"
            ))) {
          vector.type <- 0
          labnu <- j + 2L
          lab <- formul1$elem.text[labnu]
          vector.type <- ldw_adapt_vector_type(
            vector.type,
            formul1$elem.type[labnu], formul1$elem.text[labnu], types
          )
          while (formul1$elem.text[labnu + 1L] == ",") {
            labnu <- labnu + 2L
            lab <- c(lab, formul1$elem.text[labnu])
            vector.type <- ldw_adapt_vector_type(
              vector.type,
              formul1$elem.type[labnu], formul1$elem.text[labnu], types
            )
          }
          if (vector.type == 0) vector.type <- types$stringliteral
          if (formul1$elem.text[labnu + 1L] == ")") {
            formul1$elem.type[seq.int(j + 3L, labnu)] <- 0
            formul1$elem.type[j + 2L] <- vector.type
            formul1$elem.text[j + 2L] <- paste(lab, collapse = ";")
            formul1 <- ldw_parse_sublist(formul1, which(formul1$elem.type > 0))
            nelem <- length(formul1$elem.type)
            break
          } else {
            tl <- ldw_txtloc(modelsrc, formul1$elem.pos[j])
            lav_msg_stop(gettext("invalid vector specification"), tl[1L],
              footer = tl[2L]
            )
          }
        }
      }
    }
    # possibilities
    # stringliteral|identifier * identifier|numliteral
    # numliteral * identifier|numliteral
    # numliteral ? identifier|numliteral
    # fixed|start|upper|lower|rv|prior(numliteral) * identifier|numliteral
    # label|equal (stringliteral|identifier) * identifier|numliteral
    # ==> literals before * or ? can be replaced by R-expression
    #     resulting in correct type
    # check on last element being a numliteral or identifier
    # already done in calling function
    if (all(formul1$elem.text[nelem - 1L] != c("*", "?"))) {
      tl <- ldw_txtloc(modelsrc, formul1$elem.pos[nelem - 1L])
      lav_msg_stop(gettext("invalid modifier symbol (should be '*' or '?')"),
        tl[1L],
        footer = tl[2L]
      )
    }
    if (formul1$elem.text[nelem - 1L] == "?") {
      temp <- ldw_evaluate_r_expression(formul1, opi + 1L, nelem - 2L, types)
      if (is.numeric(temp)) {
        return(list(start = temp))
      }
      tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 1L])
      lav_msg_stop(gettext(
        "invalid start value expression (should be numeric)"), tl[1L],
        footer = tl[2L]
      )
    }
    if (nelem == opi + 3) {
      if (formul1$elem.text[opi + 1L] == "NA") {
        formul1$elem.type[opi + 1L] <- types$numliteral
      }
      if (any(formul1$elem.type[opi + 1L] ==
          c(types$identifier, types$stringliteral))) {
        return(list(label = ldw_unpaste(formul1$elem.text[opi + 1L])))
      } else {
        if (formul1$elem.type[opi + 1L] == types$numliteral) {
          return(list(fixed = ldw_num_modifier(formul1$elem.text[opi + 1L])))
        } else {
          tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 1L])
          lav_msg_stop(
            gettext("invalid value (should be numeric, identifier or string)"),
            tl[1L],
            footer = tl[2L]
          )
        }
      }
    }
    if (formul1$elem.text[opi + 2L] == "(" &&
        formul1$elem.text[nelem - 2L] == ")") {
      if (any(formul1$elem.text[opi + 1L] ==
          c("fixed", "start", "upper", "lower", "prior"))) {
        if (nelem == opi + 6L) {
          if (formul1$elem.text[opi + 3L] == "NA") {
            formul1$elem.type[opi + 3L] <- types$numliteral
          }
          if (formul1$elem.type[opi + 3L] == types$numliteral) {
            outje <- list()
            outje[[formul1$elem.text[opi + 1L]]] <-
              ldw_num_modifier(formul1$elem.text[opi + 3L])
            return(outje)
          }
          tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 3L])
          lav_msg_stop(gettext("invalid value (should be numeric)"), tl[1L],
            footer = tl[2L]
          )
        }
        temp <- ldw_evaluate_r_expression(formul1, opi + 3L, nelem - 3L, types)
        if (is.numeric(temp)) {
          outje <- list()
          outje[[formul1$elem.text[opi + 1L]]] <- temp
          return(outje)
        }
        tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 3L])
        lav_msg_stop(
          gettext("invalid value R-expression (should be numeric)"),
          tl[1L],
          footer = tl[2L]
        )
      }
      if (any(formul1$elem.text[opi + 1L] == c("equal", "rv", "label"))) {
        modname <- formul1$elem.text[opi + 1L]
        if (modname == "equal") modname <- "label"
        if (nelem == opi + 6L) {
          if (formul1$elem.type[opi + 3L] == types$stringliteral) {
            outje <- list()
            outje[[modname]] <- ldw_unpaste(formul1$elem.text[opi + 3L])
            return(outje)
          }
          tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 3L])
          lav_msg_stop(gettext("invalid value (should be string)"), tl[1L],
            footer = tl[2L]
          )
        }
        temp <- ldw_evaluate_r_expression(formul1, opi + 3L, nelem - 3L, types)
        if (is.character(temp) && temp[1] != "_error_") {
          outje <- list()
          outje[[modname]] <- temp
          return(outje)
        }
        tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 3L])
        lav_msg_stop(
          gettext("invalid value R-expression (should be a string)"),
          tl[1L],
          footer = tl[2L]
        )
      }
    }
    temp <- ldw_evaluate_r_expression(formul1, opi + 1L, nelem - 2L, types)
    if (is.numeric(temp)) {
      return(list(fixed = temp))
    }
    if (is.character(temp) && temp[1] != "_error_") {
      return(list(label = temp))
    }
    tl <- ldw_txtloc(modelsrc, formul1$elem.pos[opi + 1L])
    lav_msg_stop(gettext("invalid modifier specification"), tl[1L],
      footer = tl[2L]
    )
  }
}

# -------------------- main parsing function ----------------------------------
ldw_parse_model_string <- function(model.syntax = "", as.data.frame. = FALSE) {
  stopifnot(length(model.syntax) > 0L)
  # replace 'strange' tildes (in some locales) (new in 0.6-6)
  modelsrc <- gsub(
    pattern = "\u02dc", replacement = "~",
    paste(unlist(model.syntax), "", collapse = "\n")
  )
  types <- ldw_create_enum(c(
    "identifier", "numliteral", "stringliteral",
    "symbol", "lavaanoperator", "newline"
  ))
  modellist <- ldw_parse_step1(modelsrc, types)
  if (lav_debug()) {
    print(data.frame(
      pos = modellist$elem.pos,
      type = types$enum.names[modellist$elem.type],
      text = modellist$elem.text,
      formula = modellist$elem.formula.number
    ))
  }
  formulalist <- ldw_parse_step2(modellist, modelsrc, types)

  # ---- analyse syntax formulas and put in flat. -----
  max.mono.formulas <- length(formulalist)
  flat.lhs <- character(max.mono.formulas)
  flat.op <- character(max.mono.formulas)
  flat.rhs <- character(max.mono.formulas)
  flat.rhs.mod.idx <- integer(max.mono.formulas)
  flat.block <- integer(max.mono.formulas) # keep track of groups using ":" opr
  flat.fixed <- character(max.mono.formulas) # only for display purposes!
  flat.start <- character(max.mono.formulas) # only for display purposes!
  flat.lower <- character(max.mono.formulas) # only for display purposes!
  flat.upper <- character(max.mono.formulas) # only for display purposes!
  flat.label <- character(max.mono.formulas) # only for display purposes!
  flat.prior <- character(max.mono.formulas)
  flat.efa <- character(max.mono.formulas)
  flat.rv <- character(max.mono.formulas)
  flat.idx <- 0L
  mod.idx <- 0L
  constraints <- list()
  mod <- list()
  block <- 1L
  block.op <- FALSE
  if (lav_debug()) {
    cat("formula to analyse:\n")
  }
  # operators <- c("=~", "<~", "~*~", "~~", "~", "==", "<", ">", ":=",
  #                ":", "\\|", "%")
  constraint_operators <- c("==", "<", ">", ":=")
  for (s in seq_along(formulalist)) {
    formul1 <- formulalist[[s]]
    if (lav_debug()) {
      cat(vapply(seq_along(formul1$elem.type), function(j) {
        if (formul1$elem.type[j] == types$stringliteral) {
          return(dQuote(formul1$elem.text[j], FALSE))
        }
        return(formul1$elem.text[j])
      }, ""), "\n")
    }
    nelem <- length(formul1$elem.type)
    # where is the operator
    opi <- match(types$lavaanoperator, formul1$elem.type)
    # opi <- which(formul1$elem.type == types$lavaanoperator)
    # if (length(opi) > 1L) opi <- opi[1L]
    op <- formul1$elem.text[opi]
    if (any(op == constraint_operators)) { # ----- constraints -------
      lhs <- paste(formul1$elem.text[seq.int(1L, opi - 1L)], collapse = "")
      rhs <- paste(formul1$elem.text[seq.int(opi + 1L, nelem)], collapse = "")
      constraints <- c(
        constraints,
        list(list(op = op, lhs = lhs, rhs = rhs, user = 1L))
      )
      next
    }
    if (op == ":") { # ------------------------- block start ------------------
      if (opi == 1L) {
        tl <- ldw_txtloc(modelsrc, formul1$elem.pos[1])
        lav_msg_stop(
          gettext("Missing block identifier. The correct syntax is: \"LHS: RHS\", where LHS is a block identifier (eg group or level), and RHS is the group/level/block number or label."),
          tl[1L],
          footer = tl[2L]
        )
      }
      if (opi > 2L || all(tolower(formul1$elem.text[1]) !=
          c("group", "level", "block", "class"))) {
        tl <- ldw_txtloc(modelsrc, formul1$elem.pos[1])
        lav_msg_stop(
          gettext("Invalid block identifier. The correct syntax is: \"LHS: RHS\", where LHS is a block identifier (eg group or level), and RHS is the group/level/block number or label."),
          tl[1L],
          footer = tl[2L]
        )
      }
      if (nelem != 3 || all(formul1$elem.type[3] != c(
        types$stringliteral, types$identifier, types$numliteral
      ))) {
        tl <- ldw_txtloc(modelsrc, formul1$elem.pos[1])
        lav_msg_stop(
          gettext("syntax contains block identifier \"group\" with missing or invalid number/label. The correct syntax is: \"LHS: RHS\", where LHS is a block identifier (eg group or level), and RHS is the group/level/block number or label."),
          tl[1L],
          footer = tl[2L]
        )
      }
      flat.idx <- flat.idx + 1L
      flat.lhs[flat.idx] <- formul1$elem.text[1]
      flat.op[flat.idx] <- op
      flat.rhs[flat.idx] <- formul1$elem.text[3]
      flat.rhs.mod.idx[flat.idx] <- 0L
      if (block.op) {
        block <- block + 1L
      } else {
        if (flat.idx != 1) {
          tl <- ldw_txtloc(modelsrc, formul1$elem.pos[1])
          lav_msg_warn(
            gettext("First block defined after other formulas"),
            tl[1L],
            footer = tl[2L]
          )
        }
      }
      flat.block[flat.idx] <- block
      block.op <- TRUE
      next
    }
    # ------------------ relational operators ---------------------------------
    # warn if some identifiers contain spaces
    contsp <- which(formul1$elem.type == types$identifier &
      grepl(" ", formul1$elem.text, fixed = TRUE))
    if (length(contsp) > 0L) {
      tl <- ldw_txtloc(modelsrc, formul1$elem.pos[contsp[1L]])
      lav_msg_warn(
        gettextf(
          "having identifiers with spaces ('%s') is deprecated",
          formul1$elem.text[contsp[1]]
        ),
        tl[1L],
        footer = tl[2L]
      )
    }
    # checks for valid names in lhs and rhs
    ldw_parse_check_valid_name(formul1, opi - 1L, modelsrc) # valid name lhs
    for (j in seq.int(opi + 1L, nelem)) { # valid names rhs
      if (formul1$elem.type[j] == types$identifier &&
          formul1$elem.text[j] != "NA") {
        ldw_parse_check_valid_name(formul1, j, modelsrc)
      }
    }
    if (formul1$elem.type[nelem] != types$identifier &&
        (formul1$elem.type[nelem] != types$numliteral ||
         all(op != c("~", "=~")))) {
      tl <- ldw_txtloc(modelsrc, formul1$elem.pos[nelem])
      lav_msg_stop(
        gettext("Last element of rhs part expected to be an identifier or, for operator ~ or =~, a numeric literal!"),
        tl[1L],
        footer = tl[2L]
      )
    }
    # intercept fixed on 0
    # replace 'lhs ~ 0' => 'lhs ~ 0 * 1' - intercept fixed on zero
    if (formul1$elem.text[nelem] == "0" && op == "~" && opi == nelem - 1L) {
      formul1$elem.type <- c(formul1$elem.type, types$symbol, types$numliteral)
      formul1$elem.text <- c(formul1$elem.text, "*", "1")
      formul1$elem.pos <- c(formul1$elem.pos, rep(formul1$elem.pos[nelem], 2))
      nelem <- length(formul1$elem.type)
    }
    # phantom latent variable
    # replace 'lhs =~ 0' => 'lhs =~ fixed(0)*lhs', 0 can be other numliteral
    # also, lhs is last element before '=~'
    if (formul1$elem.type[nelem] == types$numliteral && op == "=~") {
      formul1$elem.type <- c(
        formul1$elem.type[seq.int(1L, nelem - 1L)],
        types$identifier, types$symbol, types$numliteral,
        types$symbol, types$symbol, types$identifier
      )
      formul1$elem.text <- c(
        formul1$elem.text[seq.int(1L, nelem - 1L)],
        "fixed", "(", formul1$elem.text[nelem], ")", "*",
        formul1$elem.text[opi - 1L]
      )
      formul1$elem.pos <- c(
        formul1$elem.pos[seq.int(1L, nelem - 1L)],
        rep(formul1$elem.pos[nelem], 6)
      )
      nelem <- length(formul1$elem.type)
    }
    # handling interaction variable types
    colons <- which(formul1$elem.text[seq.int(1L, nelem - 1L)] == ":" &
      formul1$elem.type[seq.int(2L, nelem)] == types$identifier)
    # check at most 1 colon
    if (length(colons) > 1) {
      tl <- ldw_txtloc(modelsrc, formul1$elem.pos[colons[2]])
      lav_msg_stop(
        gettext("Three-way or higher-order interaction terms (using multiple colons) are not supported in the lavaan syntax; please manually construct the product terms yourself in the data.frame, give them an appropriate name, and then you can use these interaction variables as any other (observed) variable in the model syntax."),
        tl[1L],
        footer = tl[2L]
      )
    }
    if (length(colons) == 1) {
      # collapse items around colon "a" ":" "b" => "a:b"
      formul1$elem.text[colons - 1L] <-
        paste(formul1$elem.text[seq.int(colons - 1L, colons + 1L)],
          collapse = ""
        )
      formul1 <- ldw_parse_sublist(formul1, seq.int(1L, colons - 1L))
      nelem <- length(formul1$elem.type)
    }
    # modifiers
    rhsmodelems <- which(seq_along(formul1$elem.type) > opi &
      formul1$elem.type == types$symbol &
      (formul1$elem.text == "*" | formul1$elem.text == "?"))
    for (j in seq_along(rhsmodelems)) {
      if (sum(formul1$elem.text[seq.int(opi, rhsmodelems[j])] == "(") !=
          sum(formul1$elem.text[seq.int(opi, rhsmodelems[j])] == ")")) {
        rhsmodelems[j] <- 0L
      }
    }
    rhsmodelems <- rhsmodelems[rhsmodelems != 0L]
    if (length(rhsmodelems) == 0L) rhsmodelems <- opi
    lhs <- formul1$elem.text[opi - 1L]
    rhs <- formul1$elem.text[nelem]
    for (rmei in seq_along(rhsmodelems)) {
      rme <- rhsmodelems[rmei]
      rmeprev <- if (rmei == 1L) opi else rhsmodelems[rmei - 1L]
      already <- which(flat.lhs == lhs & flat.op == op &
        flat.block == block &
        (flat.rhs == rhs |
         (flat.rhs == "" & op == "~" &
          formul1$elem.type[nelem] == types$numliteral)))
      if (length(already) == 1L) {
        idx <- already
      } else {
        flat.idx <- flat.idx + 1L
        idx <- flat.idx
        flat.lhs[idx] <- lhs
        flat.op[idx] <- op
        flat.rhs[idx] <- rhs
        flat.block[idx] <- block
        if (formul1$elem.type[nelem] == types$numliteral) {
          if (op == "~") flat.rhs[idx] <- ""
        }
      }
      lhsmod <- list()
      if (opi > 2 && rmei == 1L) {
        lhsmod <- ldw_parse_get_modifier(formul1, TRUE, opi, modelsrc, types)
      }
      rhsmod <- list()
      if (nelem - opi > 1) {
        rhsmod <- ldw_parse_get_modifier(
          formul1, FALSE, opi, modelsrc, types, rme, rmeprev
        )
      }
      flat.fixed[idx] <- if (is.null(rhsmod$fixed)) {
        flat.fixed[idx]
      } else {
        paste(rhsmod$fixed, collapse = ";")
      }
      flat.start[idx] <- if (is.null(rhsmod$start)) {
        flat.start[idx]
      } else {
        paste(rhsmod$start, collapse = ";")
      }
      flat.label[idx] <- if (is.null(rhsmod$label)) {
        flat.label[idx]
      } else {
        paste(rhsmod$label, collapse = ";")
      }
      flat.lower[idx] <- if (is.null(rhsmod$lower)) {
        flat.lower[idx]
      } else {
        paste(rhsmod$lower, collapse = ";")
      }
      flat.upper[idx] <- if (is.null(rhsmod$upper)) {
        flat.upper[idx]
      } else {
        paste(rhsmod$upper, collapse = ";")
      }
      flat.prior[idx] <- if (is.null(rhsmod$prior)) {
        flat.prior[idx]
      } else {
        paste(rhsmod$prior, collapse = ";")
      }
      flat.efa[idx] <- if (is.null(lhsmod$efa)) {
        flat.efa[idx]
      } else {
        paste(lhsmod$efa, collapse = ";")
      }
      flat.rv[idx] <- if (is.null(rhsmod$rv)) {
        flat.rv[idx]
      } else {
        paste(rhsmod$rv, collapse = ";")
      }
      modnu <- c(lhsmod, rhsmod)
      if (length(modnu) > 0L) { # there is a modifier here
        if (length(already) == 0) { # unknown element
          mod.idx <- mod.idx + 1L
          cur.mod.idx <- mod.idx
          mod[[cur.mod.idx]] <- modnu
          flat.rhs.mod.idx[idx] <- cur.mod.idx
        } else { # known element
          if (flat.rhs.mod.idx[idx] == 0) { # not yet a modifier
            mod.idx <- mod.idx + 1L
            cur.mod.idx <- mod.idx
            mod[[cur.mod.idx]] <- modnu
            flat.rhs.mod.idx[idx] <- cur.mod.idx
          } else { # use existing modifier index
            cur.mod.idx <- flat.rhs.mod.idx[idx]
            overwrite <- names(modnu)[names(modnu) %in%
              names(mod[[cur.mod.idx]])]
            if (length(overwrite) > 0) {
              tl <- ldw_txtloc(modelsrc, formul1$elem.pos[rmeprev + 1L])
              lav_msg_warn(
                gettextf(
                  "modifier %s specified multiple times, overwritten",
                  overwrite[1L]
                ),
                tl[1L],
                footer = tl[2L]
              )
            }
            mod[[cur.mod.idx]] <- modifyList(mod[[cur.mod.idx]], modnu)
          }
        }
      }
    }
  }
  # create flat (omit items without operator)
  filled.ones <- which(flat.op != "")
  flat <- list(
    lhs = flat.lhs[filled.ones],
    op = flat.op[filled.ones],
    rhs = flat.rhs[filled.ones],
    mod.idx = flat.rhs.mod.idx[filled.ones],
    block = flat.block[filled.ones],
    fixed = flat.fixed[filled.ones],
    start = flat.start[filled.ones],
    lower = flat.lower[filled.ones],
    upper = flat.upper[filled.ones],
    label = flat.label[filled.ones],
    prior = flat.prior[filled.ones],
    efa = flat.efa[filled.ones],
    rv = flat.rv[filled.ones]
  )
  # change op for intercepts (for convenience only)
  int.idx <- which(flat.op == "~" & flat.rhs == "")
  if (length(int.idx) > 0L) {
    flat$op[int.idx] <- "~1"
  }
  # new in 0.6, reorder covariances here!
  flat <- lav_partable_covariance_reorder(flat)
  if (as.data.frame.) {
    flat <- as.data.frame(flat, stringsAsFactors = FALSE)
  }
  # new in 0.6-4: check for 'group' within 'level'
  if (any(flat.op == ":")) {
    op.idx <- which(flat.op == ":")
    if (length(op.idx) < 2L) {
      # only 1 block identifier? this is weird -> give warning
      lav_msg_warn(gettext("syntax contains only a single block identifier!"))
    } else {
      first.block <- flat.lhs[op.idx[1L]]
      second.block <- flat.lhs[op.idx[2L]]
      if (first.block == "level" && second.block == "group") {
        lav_msg_stop(gettext("groups can not be nested within levels!"))
      }
    }
  }
  attr(flat, "modifiers") <- mod
  attr(flat, "constraints") <- constraints
  flat
}
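## Illustration only (not part of the package sources): parsing a small model
## with the internal parser; the model string and column selection below are
## assumptions chosen for the demonstration.
# flat <- lavaan:::ldw_parse_model_string("f =~ x1 + fixed(0.5)*x2\nf ~ 1",
#                                         as.data.frame. = TRUE)
# flat[, c("lhs", "op", "rhs", "block", "fixed")]
# attr(flat, "modifiers")    # list of modifiers, here fixed = 0.5 for f =~ x2
# attr(flat, "constraints")  # empty list for this model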
flat.lhs[filled.ones], op = flat.op[filled.ones], rhs = flat.rhs[filled.ones], mod.idx = flat.rhs.mod.idx[filled.ones], block = flat.block[filled.ones], fixed = flat.fixed[filled.ones], start = flat.start[filled.ones], lower = flat.lower[filled.ones], upper = flat.upper[filled.ones], label = flat.label[filled.ones], prior = flat.prior[filled.ones], efa = flat.efa[filled.ones], rv = flat.rv[filled.ones] ) # change op for intercepts (for convenience only) int.idx <- which(flat.op == "~" & flat.rhs == "") if (length(int.idx) > 0L) { flat$op[int.idx] <- "~1" } # new in 0.6, reorder covariances here! flat <- lav_partable_covariance_reorder(flat) if (as.data.frame.) { flat <- as.data.frame(flat, stringsAsFactors = FALSE) } # new in 0.6-4: check for 'group' within 'level' if (any(flat.op == ":")) { op.idx <- which(flat.op == ":") if (length(op.idx) < 2L) { # only 1 block identifier? this is weird -> give warning lav_msg_warn(gettext("syntax contains only a single block identifier!")) } else { first.block <- flat.lhs[op.idx[1L]] second.block <- flat.lhs[op.idx[2L]] if (first.block == "level" && second.block == "group") { lav_msg_stop(gettext("groups cannot be nested within levels!")) } } } attr(flat, "modifiers") <- mod attr(flat, "constraints") <- constraints flat } lavaan/R/lav_mvnorm.R0000644000176200001440000006312314627656441014244 0ustar liggesusers# the multivariate normal distribution # 1) loglikelihood (from raw data, or sample statistics) # 2) derivatives with respect to mu, Sigma, vech(Sigma) # 3) casewise scores with respect to mu, vech(Sigma), mu + vech(Sigma) # 4) hessian mu + vech(Sigma) # 5) information h0 mu + vech(Sigma) # 5a: (unit) expected information # 5b: (unit) observed information # 5c: (unit) first-order information # 6) inverted information h0 mu + vech(Sigma) # 6a: (unit) inverted expected information # 6b: / # 6c: / # 7) ACOV h0 mu + vech(Sigma) # 7a: 1/N * inverted expected information # 7b: 1/N * inverted observed information # 7c: 1/N * inverted first-order information # 7d: sandwich acov # YR 07 Feb 2016: first version # YR 24 Mar 2016: added firstorder information, hessian logl # YR 19 Jan 2017: added lav_mvnorm_inverted_information_expected # YR 04 Oct 2018: adding wt= argument, and missing meanstructure= # YR 27 Jun 2018: adding cluster.idx= argument for information_firstorder # YR 24 Jul 2022: adding correlation= argument for information_expected # (only for catml; not for correlation = TRUE!) # 0.
densities lav_mvnorm_dmvnorm <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, Sigma.inv = NULL, Sinv.method = "eigen", x.idx = integer(0L), x.mean = NULL, x.cov = NULL, log = TRUE) { if (is.matrix(Y)) { if (is.null(Mu) && is.null(Sigma) && is.null(Sigma.inv)) { out <- lav_mvnorm_loglik_data_z(Y = Y, casewise = TRUE) } else { out <- lav_mvnorm_loglik_data( Y = Y, Mu = Mu, Sigma = Sigma, casewise = TRUE, Sinv.method = Sinv.method ) } } else { # just one P <- length(Y) LOG.2PI <- log(2 * pi) if (is.null(Mu) && is.null(Sigma) && is.null(Sigma.inv)) { # mahalanobis distance DIST <- sum(Y * Y) out <- -(P * LOG.2PI + DIST) / 2 } else { if (is.null(Sigma.inv)) { Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(Sigma.inv, "logdet") } else { logdet <- attr(Sigma.inv, "logdet") if (is.null(logdet)) { # compute - ln|Sigma.inv| ev <- eigen(Sigma.inv, symmetric = TRUE, only.values = TRUE) logdet <- -1 * sum(log(ev$values)) } } # mahalanobis distance Yc <- Y - Mu DIST <- sum(Yc %*% Sigma.inv * Yc) out <- -(P * LOG.2PI + logdet + DIST) / 2 } } if (!is.null(wt)) { out <- out * wt } # x.idx? if (length(x.idx) > 0L) { if (is.null(Sigma) && is.null(x.cov)) { lav_msg_stop(gettext("when x.idx is not empty, we need Sigma or x.cov")) } if (is.matrix(Y)) { X <- Y[, x.idx, drop = FALSE] } else { X <- Y[x.idx] } Mu.X <- x.mean Sigma.X <- x.cov if (is.null(x.mean)) { Mu.X <- as.numeric(Mu)[x.idx] } if (is.null(x.cov)) { Sigma.X <- Sigma[x.idx, x.idx, drop = FALSE] } logl.X <- lav_mvnorm_dmvnorm( Y = X, wt = wt, Mu = Mu.X, Sigma = Sigma.X, Sigma.inv = NULL, Sinv.method = Sinv.method, x.idx = integer(0L), log = TRUE ) # subtract logl.X out <- out - logl.X } if (!log) { out <- exp(out) } out } # 1. likelihood # 1a: input is raw data # (note casewise = TRUE same as: dmvnorm(Y, mean, sigma, log = TRUE)) lav_mvnorm_loglik_data <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), x.mean = NULL, x.cov = NULL, casewise = FALSE, Sinv.method = "eigen") { # Y must be a matrix (use lav_mvnorm_dmvnorm() for non-matrix input) stopifnot(is.matrix(Y)) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } P <- NCOL(Y) Mu <- as.numeric(Mu) if (casewise) { LOG.2PI <- log(2 * pi) # invert Sigma if (Sinv.method == "chol") { cS <- chol(Sigma) icS <- backsolve(cS, diag(P)) Yc <- t(t(Y) - Mu) DIST <- rowSums((Yc %*% icS)^2) logdet <- -2 * sum(log(diag(icS))) } else { Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(Sigma.inv, "logdet") # mahalanobis distance Yc <- t(t(Y) - Mu) DIST <- rowSums(Yc %*% Sigma.inv * Yc) } loglik <- -(P * LOG.2PI + logdet + DIST) / 2 # weights if (!is.null(wt)) { loglik <- loglik * wt } } else { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") sample.mean <- out$center sample.cov <- out$cov } else { sample.mean <- base::.colMeans(Y, m = N, n = P) sample.cov <- lav_matrix_cov(Y) } loglik <- lav_mvnorm_loglik_samplestats( sample.mean = sample.mean, sample.cov = sample.cov, sample.nobs = N, Mu = Mu, Sigma.inv = Sigma.inv ) } # fixed.x? 
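# (clarifying note, not in the original source: when x.idx is non-empty,
# the marginal log-likelihood of the exogenous 'x' variables is subtracted
# below, so the result is the conditional log-likelihood
# log f(y|x) = log f(y,x) - log f(x); a minimal sketch of this identity,
# using hypothetical demo objects Y.demo/Mu.demo/S.demo:)
if (FALSE) { # illustrative sketch only -- never executed
  Y.demo <- matrix(rnorm(300), 100, 3)
  Mu.demo <- colMeans(Y.demo)
  S.demo <- lav_matrix_cov(Y.demo)
  ll.yx <- lav_mvnorm_loglik_data(Y.demo, Mu = Mu.demo, Sigma = S.demo)
  ll.x <- lav_mvnorm_loglik_data(Y.demo[, 1, drop = FALSE],
    Mu = Mu.demo[1], Sigma = S.demo[1, 1, drop = FALSE])
  ll.cond <- lav_mvnorm_loglik_data(Y.demo, Mu = Mu.demo, Sigma = S.demo,
    x.idx = 1L)
  stopifnot(all.equal(ll.cond, ll.yx - ll.x))
}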
if (length(x.idx) > 0L) { Mu.X <- x.mean Sigma.X <- x.cov if (is.null(x.mean)) { Mu.X <- as.numeric(Mu)[x.idx] } if (is.null(x.cov)) { Sigma.X <- Sigma[x.idx, x.idx, drop = FALSE] } loglik.x <- lav_mvnorm_loglik_data( Y = Y[, x.idx, drop = FALSE], wt = wt, Mu = Mu.X, Sigma = Sigma.X, x.idx = integer(0L), casewise = casewise, Sinv.method = Sinv.method ) # subtract logl.X loglik <- loglik - loglik.x } loglik } # 1b: input are sample statistics (mean, cov, N) only lav_mvnorm_loglik_samplestats <- function(sample.mean = NULL, sample.cov = NULL, sample.nobs = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), x.mean = NULL, x.cov = NULL, Sinv.method = "eigen", Sigma.inv = NULL) { P <- length(sample.mean) N <- sample.nobs Mu <- as.numeric(Mu) sample.mean <- as.numeric(sample.mean) LOG.2PI <- log(2 * pi) if (is.null(Sigma.inv)) { Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(Sigma.inv, "logdet") } else { logdet <- attr(Sigma.inv, "logdet") if (is.null(logdet)) { # compute - ln|Sigma.inv| ev <- eigen(Sigma.inv, symmetric = TRUE, only.values = TRUE) logdet <- -1 * sum(log(ev$values)) } } # tr(Sigma^{-1} %*% S) DIST1 <- sum(Sigma.inv * sample.cov) # (ybar - mu)^T %*% Sigma.inv %*% (ybar - mu) Diff <- as.numeric(sample.mean - Mu) DIST2 <- sum(as.numeric(crossprod(Diff, Sigma.inv)) * Diff) loglik <- -N / 2 * (P * LOG.2PI + logdet + DIST1 + DIST2) # fixed.x? if (length(x.idx) > 0L) { Mu.X <- x.mean Sigma.X <- x.cov if (is.null(x.mean)) { Mu.X <- Mu[x.idx] } if (is.null(x.cov)) { Sigma.X <- Sigma[x.idx, x.idx, drop = FALSE] } sample.mean.x <- sample.mean[x.idx] sample.cov.x <- sample.cov[x.idx, x.idx, drop = FALSE] loglik.x <- lav_mvnorm_loglik_samplestats( sample.mean = sample.mean.x, sample.cov = sample.cov.x, sample.nobs = sample.nobs, Mu = Mu.X, Sigma = Sigma.X, x.idx = integer(0L), Sinv.method = Sinv.method ) # subtract logl.X loglik <- loglik - loglik.x } loglik } # 1c special case: Mu = 0, Sigma = I lav_mvnorm_loglik_data_z <- function(Y = NULL, wt = NULL, casewise = FALSE) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } P <- NCOL(Y) LOG.2PI <- log(2 * pi) if (casewise) { DIST <- rowSums(Y * Y) loglik <- -(P * LOG.2PI + DIST) / 2 if (!is.null(wt)) { loglik <- loglik * wt } } else { if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") sample.mean <- out$center sample.cov <- out$cov } else { sample.mean <- base::.colMeans(Y, m = N, n = P) sample.cov <- lav_matrix_cov(Y) } DIST1 <- sum(diag(sample.cov)) DIST2 <- sum(sample.mean * sample.mean) loglik <- -N / 2 * (P * LOG.2PI + DIST1 + DIST2) } loglik } # 2. Derivatives # 2a: derivative logl with respect to mu lav_mvnorm_dlogl_dmu <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # substract 'Mu' from Y Yc <- t(t(Y) - Mu) # weights if (!is.null(wt)) { Yc <- Yc * wt } # derivative dmu <- as.numeric(Sigma.inv %*% colSums(Yc)) # fixed.x? 
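# (clarifying note, not in the original source: for fixed exogenous
# covariates, the corresponding gradient entries are simply zeroed out
# below, since those means are not free parameters; as a sanity sketch,
# the gradient w.r.t. mu vanishes at Mu = colMeans(Y), the ML estimate --
# Y.demo is a hypothetical demo matrix:)
if (FALSE) { # illustrative sketch only -- never executed
  Y.demo <- matrix(rnorm(60), 20, 3)
  g <- lav_mvnorm_dlogl_dmu(Y = Y.demo, Mu = colMeans(Y.demo),
    Sigma = lav_matrix_cov(Y.demo))
  stopifnot(all(abs(g) < 1e-8))
}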
if (length(x.idx) > 0L) { dmu[x.idx] <- 0 } dmu } # 2b: derivative logl with respect to Sigma (full matrix, ignoring symmetry) lav_mvnorm_dlogl_dSigma <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # W.tilde if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") SY <- out$cov MY <- out$center W.tilde <- SY + tcrossprod(MY - Mu) } else { # subtract 'Mu' from Y # Yc <- t( t(Y) - Mu ) # W.tilde <- crossprod(Yc) / N W.tilde <- lav_matrix_cov(Y, Mu = Mu) } # derivative dSigma <- -(N / 2) * (Sigma.inv - (Sigma.inv %*% W.tilde %*% Sigma.inv)) # fixed.x? if (length(x.idx) > 0L) { dSigma[x.idx, x.idx] <- 0 } dSigma } # 2c: derivative logl with respect to vech(Sigma) lav_mvnorm_dlogl_dvechSigma <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # W.tilde if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") SY <- out$cov MY <- out$center W.tilde <- SY + tcrossprod(MY - Mu) } else { W.tilde <- lav_matrix_cov(Y, Mu = Mu) } # derivative (avoiding kronecker product) dSigma <- -(N / 2) * (Sigma.inv - (Sigma.inv %*% W.tilde %*% Sigma.inv)) # fixed.x? if (length(x.idx) > 0L) { dSigma[x.idx, x.idx] <- 0 } # vech dvechSigma <- as.numeric(lav_matrix_duplication_pre( as.matrix(lav_matrix_vec(dSigma)) )) dvechSigma } # 2d: derivative logl with respect to Mu and vech(Sigma) lav_mvnorm_dlogl_dmu_dvechSigma <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # subtract Mu Yc <- t(t(Y) - Mu) # W.tilde if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") SY <- out$cov MY <- out$center W.tilde <- SY + tcrossprod(MY - Mu) dmu <- as.numeric(Sigma.inv %*% colSums(Yc * wt)) } else { W.tilde <- lav_matrix_cov(Y, Mu = Mu) dmu <- as.numeric(Sigma.inv %*% colSums(Yc)) } # derivative (avoiding kronecker product) dSigma <- -(N / 2) * (Sigma.inv - (Sigma.inv %*% W.tilde %*% Sigma.inv)) # fixed.x? if (length(x.idx) > 0L) { dSigma[x.idx, x.idx] <- 0 dmu[x.idx] <- 0 } # vech dvechSigma <- as.numeric(lav_matrix_duplication_pre( as.matrix(lav_matrix_vec(dSigma)) )) c(dmu, dvechSigma) } # 3. Casewise scores # 3a: casewise scores with respect to mu lav_mvnorm_scores_mu <- function(Y = NULL, wt = NULL, Mu = NULL, x.idx = integer(0L), Sigma = NULL, Sinv.method = "eigen", Sigma.inv = NULL) { Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # subtract Mu Yc <- t(t(Y) - Mu) # postmultiply with Sigma.inv SC <- Yc %*% Sigma.inv # weights if (!is.null(wt)) { SC <- SC * wt } # fixed.x?
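# (clarifying note, not in the original source: score columns of fixed
# exogenous variables are zeroed below; also, the casewise scores sum
# -- over rows -- to the total gradient, as sketched here with a
# hypothetical demo matrix Y.demo:)
if (FALSE) { # illustrative sketch only -- never executed
  Y.demo <- matrix(rnorm(60), 20, 3)
  Mu.demo <- rep(0, 3)
  S.demo <- diag(3)
  SC <- lav_mvnorm_scores_mu(Y = Y.demo, Mu = Mu.demo, Sigma = S.demo)
  g <- lav_mvnorm_dlogl_dmu(Y = Y.demo, Mu = Mu.demo, Sigma = S.demo)
  stopifnot(all.equal(as.numeric(colSums(SC)), g))
}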
if (length(x.idx) > 0L) { SC[, x.idx] <- 0 } SC } # 3b: casewise scores with respect to vech(Sigma) lav_mvnorm_scores_vech_sigma <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { P <- NCOL(Y) Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # vech(Sigma.inv) isigma <- lav_matrix_vech(Sigma.inv) # subtract Mu Yc <- t(t(Y) - Mu) # postmultiply with Sigma.inv Yc <- Yc %*% Sigma.inv # tcrossprod idx1 <- lav_matrix_vech_col_idx(P) idx2 <- lav_matrix_vech_row_idx(P) Z <- Yc[, idx1] * Yc[, idx2] # subtract isigma from each row SC <- t(t(Z) - isigma) # adjust for vech SC[, lav_matrix_diagh_idx(P)] <- SC[, lav_matrix_diagh_idx(P)] / 2 # fixed.x? if (length(x.idx) > 0L) { SC[, lav_matrix_vech_which_idx(n = P, idx = x.idx)] <- 0 } # weights if (!is.null(wt)) { SC <- SC * wt } SC } # 3c: casewise scores with respect to mu + vech(Sigma) lav_mvnorm_scores_mu_vech_sigma <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { P <- NCOL(Y) Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # vech(Sigma.inv) isigma <- lav_matrix_vech(Sigma.inv) # subtract Mu Yc <- t(t(Y) - Mu) # postmultiply with Sigma.inv Yc <- Yc %*% Sigma.inv # tcrossprod idx1 <- lav_matrix_vech_col_idx(P) idx2 <- lav_matrix_vech_row_idx(P) Z <- Yc[, idx1] * Yc[, idx2] # subtract isigma from each row SC <- t(t(Z) - isigma) # adjust for lav_matrix_duplication_pre (not vech!) SC[, lav_matrix_diagh_idx(P)] <- SC[, lav_matrix_diagh_idx(P)] / 2 # fixed.x? if (length(x.idx) > 0L) { Yc[, x.idx] <- 0 SC[, lav_matrix_vech_which_idx(n = P, idx = x.idx)] <- 0 } out <- cbind(Yc, SC) # weights if (!is.null(wt)) { out <- out * wt } out } # 4. hessian of logl # 4a: hessian logl Mu and vech(Sigma) from raw data lav_mvnorm_logl_hessian_data <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, meanstructure = TRUE) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } # observed information observed <- lav_mvnorm_information_observed_data( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv, meanstructure = meanstructure ) -N * observed } # 4b: hessian Mu and vech(Sigma) from samplestats lav_mvnorm_logl_hessian_samplestats <- function(sample.mean = NULL, sample.cov = NULL, sample.nobs = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, meanstructure = TRUE) { N <- sample.nobs # observed information observed <- lav_mvnorm_information_observed_samplestats( sample.mean = sample.mean, sample.cov = sample.cov, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv, meanstructure = meanstructure ) -N * observed } # 5) Information h0 # 5a: unit expected information h0 Mu and vech(Sigma) lav_mvnorm_information_expected <- function(Y = NULL, # unused! wt = NULL, # unused! Mu = NULL, # unused!
Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, meanstructure = TRUE, correlation = FALSE) { if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } if(correlation) { I22 <- 0.5 * lav_matrix_duplication_cor_pre_post(Sigma.inv %x% Sigma.inv) } else { I22 <- 0.5 * lav_matrix_duplication_pre_post(Sigma.inv %x% Sigma.inv) } # fixed.x? if (length(x.idx) > 0L) { pstar.x <- lav_matrix_vech_which_idx( n = NCOL(Sigma.inv), idx = x.idx ) I22[pstar.x, ] <- 0 I22[, pstar.x] <- 0 } if (meanstructure) { I11 <- Sigma.inv # fixed.x? if (length(x.idx) > 0L) { I11[x.idx, ] <- 0 I11[, x.idx] <- 0 } out <- lav_matrix_bdiag(I11, I22) } else { out <- I22 } out } # 5b: unit observed information h0 lav_mvnorm_information_observed_data <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, meanstructure = TRUE) { if (!is.null(wt)) { N <- sum(wt) out <- stats::cov.wt(Y, wt = wt, method = "ML") sample.cov <- out$cov sample.mean <- out$center } else { N <- NROW(Y) # sample statistics sample.mean <- colMeans(Y) sample.cov <- lav_matrix_cov(Y) } lav_mvnorm_information_observed_samplestats( sample.mean = sample.mean, sample.cov = sample.cov, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv, meanstructure = meanstructure ) } # 5b-bis: observed information h0 from sample statistics lav_mvnorm_information_observed_samplestats <- function( sample.mean = NULL, sample.cov = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, meanstructure = TRUE) { sample.mean <- as.numeric(sample.mean) Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } W.tilde <- sample.cov + tcrossprod(sample.mean - Mu) if (meanstructure) { I11 <- Sigma.inv I21 <- lav_matrix_duplication_pre((Sigma.inv %*% (sample.mean - Mu)) %x% Sigma.inv) I12 <- t(I21) } AAA <- Sigma.inv %*% (2 * W.tilde - Sigma) %*% Sigma.inv I22 <- (1 / 2) * lav_matrix_duplication_pre_post(Sigma.inv %x% AAA) if (meanstructure) { out <- rbind( cbind(I11, I12), cbind(I21, I22) ) } else { out <- I22 } # fixed.x? if (length(x.idx) > 0L) { not.x <- lav_matrix_vech_which_idx( n = NCOL(Sigma.inv), idx = x.idx, add.idx.at.start = meanstructure ) out[, not.x] <- 0 out[not.x, ] <- 0 } out } # 5c: unit first-order information h0 lav_mvnorm_information_firstorder <- function(Y = NULL, wt = NULL, cluster.idx = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, meanstructure = TRUE) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } if (meanstructure) { SC <- lav_mvnorm_scores_mu_vech_sigma( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv ) } else { # the caller should use Mu = sample.mean SC <- lav_mvnorm_scores_vech_sigma( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv ) } # handle clustering if (!is.null(cluster.idx)) { # take the sum within each cluster SC <- rowsum(SC, group = cluster.idx, reorder = FALSE, na.rm = TRUE) # lower bias if number of clusters is not very high # FIXME: reference? 
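# (clarifying note, not in the original source: the ad-hoc correction
# below multiplies the score crossproduct by nC / (nC - 1), where nC is
# the number of clusters -- comparable in form to the small-sample
# correction commonly used for cluster-robust sandwich estimators)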
nC <- nrow(SC) correction.factor <- nC / (nC - 1) SC <- SC * sqrt(correction.factor) } # unit information out <- crossprod(SC) / N out } # 6: inverted information h0 # 6a: inverted unit expected information h0 Mu and vech(Sigma) # # Note: this is the same as lav_samplestats_Gamma_NT() # but where COV=Sigma and MEAN=Mu # lav_mvnorm_inverted_information_expected <- function(Y = NULL, # unused! wt = NULL, # unused! Mu = NULL, # unused! Sigma = NULL, x.idx = integer(0L), meanstructure = TRUE) { if (length(x.idx) > 0L) { # cov(Y|X) = A - B C^{-1} B' # where A = cov(Y), B = cov(Y,X), C = cov(X) A <- Sigma[-x.idx, -x.idx, drop = FALSE] B <- Sigma[-x.idx, x.idx, drop = FALSE] C <- Sigma[ x.idx, x.idx, drop = FALSE] YbarX <- A - B %*% solve(C, t(B)) # reinsert YbarX in Y+X (residual) covariance matrix YbarX.aug <- matrix(0, nrow = NROW(Sigma), ncol = NCOL(Sigma)) YbarX.aug[-x.idx, -x.idx] <- YbarX # take difference R <- Sigma - YbarX.aug SS <- 2 * lav_matrix_duplication_ginv_pre_post(Sigma %x% Sigma) RR <- 2 * lav_matrix_duplication_ginv_pre_post(R %x% R) I22 <- SS - RR if (meanstructure) { I11 <- YbarX.aug out <- lav_matrix_bdiag(I11, I22) } else { out <- I22 } } else { I22 <- 2 * lav_matrix_duplication_ginv_pre_post(Sigma %x% Sigma) if (meanstructure) { I11 <- Sigma out <- lav_matrix_bdiag(I11, I22) } else { out <- I22 } } out } # 6b: inverted unit observed information h0 # one could use the inverse of a partitioned matrix, but that does not # seem to help much... unless we can find an expression for solve(I22) # 6c: inverted unit first-order information h0 # / # 7) ACOV h0 mu + vech(Sigma) # not implemented, as too trivial # 7a: 1/N * inverted expected information # 7b: 1/N * inverted observed information # 7c: 1/N * inverted first-order information # 7d: sandwich acov lavaan/R/lav_object_summary.R0000644000176200001440000001752514627656441015756 0ustar liggesusers# initial version: YR 03/05/2017 # major change: YR 14/06/2022 for 0.6-12 # - summary() is now silent if not printed # - here, we only collect the necessary ingredients, and store them in a # a list # - the result is a S3 class lavaan.summary # - the actual printing is done by print.lavaan.summary (see lav_print.R) # YR 26 July 2022: add fm.args= argument to change the way (some) fit measures # are computed # YR 24 Sept 2022: add efa= argument # YR 19 Nov 2023: add remove.unused= argument # TDJ 28 March 2024: deprecate std.nox= argument ("std.nox" can be %in% standardized=) # create summary of a lavaan object lav_object_summary <- function(object, header = TRUE, fit.measures = FALSE, fm.args = list( standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.h0.closefit = 0.05, rmsea.h0.notclosefit = 0.08 ), estimates = TRUE, ci = FALSE, fmi = FALSE, standardized = FALSE, std = standardized, remove.step1 = TRUE, remove.unused = TRUE, cov.std = TRUE, rsquare = FALSE, efa = FALSE, efa.args = list( lambda = TRUE, theta = TRUE, psi = TRUE, eigenvalues = TRUE, sumsq.table = TRUE, lambda.structure = FALSE, fs.determinacy = FALSE, se = FALSE, zstat = FALSE, pvalue = FALSE ), modindices = FALSE) { # return a list with the main ingredients res <- list() # this is to avoid partial matching of 'std' with std.nox if (is.logical(std) && is.logical(standardized)) { standardized <- std || standardized } else { # At least 1 is not logical. Retain only valid standardization options. 
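# (illustrative note, not in the original source: e.g. std = "std.lv"
# combined with standardized = "STD.ALL" is merged and lower-cased
# below to c("std.lv", "std.all"); any string outside
# c("std.lv", "std.all", "std.nox") is silently dropped)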
standardized <- intersect(union(tolower(std), tolower(standardized)), c("std.lv","std.all","std.nox")) } # create the 'short' summary if (header) { # 1. collect header information if (.hasSlot(object, "version")) { VERSION <- object@version } else { VERSION <- "pre 0.6" } res$header <- list( lavaan.version = VERSION, sam.approach = (.hasSlot(object, "internal") && !is.null(object@internal$sam.method)), optim.method = object@Options$optim.method, optim.iterations = object@optim$iterations, optim.converged = object@optim$converged ) # sam or sem? if (.hasSlot(object, "internal") && !is.null(object@internal$sam.method)) { # SAM version # 2. sam header res$sam.header <- list( sam.method = object@internal$sam.method, sam.local.options = object@internal$sam.local.options, sam.mm.list = object@internal$sam.mm.list, sam.mm.estimator = object@internal$sam.mm.estimator, sam.struc.estimator = object@internal$sam.struc.estimator ) # 3. no EFA (for now)? # 4. summarize lavdata res$data <- lav_data_summary_short(object@Data) # 5a. sam local test statistics res$sam <- list( sam.method = object@internal$sam.method, sam.mm.table = object@internal$sam.mm.table, sam.mm.rel = object@internal$sam.mm.rel, sam.struc.fit = object@internal$sam.struc.fit, ngroups = object@Data@ngroups, group.label = object@Data@group.label, nlevels = object@Data@nlevels, level.label = object@Data@level.label, block.label = object@Data@block.label ) # 5b. global test statistics (for global only) if (object@internal$sam.method == "global") { res$test <- object@test } } else { # SEM version # 2. summarize optim info (including estimator) res$optim <- list( estimator = object@Options$estimator, estimator.args = object@Options$estimator.args, optim.method = object@Options$optim.method, npar = object@Model@nx.free, eq.constraints = object@Model@eq.constraints, nrow.ceq.jac = nrow(object@Model@ceq.JAC), nrow.cin.jac = nrow(object@Model@cin.JAC), nrow.con.jac = nrow(object@Model@con.jac), con.jac.rank = qr(object@Model@con.jac)$rank ) # 3. if EFA/ESEM, summarize rotation info if (.hasSlot(object@Model, "nefa") && object@Model@nefa > 0L) { res$rotation <- list( rotation = object@Options$rotation, rotation.args = object@Options$rotation.args ) } # 4. summarize lavdata res$data <- lav_data_summary_short(object@Data) # 5. test statistics TEST <- object@test # TDJ: check for user-supplied h1 model if (!is.null(object@external$h1.model)) { stopifnot(inherits(object@external$h1.model, "lavaan")) ## update @test slot TEST <- lav_update_test_custom_h1(lav_obj_h0 = object, lav_obj_h1 = object@external$h1.model)@test } # double check if we have attr(TEST, "info") (perhaps old object?) 
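# (clarifying note, not in the original source: lavaan objects created by
# older versions may lack the "info" attribute on the @test slot; it is
# reconstructed below from @Data and @Options so that printing still works)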
if (is.null(attr(TEST, "info"))) { lavdata <- object@Data lavoptions <- object@Options attr(TEST, "info") <- list( ngroups = lavdata@ngroups, group.label = lavdata@group.label, information = lavoptions$information, h1.information = lavoptions$h1.information, observed.information = lavoptions$observed.information ) } res$test <- TEST } # regular sem } # header # efa-related info if (efa) { res$efa <- lav_efa_summary(object, efa.args = efa.args) } # efa # only if requested, add the additional fit measures if (fit.measures) { # some early warnings (to avoid a hard stop) if (object@Data@data.type == "none") { lav_msg_warn(gettext( "fit measures not available if there is no data")) } else if (length(object@Options$test) == 1L && object@Options$test == "none") { lav_msg_warn(gettext( "fit measures not available if test = \"none\"")) } else if (object@optim$npar > 0L && !object@optim$converged) { lav_msg_warn(gettext( "fit measures not available if model did not converge")) } else { FIT <- lav_fit_measures(object, fit.measures = "default", fm.args = fm.args ) res$fit <- FIT } } # main ingredient: the parameter table if (estimates) { PE <- parameterEstimates(object, ci = ci, standardized = standardized, rsquare = rsquare, fmi = fmi, cov.std = cov.std, remove.eq = FALSE, remove.system.eq = TRUE, remove.ineq = FALSE, remove.def = FALSE, remove.nonfree = FALSE, remove.step1 = remove.step1, remove.unused = remove.unused, output = "text", header = TRUE ) res$pe <- as.data.frame(PE) } # modification indices? if (modindices) { MI <- modificationIndices(object, standardized = TRUE, cov.std = cov.std) res$mi <- MI } # create lavaan.summary S3 class class(res) <- c("lavaan.summary", "list") res } lavaan/R/lav_partable_from_lm.R0000644000176200001440000000311714627656441016230 0ustar liggesusers# build a bare-bones parameter table from a fitted lm object lav_partable_from_lm <- function(object, est = FALSE, label = FALSE, as.data.frame. = FALSE) { # sanity check if (!inherits(object, "lm")) { lav_msg_stop(gettext("object must be of class lm")) } objectTerms <- terms(object) responseIndex <- attr(objectTerms, "response") varNames <- as.character(attr(objectTerms, "variables"))[-1] responseName <- varNames[responseIndex] predCoef <- lav_object_inspect_coef(object, type = "free", add.labels = TRUE ) predNames <- names(predCoef) lhs <- rep(responseName, length(predNames)) op <- rep("~", length(predNames)) rhs <- predNames # intercept? if (attr(objectTerms, "intercept")) { int.idx <- which(rhs == "(Intercept)") op[int.idx] <- "~1" rhs[int.idx] <- "" } # always add residual variance? # lhs <- c(lhs, responseName) # op <- c(op, "~~") # rhs <- c(rhs, responseName) # construct minimal partable partable <- list(lhs = lhs, op = op, rhs = rhs) # include 'est' column? if (est) { # partable$est <- c(as.numeric(predCoef), # sum(resid(object)^2) / object$df.residual) partable$est <- as.numeric(predCoef) } # include 'label' column? if (label) { # partable$label <- c(predNames, responseName) partable$label <- predNames # convert all ':' to '.' partable$label <- gsub("[:()]", ".", partable$label) } # convert to data.frame? if (as.data.frame.) { partable <- as.data.frame(partable, stringsAsFactors = FALSE) } partable } lavaan/R/00generic.R0000644000176200001440000000564114627656441013641 0ustar liggesusers# for blavaan # TDJ: add "..." 
to make the generic actually generic, for lavaan.mi objects # S3 generic for S3 dispatch fitMeasures <- function(object, fit.measures = "all", baseline.model = NULL, h1.model = NULL, fm.args = list( standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.close.h0 = 0.05, rmsea.notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE ), output = "vector", ...) { UseMethod("fitMeasures", object) } fitmeasures <- function(object, fit.measures = "all", baseline.model = NULL, h1.model = NULL, fm.args = list( standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.close.h0 = 0.05, rmsea.notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE ), output = "vector", ...) { UseMethod("fitmeasures", object) } # S4 generic for S4 dispatch setGeneric( "fitMeasures", function(object, fit.measures = "all", baseline.model = NULL, h1.model = NULL, fm.args = list( standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.close.h0 = 0.05, rmsea.notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE ), output = "vector", ...) { standardGeneric("fitMeasures") } ) setGeneric( "fitmeasures", function(object, fit.measures = "all", baseline.model = NULL, h1.model = NULL, fm.args = list( standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.close.h0 = 0.05, rmsea.notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE ), output = "vector", ...) { standardGeneric("fitmeasures") } ) # S3 generics inspect <- function(object, what = "free", ...) { UseMethod("inspect", object) } lavInspect <- function(object, what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) { UseMethod("lavInspect", object) } lavTech <- function(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { UseMethod("lavTech", object) } lavaan/R/lav_test_LRT.R0000644000176200001440000004073514627656441014432 0ustar liggesusers# compare two nested models, by default using the chi-square # difference test # - in 0.5-16, SB.classic = TRUE is the default again (for now) # - in 0.5-18, SB.classic is replaced by 'method', with the following # options: # method = "default" (we choose a default method, based on the estimator) # method = "standard" (option to explicitly avoid robust adjustment) # method = "Satorra.2000" # method = "Satorra.Bentler.2001" # method = "Satorra.Bentler.2010" # method = "mean.var.adjusted.PLRT" # # - 0.6-13: RMSEA.D (also known as 'RDR') is added to the table (unless scaled) # - 0.6-13: fix multiple-group UG^2 bug in Satorra.2000 (reported by # Gronneberg, Foldnes and Moss) # # - 0.6-18: # New option method = "standard" (to explicitly avoid robust adjustment) # New test= argument to select scaled stat when method="satorra.bentler.2001/2010" lavTestLRT <- function(object, ..., method = "default", test = "default", A.method = "delta", scaled.shifted = TRUE, # only when method="Satorra.2000" type = "Chisq", model.names = NULL) { type <- tolower(type[1]) test <- tolower(test[1]) method <- tolower(gsub("[-_\\.]", "", method[1])) if (type %in% c("browne", "browne.residual.adf", "browne.residual.nt")) { if (type == "browne") { type <- "browne.residual.adf" } if (!method %in% c("default", "standard")) { lav_msg_stop(gettext( "method cannot be used if type is browne.residual.adf or browne.residual.nt")) } method <- "default" } # NOTE: if we add additional arguments, it is not the same generic # anova() function anymore, and match.call 
will be screwed up mcall <- match.call(expand.dots = TRUE) dots <- list(...) modp <- if (length(dots)) { sapply(dots, inherits, "lavaan") } else { logical(0L) } # some general properties (taken from the first model) estimator <- object@Options$estimator likelihood <- object@Options$likelihood ngroups <- object@Data@ngroups nobs <- object@SampleStats@nobs ntotal <- object@SampleStats@ntotal # TDJ: check for user-supplied h1 model user_h1_exists <- FALSE if (!is.null(object@external$h1.model)) { if (inherits(object@external$h1.model, "lavaan")) { user_h1_exists <- TRUE } } # shortcut for single argument (just plain LRT) if (!any(modp) && !user_h1_exists) { if (type == "cf") { lav_msg_warn(gettext("`type' argument is ignored for a single model")) } return(lav_test_lrt_single_model(object)) } # list of models mods <- c(list(object), dots[modp]) if (!is.null(model.names)) { names(mods) <- model.names } else { names(mods) <- sapply( as.list(mcall)[which(c(FALSE, TRUE, modp))], function(x) deparse(x) ) } # TDJ: Add user-supplied h1 model, if it exists if (user_h1_exists) mods$user_h1 <- object@external$h1.model # put them in order (using degrees of freedom) ndf <- sapply(mods, function(x) x@test[[1]]$df) order.idx <- order(ndf) mods <- mods[order.idx] ndf <- ndf[order.idx] # here come the checks -- eventually, an option may skip this if (TRUE) { # 1. same set of observed variables? ov.names <- lapply(mods, function(x) { sort(lavNames(x)) }) OV <- ov.names[[1L]] # the observed variable names of the first model if (!all(sapply(ov.names, function(x) identical(x, OV)))) { lav_msg_warn(gettext( "some models are based on a different set of observed variables")) } ## wow FIXME: we may need to reorder the rows/columns first!! # COVS <- lapply(mods, function(x) slot(slot(x, "Sample"), "cov")[[1]]) # if(!all(sapply(COVS, all.equal, COVS[[1]]))) { # stop("lavaan ERROR: models must be fit to the same data") # } # 2. nested models? *different* npars? # TODO! # 3. all meanstructure? mean.structure <- sapply(mods, inspect, "meanstructure") if (sum(mean.structure) > 0L && sum(mean.structure) < length(mean.structure)) { lav_msg_warn(gettext("not all models have a meanstructure")) } # 4. all converged? if (!all(sapply(mods, lavInspect, "converged"))) { lav_msg_warn(gettext("not all models converged")) } } mods.scaled <- unlist(lapply(mods, function(x) { any(c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted" ) %in% unlist(sapply(slot(x, "test"), "[[", "test"))) })) if (all(mods.scaled | ndf == 0) && any(mods.scaled)) { # Note: if df=0, test is not really robust, hence the above condition scaled <- TRUE # which test to choose by default? # i.e., not determined by method= scaledList <- sapply(object@test, function(x) !is.null(x$scaled.test.stat)) scaled.idx <- which(scaledList)[[1]] default.TEST <- object@test[[scaled.idx]]$test if (test == "default") { TEST <- default.TEST } else if (!test %in% c("satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted")) { lav_msg_stop(gettextf( "test = %s not found in object. See available tests in lavInspect(object, \"options\")$test.", dQuote(test))) } else TEST <- test ## is the test available from all models? check.scaled <- unlist(lapply(mods, function(x) { TEST %in% unlist(sapply(slot(x, "test"), "[[", "test")) })) if (any(!check.scaled)) { lav_msg_stop(gettextf( "test = %1$s not found in model(s): %2$s. 
Find available tests per model using lavInspect(fit, \"options\")$test.", dQuote(test), lav_msg_view(names(mods)[which(!check.scaled)], "none"))) } } else if (!any(mods.scaled)) { # thanks to R.M. Bee to fix this scaled <- FALSE TEST <- "standard" method <- "standard" } else { lav_msg_stop(gettext( "some models (but not all) have scaled test statistics")) } if (type %in% c("browne.residual.adf", "browne.residual.nt")) { scaled <- FALSE method <- "standard" } if (method == "standard") { scaled <- FALSE } # select method if (method == "default") { if (estimator == "PML") { method <- "mean.var.adjusted.PLRT" } else if (scaled) { if (TEST %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus" )) { method <- "satorra.bentler.2001" } else { method <- "satorra.2000" } } else { # nothing to do } } else if (method == "meanvaradjustedplrt" || method == "mean.var.adjusted.PLRT") { method <- "mean.var.adjusted.PLRT" stopifnot(estimator == "PML") } else if (method == "satorra2000") { method <- "satorra.2000" } else if (method == "satorrabentler2001") { method <- "satorra.bentler.2001" } else if (method == "satorrabentler2010") { method <- "satorra.bentler.2010" ## only option left: } else if (method != "standard") { lav_msg_stop( gettextf("unknown method for scaled difference test: %s.", method)) } ## in case users specify method= or test= (but still type="chisq"), ## make sure the arguments are consistent for scaled tests if (method %in% c("satorra.bentler.2001","satorra.bentler.2010") && scaled && (!TEST %in% c("satorra.bentler","yuan.bentler","yuan.bentler.mplus")) ) { lav_msg_stop(gettextf( "method = %s only available when models are fitted with test = \"satorra.bentler\", \"yuan.bentler\", or \"yuan.bentler.mplus\".", dQuote(method))) } else { ## method="satorra.2000" still available when TEST != scaled.shifted ## Or !scaled, so nothing to do. } # check method if scaled = FALSE if (type == "chisq" && !scaled && method %in% c( "mean.var.adjusted.PLRT", "satorra.bentler.2001", "satorra.2000", "satorra.bentler.2010" )) { lav_msg_warn(gettextf( "method = %s but no robust test statistics were used; switching to the standard chi-squared difference test", dQuote(method))) method <- "standard" } # which models have used a MEANSTRUCTURE? 
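# (clarifying note, not in the original source: a difference test only
# makes sense when either all models or none of them include mean
# parameters; mixing the two is flagged as an error below. An
# illustrative usage sketch, with hypothetical fitted objects fit0/fit1:)
if (FALSE) { # illustrative usage sketch only -- never executed
  lavTestLRT(fit0, fit1, method = "satorra.bentler.2010")
  lavTestLRT(fit0, fit1, type = "browne.residual.adf")
}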
mods.meanstructure <- sapply(mods, function(x) { unlist(slot( slot(x, "Model"), "meanstructure" )) }) if (all(mods.meanstructure)) { meanstructure <- "ok" } else if (sum(mods.meanstructure) == 0) { meanstructure <- "ok" } else { lav_msg_stop(gettext("some models (but not all) have a meanstructure")) } # collect statistics for each model if (type == "chisq") { Df <- sapply(mods, function(x) slot(x, "test")[[1]]$df) STAT <- sapply(mods, function(x) slot(x, "test")[[1]]$stat) } else if (type == "browne.residual.nt") { TESTlist <- lapply( mods, function(x) lavTest(x, test = "browne.residual.nt") ) Df <- sapply(TESTlist, function(x) x$df) STAT <- sapply(TESTlist, function(x) x$stat) } else if (type == "browne.residual.adf") { TESTlist <- lapply( mods, function(x) lavTest(x, test = "browne.residual.adf") ) Df <- sapply(TESTlist, function(x) x$df) STAT <- sapply(TESTlist, function(x) x$stat) } else if (type == "cf") { tmp <- lapply(mods, lavTablesFitCf) STAT <- unlist(tmp) Df <- unlist(lapply(tmp, attr, "DF")) } else { lav_msg_stop(gettextf("test type unknown: %s", type)) } # difference statistics STAT.delta <- c(NA, diff(STAT)) Df.delta <- c(NA, diff(Df)) if (method == "satorra.2000" && scaled.shifted) { a.delta <- b.delta <- rep(as.numeric(NA), length(STAT)) } else if (method %in% c("satorra.bentler.2001","satorra.bentler.2010", "satorra.2000")) { c.delta <- rep(as.numeric(NA), length(STAT)) } # new in 0.6-13 if (!scaled) { RMSEA.delta <- c(NA, lav_fit_rmsea( X2 = STAT.delta[-1], df = Df.delta[-1], N = ntotal, G = ngroups )) } # check for negative values in STAT.delta # but with a tolerance (0.6-12)! if (any(STAT.delta[-1] < -1 * .Machine$double.eps^(1 / 3))) { lav_msg_warn(gettextf( "Some restricted models fit better than less restricted models; either these models are not nested, or the less restricted model failed to reach a global optimum.Smallest difference = %s.", min(STAT.delta[-1]))) } # correction for scaled test statistics if (type == "chisq" && scaled) { if (method == "satorra.bentler.2001") { # use formula from Satorra & Bentler 2001 for (m in seq_len(length(mods) - 1L)) { out <- lav_test_diff_SatorraBentler2001(mods[[m]], mods[[m + 1]], # in case not @test[[2]]: test = TEST) STAT.delta[m + 1] <- out$T.delta Df.delta[m + 1] <- out$df.delta c.delta[m + 1] <- out$scaling.factor } } else if (method == "mean.var.adjusted.PLRT") { for (m in seq_len(length(mods) - 1L)) { out <- ctr_pml_plrt_nested(mods[[m]], mods[[m + 1]]) STAT.delta[m + 1] <- out$FSMA.PLRT Df.delta[m + 1] <- out$adj.df } } else if (method == "satorra.bentler.2010") { for (m in seq_len(length(mods) - 1L)) { out <- lav_test_diff_SatorraBentler2010(mods[[m]], mods[[m + 1]], test = TEST, # in case not @test[[2]] H1 = FALSE ) # must be F STAT.delta[m + 1] <- out$T.delta Df.delta[m + 1] <- out$df.delta c.delta[m + 1] <- out$scaling.factor } } else if (method == "satorra.2000") { for (m in seq_len(length(mods) - 1L)) { if (TEST %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus" )) { Satterthwaite <- FALSE } else { Satterthwaite <- TRUE } out <- lav_test_diff_Satorra2000(mods[[m]], mods[[m + 1]], H1 = TRUE, Satterthwaite = Satterthwaite, scaled.shifted = scaled.shifted, A.method = A.method ) STAT.delta[m + 1] <- out$T.delta Df.delta[m + 1] <- out$df.delta if (scaled.shifted) { a.delta[m + 1] <- out$a b.delta[m + 1] <- out$b } else { c.delta[m + 1] <- out$scaling.factor } } } } # Pvalue Pvalue.delta <- pchisq(STAT.delta, Df.delta, lower.tail = FALSE) aic <- bic <- rep(NA, length(mods)) if (estimator == "ML") { aic <- 
sapply(mods, FUN = AIC) bic <- sapply(mods, FUN = BIC) } else if (estimator == "PML") { OUT <- lapply(mods, ctr_pml_aic_bic) aic <- sapply(OUT, "[[", "PL_AIC") bic <- sapply(OUT, "[[", "PL_BIC") } if (estimator == "PML") { val <- data.frame( Df = Df, PL_AIC = aic, PL_BIC = bic, Chisq = STAT, "Chisq diff" = STAT.delta, "Df diff" = Df.delta, "Pr(>Chisq)" = Pvalue.delta, row.names = names(mods), check.names = FALSE ) } else { if (scaled) { val <- data.frame( Df = Df, AIC = aic, BIC = bic, Chisq = STAT, "Chisq diff" = STAT.delta, "Df diff" = Df.delta, "Pr(>Chisq)" = Pvalue.delta, row.names = names(mods), check.names = FALSE ) } else { val <- data.frame( Df = Df, AIC = aic, BIC = bic, Chisq = STAT, "Chisq diff" = STAT.delta, "RMSEA" = RMSEA.delta, "Df diff" = Df.delta, "Pr(>Chisq)" = Pvalue.delta, row.names = names(mods), check.names = FALSE ) } } # catch Df.delta == 0 cases (reported by Florian Zsok in Zurich) # but only if there are no inequality constraints! (0.6-1) idx <- which(val[, "Df diff"] == 0) if (length(idx) > 0L) { # remove models with inequality constraints ineq.idx <- which(sapply(lapply(mods, function(x) slot(slot(x, "Model"), "x.cin.idx")), length) > 0L) rm.idx <- which(idx %in% ineq.idx) if (length(rm.idx) > 0L) { idx <- idx[-rm.idx] } } if (length(idx) > 0L) { val[idx, "Pr(>Chisq)"] <- as.numeric(NA) lav_msg_warn(gettext("some models have the same degrees of freedom")) } if (type == "chisq") { if (scaled) { txt <- paste("The ", dQuote("Chisq"), " column contains standard ", "test statistics, not the robust test that should be ", "reported per model. A robust difference test is a ", "function of two standard (not robust) statistics.", sep = "" ) attr(val, "heading") <- paste("\nScaled Chi-Squared Difference Test (method = ", dQuote(method), ")\n\n", lav_msg(paste("lavaan NOTE:", txt)), sep = "" ) if (method == "satorra.2000" && scaled.shifted) { attr(val, "scale") <- a.delta attr(val, "shift") <- b.delta } else if (method %in% c("satorra.bentler.2001","satorra.bentler.2010", "satorra.2000")) { attr(val, "scale") <- c.delta } } else { attr(val, "heading") <- "\nChi-Squared Difference Test\n" } } else if (type == "browne.residual.adf") { attr(val, "heading") <- "\nChi-Squared Difference Test based on Browne's residual (ADF) Test\n" } else if (type == "browne.residual.nt") { attr(val, "heading") <- "\nChi-Squared Difference Test based on Browne's residual (NT) Test\n" } else if (type == "cf") { colnames(val)[c(3, 4)] <- c("Cf", "Cf diff") attr(val, "heading") <- "\nCf Difference Test\n" } class(val) <- c("anova", class(val)) return(val) } # anova table for a single model lav_test_lrt_single_model <- function(object) { estimator <- object@Options$estimator aic <- bic <- c(NA, NA) if (estimator == "ML") { aic <- c(NA, AIC(object)) bic <- c(NA, BIC(object)) } if (length(object@test) > 1L) { val <- data.frame( Df = c(0, object@test[[2L]]$df), AIC = aic, BIC = bic, Chisq = c(0, object@test[[2L]]$stat), "Chisq diff" = c(NA, object@test[[2L]]$stat), "Df diff" = c(NA, object@test[[2L]]$df), "Pr(>Chisq)" = c(NA, object@test[[2L]]$pvalue), row.names = c("Saturated", "Model"), check.names = FALSE ) attr(val, "heading") <- "Chi-Squared Test Statistic (scaled)\n" } else { val <- data.frame( Df = c(0, object@test[[1L]]$df), AIC = aic, BIC = bic, Chisq = c(0, object@test[[1L]]$stat), "Chisq diff" = c(NA, object@test[[1L]]$stat), "Df diff" = c(NA, object@test[[1L]]$df), "Pr(>Chisq)" = c(NA, object@test[[1L]]$pvalue), row.names = c("Saturated", "Model"), check.names = FALSE ) attr(val, "heading") 
<- "Chi-Squared Test Statistic (unscaled)\n" } class(val) <- c("anova", class(val)) val } lavaan/R/lav_partable_unrestricted.R0000644000176200001440000007037214627656441017317 0ustar liggesusers# YR - 26 Nov 2013: generate partable for the unrestricted model # YR - 19 Mar 2017: handle twolevel model # YR - 27 May 2021: added lav_partable_unrestricted_chol so we can use # a cholesky parameterization: S = LAMBDA %*% t(LAMBDA) lav_partable_unrestricted <- function(lavobject = NULL, # if no object is available, lavdata = NULL, lavpta = NULL, lavoptions = NULL, lavsamplestats = NULL, lavh1 = NULL, # optional user-provided sample stats sample.cov = NULL, sample.mean = NULL, sample.slopes = NULL, sample.th = NULL, sample.th.idx = NULL, sample.cov.x = NULL, sample.mean.x = NULL) { lav_partable_indep_or_unrestricted( lavobject = lavobject, lavdata = lavdata, lavpta = lavpta, lavoptions = lavoptions, lavsamplestats = lavsamplestats, lavh1 = lavh1, sample.cov = sample.cov, sample.mean = sample.mean, sample.slopes = sample.slopes, sample.th = sample.th, sample.th.idx = sample.th.idx, independent = FALSE ) } # generate parameter table for an independence model # YR - 12 Sep 2017: special case of lav_partable_unrestricted() lav_partable_independence <- function(lavobject = NULL, # if no object is available, lavdata = NULL, lavpta = NULL, lavoptions = NULL, lavsamplestats = NULL, lavh1 = NULL, # optional user-provided sample stats sample.cov = NULL, sample.mean = NULL, sample.slopes = NULL, sample.th = NULL, sample.th.idx = NULL, sample.cov.x = NULL, sample.mean.x = NULL) { lav_partable_indep_or_unrestricted( lavobject = lavobject, lavdata = lavdata, lavpta = lavpta, lavoptions = lavoptions, lavsamplestats = lavsamplestats, lavh1 = lavh1, sample.cov = sample.cov, sample.mean = sample.mean, sample.slopes = sample.slopes, sample.th = sample.th, sample.th.idx = sample.th.idx, independent = TRUE ) } lav_partable_indep_or_unrestricted <- function(lavobject = NULL, # if no object is available, lavdata = NULL, lavpta = NULL, lavoptions = NULL, lavsamplestats = NULL, lavh1 = NULL, # optional user-provided sample stats sample.cov = NULL, sample.mean = NULL, sample.slopes = NULL, sample.th = NULL, sample.th.idx = NULL, sample.cov.x = NULL, sample.mean.x = NULL, independent = FALSE) { # grab everything from lavaan lavobject if (!is.null(lavobject)) { stopifnot(inherits(lavobject, "lavaan")) lavdata <- lavobject@Data lavoptions <- lavobject@Options lavsamplestats <- lavobject@SampleStats lavpta <- lavobject@pta lavh1 <- lavobject@h1 } if (lavdata@data.type == "none") { lavsamplestats <- NULL } # conditional.x ? check res.cov[[1]] slot conditional.x <- FALSE if (!is.null(lavsamplestats) && !is.null(lavsamplestats@res.cov[[1]])) { conditional.x <- TRUE } else if (!is.null(lavoptions) && lavoptions$conditional.x) { conditional.x <- TRUE } # group.w.free? 
group.w.free <- FALSE if (!is.null(lavoptions) && lavoptions$group.w.free) { group.w.free <- TRUE } # we use CAPS below for the list version, so we can use 'small caps' # within the for() loop # get sample statistics, all groups SAMPLE.cov <- sample.cov if (is.null(SAMPLE.cov) && !is.null(lavsamplestats)) { if (conditional.x) { SAMPLE.cov <- lavsamplestats@res.cov } else { SAMPLE.cov <- lavsamplestats@cov } } SAMPLE.mean <- sample.mean if (is.null(SAMPLE.mean) && !is.null(lavsamplestats)) { if (conditional.x) { SAMPLE.mean <- lavsamplestats@res.int } else { SAMPLE.mean <- lavsamplestats@mean } } SAMPLE.slopes <- sample.slopes if (conditional.x && is.null(SAMPLE.slopes) && !is.null(lavsamplestats)) { SAMPLE.slopes <- lavsamplestats@res.slopes } SAMPLE.th <- sample.th if (is.null(SAMPLE.th) && !is.null(lavsamplestats)) { if (conditional.x) { SAMPLE.th <- lavsamplestats@res.th } else { SAMPLE.th <- lavsamplestats@th } } SAMPLE.th.idx <- sample.th.idx if (is.null(SAMPLE.th.idx) && !is.null(lavsamplestats)) { SAMPLE.th.idx <- lavsamplestats@th.idx } SAMPLE.cov.x <- sample.cov.x if (is.null(SAMPLE.cov.x) && !is.null(lavsamplestats)) { SAMPLE.cov.x <- lavsamplestats@cov.x } SAMPLE.mean.x <- sample.mean.x if (is.null(SAMPLE.mean.x) && !is.null(lavsamplestats)) { SAMPLE.mean.x <- lavsamplestats@mean.x } ov <- lavdata@ov meanstructure <- lavoptions$meanstructure categorical <- any(ov$type == "ordered") ngroups <- lavdata@ngroups nlevels <- lavdata@nlevels if (lavoptions$estimator == "catML") { categorical <- FALSE } correlation <- FALSE if (!is.null(lavoptions$correlation)) { correlation <- lavoptions$correlation } # what with fixed.x? # - does not really matter; fit will be saturated anyway # - fixed.x = TRUE may avoid convergence issues with non-numeric # x-covariates fixed.x <- lavoptions$fixed.x # if multilevel if (nlevels > 1L) { # fixed.x <- FALSE # for now conditional.x <- FALSE # for now categorical <- FALSE # for now } lhs <- rhs <- op <- character(0) group <- block <- level <- free <- exo <- integer(0) ustart <- numeric(0) # block number b <- 0L for (g in 1:ngroups) { # only for multilevel if (nlevels > 1L) { YLp <- lavsamplestats@YLp[[g]] Lp <- lavdata@Lp[[g]] } # local copy sample.cov <- SAMPLE.cov[[g]] sample.mean <- SAMPLE.mean[[g]] sample.slopes <- SAMPLE.slopes[[g]] sample.th <- SAMPLE.th[[g]] sample.th.idx <- SAMPLE.th.idx[[g]] sample.cov.x <- SAMPLE.cov.x[[g]] sample.mean.x <- SAMPLE.mean.x[[g]] # force local sample.cov to be pd -- just for starting values anyway if (!is.null(sample.cov) && !anyNA(sample.cov)) { sample.cov <- lav_matrix_symmetric_force_pd(sample.cov) } for (l in 1:nlevels) { # block b <- b + 1L # ov.names for this block if (is.null(lavpta)) { # only data was used ov.names <- lavdata@ov.names[[g]] ov.names.x <- lavdata@ov.names.x[[g]] ov.names.nox <- ov.names[!ov.names %in% ov.names.x] } else { if (conditional.x) { ov.names <- lavpta$vnames$ov.nox[[b]] } else { ov.names <- lavpta$vnames$ov[[b]] } ov.names.x <- lavpta$vnames$ov.x[[b]] ov.names.nox <- lavpta$vnames$ov.nox[[b]] } # only for multilevel, overwrite sample.cov and sample.mean if (nlevels > 1L) { if (independent) { # beter use lavdata@Lp[[g]]$ov.x.idx?? # in case we have x/y mismatch across levels? 
ov.x.idx <- lavpta$vidx$ov.x[[b]] ov.names.x <- lavpta$vnames$ov.x[[b]] ov.names.nox <- lavpta$vnames$ov.nox[[b]] sample.cov.x <- lavh1$implied$cov[[b]][ov.x.idx, ov.x.idx, drop = FALSE ] sample.mean.x <- lavh1$implied$mean[[b]][ov.x.idx] } else { ov.names.x <- character(0L) ov.names.nox <- ov.names } if (length(lavh1) > 0L) { sample.cov <- lavh1$implied$cov[[b]] sample.mean <- lavh1$implied$mean[[b]] } else { sample.cov <- diag(length(ov.names)) sample.mean <- numeric(length(ov.names)) } # if(l == 1L) { # sample.cov <- YLp[[2]]$Sigma.W[block.idx, block.idx, # drop = FALSE] # sample.mean <- YLp[[2]]$Mu.W[block.idx] # } else { # sample.cov <- YLp[[2]]$Sigma.B[block.idx, block.idx, # drop = FALSE] # sample.mean <- YLp[[2]]$Mu.B[block.idx] # } # force local sample.cov to be strictly pd (and exaggerate) # just for starting values anyway, but at least the first # evaluation will be feasible sample.cov <- lav_matrix_symmetric_force_pd(sample.cov, tol = 1e-03 ) } # a) VARIANCES (all ov's, if !conditional.x, also exo's) nvar <- length(ov.names) lhs <- c(lhs, ov.names) op <- c(op, rep("~~", nvar)) rhs <- c(rhs, ov.names) block <- c(block, rep(b, nvar)) group <- c(group, rep(g, nvar)) level <- c(level, rep(l, nvar)) if (correlation) { free <- c(free, rep(0L, nvar)) } else { free <- c(free, rep(1L, nvar)) } exo <- c(exo, rep(0L, nvar)) # starting values -- variances if (correlation) { ustart <- c(ustart, rep(1, nvar)) } else if (!is.null(sample.cov)) { ustart <- c(ustart, diag(sample.cov)) } else { ustart <- c(ustart, rep(as.numeric(NA), nvar)) } # COVARIANCES! if (!independent) { pstar <- nvar * (nvar - 1) / 2 if (pstar > 0L) { # only if more than 1 variable tmp <- utils::combn(ov.names, 2) lhs <- c(lhs, tmp[1, ]) # to fill upper.tri op <- c(op, rep("~~", pstar)) rhs <- c(rhs, tmp[2, ]) block <- c(block, rep(b, pstar)) group <- c(group, rep(g, pstar)) level <- c(level, rep(l, pstar)) free <- c(free, rep(1L, pstar)) exo <- c(exo, rep(0L, pstar)) } # starting values -- covariances if (!is.null(sample.cov)) { sample.cov.vech <- lav_matrix_vech(sample.cov, diagonal = FALSE) ustart <- c(ustart, sample.cov.vech) # check for 'missing by design' cells: here, the sample.cov # element is *exactly* zero (new in 0.6-18) zero.cov <- which(sample.cov.vech == 0) if (length(zero.cov) > 0L) { n.tmp <- length(free) ones.and.zeroes <- rep(1L, pstar) ones.and.zeroes[zero.cov] <- 0L free[(n.tmp - pstar + 1):n.tmp] <- ones.and.zeroes } } else { ustart <- c(ustart, rep(as.numeric(NA), pstar)) } } # ordered? 
fix variances, add thresholds ord.names <- character(0L) if (categorical) { ord.names <- ov$name[ov$type == "ordered"] # only for this group ord.names <- ov.names[which(ov.names %in% ord.names)] if (length(ord.names) > 0L) { # fix variances to 1.0 idx <- which(lhs %in% ord.names & op == "~~" & lhs == rhs) ustart[idx] <- 1.0 free[idx] <- 0L # add thresholds lhs.th <- character(0) rhs.th <- character(0) for (o in ord.names) { nth <- ov$nlev[ov$name == o] - 1L if (nth < 1L) next lhs.th <- c(lhs.th, rep(o, nth)) rhs.th <- c(rhs.th, paste("t", seq_len(nth), sep = "")) } nel <- length(lhs.th) lhs <- c(lhs, lhs.th) rhs <- c(rhs, rhs.th) op <- c(op, rep("|", nel)) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) free <- c(free, rep(1L, nel)) exo <- c(exo, rep(0L, nel)) # starting values if (!is.null(sample.th) && !is.null(sample.th.idx)) { th.start <- sample.th[sample.th.idx > 0L] ustart <- c(ustart, th.start) } else { ustart <- c(ustart, rep(as.numeric(NA), nel)) } # fixed-to-zero intercepts (since 0.5.17) ov.int <- ord.names nel <- length(ov.int) lhs <- c(lhs, ov.int) op <- c(op, rep("~1", nel)) rhs <- c(rhs, rep("", nel)) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) free <- c(free, rep(0L, nel)) exo <- c(exo, rep(0L, nel)) ustart <- c(ustart, rep(0, nel)) # ~*~ (since 0.6-1) nel <- length(ov.int) lhs <- c(lhs, ov.int) op <- c(op, rep("~*~", nel)) rhs <- c(rhs, ov.int) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) free <- c(free, rep(0L, nel)) exo <- c(exo, rep(0L, nel)) ustart <- c(ustart, rep(1, nel)) } } # categorical # correlation structure? if (!categorical && correlation) { nel <- nvar lhs <- c(lhs, ov.names) op <- c(op, rep("~*~", nel)) rhs <- c(rhs, ov.names) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) free <- c(free, rep(0L, nel)) exo <- c(exo, rep(0L, nel)) ustart <- c(ustart, rep(1, nel)) } # meanstructure? if (meanstructure) { # auto-remove ordinal variables ov.int <- ov.names idx <- which(ov.int %in% ord.names) if (length(idx)) ov.int <- ov.int[-idx] nel <- length(ov.int) lhs <- c(lhs, ov.int) op <- c(op, rep("~1", nel)) rhs <- c(rhs, rep("", nel)) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) # if multilevel, level=1 has fixed zeroes if (nlevels > 1L && l == 1L) { WITHIN <- rep(0L, nel) # FIXME: assuming 1 group within.idx <- match(Lp$within.idx[[2]], Lp$ov.idx[[1]]) WITHIN[within.idx] <- 1L free <- c(free, WITHIN) } else { free <- c(free, rep(1L, nel)) } exo <- c(exo, rep(0L, nel)) # starting values if (!is.null(sample.mean)) { sample.int.idx <- match(ov.int, ov.names) ustart <- c(ustart, sample.mean[sample.int.idx]) } else { ustart <- c(ustart, rep(as.numeric(NA), length(ov.int))) } } # fixed.x exogenous variables? 
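# (clarifying note, not in the original source: when fixed.x = TRUE, the
# variances/covariances and means of the exogenous 'x' variables are still
# added below, but marked fixed (free = 0, exo = 1) at their sample
# values, e.g. a row like x1 ~~ x1 with ustart = the sample variance)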
if (!conditional.x && (nx <- length(ov.names.x)) > 0L) { if (independent && lavoptions$mimic %in% c("Mplus", "lavaan")) { # add covariances for eXo pstar <- nx * (nx - 1) / 2 if (pstar > 0L) { # only if more than 1 variable tmp <- utils::combn(ov.names.x, 2) lhs <- c(lhs, tmp[1, ]) # to fill upper.tri op <- c(op, rep("~~", pstar)) rhs <- c(rhs, tmp[2, ]) block <- c(block, rep(b, pstar)) group <- c(group, rep(g, pstar)) level <- c(level, rep(l, pstar)) free <- c(free, rep(1L, pstar)) exo <- c(exo, rep(0L, pstar)) # starting values if (!is.null(sample.cov.x)) { rhs.idx <- match(tmp[1, ], ov.names.x) lhs.idx <- match(tmp[2, ], ov.names.x) ustart <- c( ustart, sample.cov.x[cbind(rhs.idx, lhs.idx)] ) } else { ustart <- c(ustart, rep(as.numeric(NA), pstar)) } } } if (fixed.x) { # fix variances/covariances exo.idx <- which(rhs %in% ov.names.x & lhs %in% ov.names.x & op == "~~" & group == g) # ok exo[exo.idx] <- 1L free[exo.idx] <- 0L # fix means exo.idx <- which(lhs %in% ov.names.x & op == "~1" & group == g) # ok exo[exo.idx] <- 1L free[exo.idx] <- 0L } } # conditional.x? if (conditional.x && (nx <- length(ov.names.x)) > 0L) { # eXo variances nel <- length(ov.names.x) lhs <- c(lhs, ov.names.x) op <- c(op, rep("~~", nel)) rhs <- c(rhs, ov.names.x) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) if (fixed.x) { free <- c(free, rep(0L, nel)) exo <- c(exo, rep(1L, nel)) } else { free <- c(free, rep(1L, nel)) exo <- c(exo, rep(0L, nel)) } # starting values if (!is.null(sample.cov.x)) { ustart <- c(ustart, diag(sample.cov.x)) } else { ustart <- c(ustart, rep(as.numeric(NA), nel)) } # eXo covariances pstar <- nx * (nx - 1) / 2 if (pstar > 0L) { # only if more than 1 variable tmp <- utils::combn(ov.names.x, 2) lhs <- c(lhs, tmp[1, ]) # to fill upper.tri op <- c(op, rep("~~", pstar)) rhs <- c(rhs, tmp[2, ]) block <- c(block, rep(b, pstar)) group <- c(group, rep(g, pstar)) level <- c(level, rep(l, pstar)) if (fixed.x) { free <- c(free, rep(0L, pstar)) exo <- c(exo, rep(1L, pstar)) } else { free <- c(free, rep(1L, pstar)) exo <- c(exo, rep(0L, pstar)) } # starting values if (!is.null(sample.cov.x)) { rhs.idx <- match(tmp[1, ], ov.names.x) lhs.idx <- match(tmp[2, ], ov.names.x) ustart <- c( ustart, sample.cov.x[cbind(rhs.idx, lhs.idx)] ) } else { ustart <- c(ustart, rep(as.numeric(NA), pstar)) } } # eXo means if (meanstructure) { ov.int <- ov.names.x nel <- length(ov.int) lhs <- c(lhs, ov.int) op <- c(op, rep("~1", nel)) rhs <- c(rhs, rep("", nel)) group <- c(group, rep(g, nel)) block <- c(block, rep(b, nel)) level <- c(level, rep(l, nel)) if (fixed.x) { free <- c(free, rep(0L, nel)) exo <- c(exo, rep(1L, nel)) } else { free <- c(free, rep(1L, nel)) exo <- c(exo, rep(0L, nel)) } # starting values if (!is.null(sample.mean.x)) { sample.int.idx <- match(ov.int, ov.names.x) ustart <- c(ustart, sample.mean.x[sample.int.idx]) } else { ustart <- c(ustart, rep(as.numeric(NA), length(ov.int))) } } # slopes nnox <- length(ov.names.nox) nel <- nnox * nx lhs <- c(lhs, rep(ov.names.nox, times = nx)) op <- c(op, rep("~", nel)) rhs <- c(rhs, rep(ov.names.x, each = nnox)) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) if (independent) { if (lavoptions$baseline.conditional.x.free.slopes) { free <- c(free, rep(1L, nel)) } else { free <- c(free, rep(0L, nel)) } } else { free <- c(free, rep(1L, nel)) } exo <- c(exo, rep(1L, nel)) # starting values -- slopes if (independent) { # FIXME: zero slope-structure provides a fit that # is equal to 
the conditional.x = FALSE version; # in principle, we could just fix the slope-structure # to the sample-based slopes # to get the old behaviour: if (!lavoptions$baseline.conditional.x.free.slopes) { ustart <- c(ustart, rep(0, nel)) } else { # but we probably should do: ustart <- c(ustart, lav_matrix_vec(sample.slopes)) } } else if (!is.null(sample.slopes)) { ustart <- c(ustart, lav_matrix_vec(sample.slopes)) } else { ustart <- c(ustart, rep(as.numeric(NA), nel)) } } # conditional.x # group.w.free (new in 0.6-8) if (group.w.free) { lhs <- c(lhs, "group") op <- c(op, "%") rhs <- c(rhs, "w") block <- c(block, b) group <- c(group, g) level <- c(level, l) free <- c(free, 1L) exo <- c(exo, 0L) ustart <- c(ustart, lavsamplestats@WLS.obs[[g]][1]) } } # levels } # ngroups # free counter idx.free <- which(free > 0) free[idx.free] <- 1:length(idx.free) LIST <- list( id = 1:length(lhs), lhs = lhs, op = op, rhs = rhs, user = rep(1L, length(lhs)), block = block, group = group, level = level, free = free, ustart = ustart, exo = exo # , # label = rep("", length(lhs)) # eq.id = rep(0L, length(lhs)), # unco = free ) # keep level column if no levels? (no for now) if (nlevels < 2L) { LIST$level <- NULL } LIST } # - currently only used for continuous twolevel data # - conditional.x not supported (yet) lav_partable_unrestricted_chol <- function(lavobject = NULL, # if no object is available, lavdata = NULL, lavpta = NULL, lavoptions = NULL) { # grab everything from lavaan lavobject if (!is.null(lavobject)) { stopifnot(inherits(lavobject, "lavaan")) lavdata <- lavobject@Data lavoptions <- lavobject@Options # lavsamplestats <- lavobject@SampleStats lavpta <- lavobject@pta # lavh1 <- lavobject@h1 } ov <- lavdata@ov meanstructure <- lavoptions$meanstructure categorical <- any(ov$type == "ordered") if (categorical) { lav_msg_stop(gettext("categorical data not supported in this function")) } ngroups <- lavdata@ngroups nlevels <- lavdata@nlevels # what with fixed.x? # - does not really matter; fit will be saturated anyway # - fixed.x = TRUE may avoid convergence issues with non-numeric # x-covariates fixed.x <- lavoptions$fixed.x # if multilevel if (nlevels > 1L) { # fixed.x <- FALSE # for now conditional.x <- FALSE # for now categorical <- FALSE # for now } lhs <- rhs <- op <- character(0) group <- block <- level <- free <- exo <- integer(0) ustart <- lower <- numeric(0) # block number b <- 0L for (g in 1:ngroups) { # only for multilevel if (nlevels > 1L) { Lp <- lavdata@Lp[[g]] } for (l in 1:nlevels) { # block b <- b + 1L if (is.null(lavpta)) { ov.names <- lavdata@ov.names[[b]] ov.names.x <- lavdata@ov.names.x[[b]] ov.names.nox <- ov.names[!ov.names %in% ov.names.x] } else { ov.names <- lavpta$vnames$ov[[b]] ov.names.x <- lavpta$vnames$ov.x[[b]] ov.names.nox <- lavpta$vnames$ov.nox[[b]] } # only for multilevel, overwrite sample.cov and sample.mean if (nlevels > 1L) { ov.names.x <- character(0L) ov.names.nox <- ov.names } # create lv.names == ov.names lv.names <- paste("f", ov.names, sep = "") # a) OV VARIANCES -> fixed to zero nvar <- length(ov.names) lhs <- c(lhs, ov.names) op <- c(op, rep("~~", nvar)) rhs <- c(rhs, ov.names) block <- c(block, rep(b, nvar)) group <- c(group, rep(g, nvar)) level <- c(level, rep(l, nvar)) ustart <- c(ustart, rep(0.0001, nvar)) ### Force PD!! (option?) 
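# note: the near-zero (0.0001) residual variances, unit 'f' variances and free 'f' loadings below make the loading matrix act as a Cholesky-like factor of the observed covariance matrix: the implied Sigma is approximately LAMBDA %*% t(LAMBDA) + 0.0001 * I, which keeps Sigma positive definite (hence the _chol suffix of this function)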
free <- c(free, rep(0L, nvar)) exo <- c(exo, rep(0L, nvar)) lower <- c(lower, rep(0.0, nvar)) # b) LV VARIANCES -> fixed to 1.0 nvar <- length(lv.names) lhs <- c(lhs, lv.names) op <- c(op, rep("~~", nvar)) rhs <- c(rhs, lv.names) block <- c(block, rep(b, nvar)) group <- c(group, rep(g, nvar)) level <- c(level, rep(l, nvar)) ustart <- c(ustart, rep(1.0, nvar)) free <- c(free, rep(0L, nvar)) exo <- c(exo, rep(0L, nvar)) lower <- c(lower, rep(1.0, nvar)) # c) LOADINGS self nvar <- length(ov.names) lhs <- c(lhs, lv.names) op <- c(op, rep("=~", nvar)) rhs <- c(rhs, ov.names) block <- c(block, rep(b, nvar)) group <- c(group, rep(g, nvar)) level <- c(level, rep(l, nvar)) ustart <- c(ustart, rep(as.numeric(NA), nvar)) free <- c(free, rep(1L, nvar)) exo <- c(exo, rep(0L, nvar)) lower <- c(lower, rep(0.0, nvar)) # lower bound! # d) LOADINGS other if (length(ov.names) > 1L) { tmp <- utils::combn(ov.names, 2) pstar <- ncol(tmp) lhs <- c(lhs, paste("f", tmp[1, ], sep = "")) op <- c(op, rep("=~", pstar)) rhs <- c(rhs, tmp[2, ]) block <- c(block, rep(b, pstar)) group <- c(group, rep(g, pstar)) level <- c(level, rep(l, pstar)) free <- c(free, rep(1L, pstar)) exo <- c(exo, rep(0L, pstar)) lower <- c(lower, rep(-Inf, pstar)) ustart <- c(ustart, rep(as.numeric(NA), pstar)) } # check for zero coverage at level 1 (new in 0.6-18) if (lavdata@missing == "ml" && l == 1 && !is.null(lavdata@Mp[[g]])) { coverage <- lavdata@Mp[[g]]$coverage sample.cov.vech <- lav_matrix_vech(coverage, diagonal = FALSE) zero.cov <- which(sample.cov.vech == 0) if (length(zero.cov) > 0L) { n.tmp <- length(free) ones.and.zeroes <- rep(1L, pstar) ones.and.zeroes[zero.cov] <- 0L inf.and.zeroes <- rep(-Inf, pstar) inf.and.zeroes[zero.cov] <- 0 na.and.zeroes <- rep(as.numeric(NA), pstar) na.and.zeroes[zero.cov] <- 0 free[ (n.tmp - pstar + 1):n.tmp] <- ones.and.zeroes ustart[(n.tmp - pstar + 1):n.tmp] <- na.and.zeroes lower[ (n.tmp - pstar + 1):n.tmp] <- inf.and.zeroes } } # meanstructure? if (meanstructure) { # OV ov.int <- ov.names nel <- length(ov.int) lhs <- c(lhs, ov.int) op <- c(op, rep("~1", nel)) rhs <- c(rhs, rep("", nel)) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) # if multilevel, level=1 has fixed zeroes if (nlevels > 1L && l == 1L) { WITHIN <- rep(0L, nel) within.idx <- match(Lp$within.idx[[2]], Lp$ov.idx[[1]]) WITHIN[within.idx] <- 1L free <- c(free, WITHIN) } else { free <- c(free, rep(1L, nel)) } exo <- c(exo, rep(0L, nel)) lower <- c(lower, rep(-Inf, nel)) ustart <- c(ustart, rep(as.numeric(NA), nel)) # LV ov.int <- lv.names nel <- length(ov.int) lhs <- c(lhs, ov.int) op <- c(op, rep("~1", nel)) rhs <- c(rhs, rep("", nel)) block <- c(block, rep(b, nel)) group <- c(group, rep(g, nel)) level <- c(level, rep(l, nel)) free <- c(free, rep(0L, nel)) exo <- c(exo, rep(0L, nel)) ustart <- c(ustart, rep(0.0, nel)) lower <- c(lower, rep(-Inf, nel)) } } # levels } # ngroups # free counter idx.free <- which(free > 0) free[idx.free] <- 1:length(idx.free) LIST <- list( id = 1:length(lhs), lhs = lhs, op = op, rhs = rhs, user = rep(1L, length(lhs)), block = block, group = group, level = level, free = free, ustart = ustart, exo = exo, lower = lower # , # label = rep("", length(lhs)) # eq.id = rep(0L, length(lhs)), # unco = free ) # keep level column if no levels? (no for now) if (nlevels < 2L) { LIST$level <- NULL } LIST } lavaan/R/lav_export_bugs.R0000644000176200001440000002627614627656441015273 0ustar liggesusers# export to BUGS syntax # we assume that N1, N2, ...
are in data lav2bugs <- function(partable, as.function. = FALSE) { # get parameter table attributes pta <- lav_partable_attributes(partable = partable) vnames <- pta$vnames nblocks <- pta$nblocks nvar <- pta$nvar nfac <- pta$nfac # sanity check partable <- lav2check(partable) # tabs t1 <- paste(rep(" ", 2L), collapse = "") t2 <- paste(rep(" ", 4L), collapse = "") t3 <- paste(rep(" ", 6L), collapse = "") t4 <- paste(rep(" ", 8L), collapse = "") # TXT header if (as.function.) { TXT <- paste("{\n", sep = "") } else { TXT <- paste("model {\n", sep = "") } # model for every i for (g in 1:nblocks) { ov.names <- vnames$ov[[g]] lv.names <- vnames$lv[[g]] yname <- paste("y", g, sep = "") if (nblocks > 1L) { TXT <- paste(TXT, t1, "# block ", g, "\n", sep = "" ) } else { TXT <- paste(TXT, "\n") } TXT <- paste(TXT, t1, "for(i in 1:N", g, ") {\n", sep = "" ) # ov.nox - all observed variables (except exogenous ones) ov.names.nox <- vnames$ov.nox[[g]] nov <- length(ov.names.nox) TXT <- paste(TXT, "\n", t2, "# ov.nox", sep = "" ) for (i in 1:nov) { ov.idx <- match(ov.names.nox[i], ov.names) theta.free.idx <- which(partable$block == g & partable$op == "~~" & partable$lhs == partable$rhs & partable$lhs == ov.names.nox[i]) if (length(theta.free.idx) != 1L) { lav_msg_stop(gettextf( "parameter for residual variance %s not found", ov.names.nox[i]) ) } else { theta.idx <- partable$free[theta.free.idx] } TXT <- paste(TXT, "\n", t2, yname, "[i,", ov.idx, "] ~ dnorm(mu", g, "[i,", ov.idx, "], itheta[", theta.idx, "])", sep = "" ) } TXT <- paste(TXT, "\n", t2, sep = "") for (i in 1:nov) { ov.idx <- match(ov.names.nox[i], ov.names) TXT <- paste(TXT, "\n", t2, "mu", g, "[i,", ov.idx, "] <- ", sep = "" ) # find rhs for this observed variable # 1. intercept? int.idx <- which(partable$block == g & partable$op == "~1" & partable$lhs == ov.names.nox[i]) if (length(int.idx) == 1L) { # fixed or free? if (partable$free[int.idx] == 0L) { TXT <- paste(TXT, partable$ustart[int.idx], sep = "" ) } else { TXT <- paste(TXT, "theta[", partable$free[int.idx], "]", sep = "" ) } } else { # no intercept, say '0', so we always have rhs TXT <- paste(TXT, "0", sep = "") } # 2. factor loading? lam.idx <- which(partable$block == g & partable$op == "=~" & partable$rhs == ov.names.nox[i]) for (j in lam.idx) { # fixed or free? if (partable$free[j] == 0L) { TXT <- paste(TXT, " + ", partable$ustart[j], "*eta", g, "[i,", match(partable$lhs[j], lv.names), "]", sep = "" ) } else { TXT <- paste(TXT, " + ", "theta[", partable$free[j], "]*eta", g, "[i,", match(partable$lhs[j], lv.names), "]", sep = "" ) } } # 3. regression? r.idx <- which(partable$block == g & partable$op == "~" & partable$lhs == ov.names.nox[i]) for (j in r.idx) { # what is the rhs? rhs <- partable$rhs[j] if (rhs %in% lv.names) { RHS <- paste("eta", g, "[i,", match(rhs, lv.names), "]", sep = "" ) } else if (rhs %in% vnames$ov[[g]]) { RHS <- paste("y", g, "[i,", match(rhs, ov.names), "]", sep = "" ) } # fixed or free? 
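# (fixed parameters are written into the generated BUGS syntax as numeric
# constants taken from ustart; free parameters are referenced through
# their index in the theta[] parameter vector)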
if (partable$free[j] == 0L) { TXT <- paste(TXT, " + ", partable$ustart[j], "*", RHS, sep = "" ) } else { TXT <- paste(TXT, " + ", "theta[", partable$free[j], "]*", RHS, sep = "" ) } } } # lv.y # var(lv.y) = PSI (lisrel style) lv.y <- vnames$lv.y[[g]] if (length(lv.y) > 0L) { TXT <- paste(TXT, "\n\n", t2, "# lv.y", sep = "" ) lv.y.idx <- match(lv.y, lv.names) ny <- length(lv.y.idx) for (j in 1:ny) { theta.free.idx <- which(partable$block == g & partable$op == "~~" & partable$lhs == partable$rhs & partable$lhs == lv.y[j]) if (length(theta.free.idx) != 1L) { lav_msg_stop(gettextf( "parameter for residual variance %s not found", lv.y[j]) ) } else { theta.idx <- partable$free[theta.free.idx] } TXT <- paste(TXT, "\n", t2, # dnorm for now "eta", g, "[i,", lv.y.idx[j], "] ~ dnorm(mu.eta", g, "[i,", lv.y.idx[j], "], itheta[", theta.idx, "])", sep = "" ) } for (j in 1:ny) { TXT <- paste(TXT, "\n", t2, # dnorm for now "mu.eta", g, "[i,", lv.y.idx[j], "] <- ", sep = "" ) # lhs elements regression # 1. intercept? int.idx <- which(partable$block == g & partable$op == "~1" & partable$lhs == lv.y[j]) if (length(int.idx) == 1L) { # fixed or free? if (partable$free[int.idx] == 0L) { TXT <- paste(TXT, partable$ustart[int.idx], sep = "" ) } else { TXT <- paste(TXT, "theta[", partable$free[int.idx], "]", sep = "" ) } } else { # no intercept, say '0', so we always have rhs TXT <- paste(TXT, "0", sep = "") } rhs.idx <- which(partable$block == g & partable$op == "~" & partable$lhs == lv.y[j]) np <- length(rhs.idx) for (p in 1:np) { TXT <- paste(TXT, " + ", "theta[", partable$free[rhs.idx[p]], "]*eta", g, "[i,", match(partable$rhs[rhs.idx[p]], lv.names), "]", sep = "" ) } } } # exogenous lv -- FIXME: we assume the lv.x array is continuous # (eg 3,4,5, but NOT 3,5,6) # var(lv.x) = PHI (lisrel style) lv.x <- vnames$lv.x[[g]] if (length(lv.x) > 0L) { TXT <- paste(TXT, "\n\n", t2, "# lv.x", sep = "" ) lv.x.idx <- match(lv.x, lv.names) nx <- length(lv.x.idx) TXT <- paste(TXT, "\n", t2, # dmnorm for now "eta", g, "[i,", min(lv.x.idx), ":", max(lv.x.idx), "] ~ dmnorm(mu.eta", g, "[i,", min(lv.x.idx), ":", max(lv.x.idx), "], iphi", g, "[1:", nx, ",1:", nx, "])", sep = "" ) for (j in 1:nx) { TXT <- paste(TXT, "\n", t2, "mu.eta", g, "[i,", lv.x.idx[j], "] <- 0", sep = "" ) } } # exogenous ov ??? (what to do here?) # end of this block TXT <- paste(TXT, "\n\n", t1, "} # end of block ", g, "\n", sep = "" ) } # priors (both fixed and free) TXT <- paste(TXT, "\n", t1, "# Priors free parameters (univariate):", sep = "" ) npt <- length(partable$lhs) for (i in seq_len(npt)) { if (partable$free[i] == 0L) next # skip non-free parameters lhs <- partable$lhs[i] op <- partable$op[i] rhs <- partable$rhs[i] free.idx <- partable$free[i] g <- partable$block[i] if (op == "=~") { # factor loading TXT <- paste(TXT, "\n", t1, "theta[", free.idx, "] ~ dnorm(0.8, 1)", sep = "" ) } else if (op == "~") { # regression TXT <- paste(TXT, "\n", t1, "theta[", free.idx, "] ~ dnorm(0, 1)", sep = "" ) } else if (op == "~~" && lhs == rhs) { # variance # 1. lv.x + lv.x (skip -> multivariate) # 2. lv.y + lv.y # 3. observed + observed # 4. else -> fix (upgrade to latent?) if (lhs %in% vnames$lv.x[[g]] && rhs %in% vnames$lv.x[[g]]) { # lv.x: move to multivariate...
(dwish) next } else if (lhs %in% vnames$lv.y[[g]] && rhs %in% vnames$lv.y[[g]]) { # lv.y TXT <- paste(TXT, "\n", t1, "itheta[", free.idx, "] ~ dgamma(9, 4)", sep = "" ) TXT <- paste(TXT, "\n", t1, "theta[", free.idx, "] <- 1/itheta[", free.idx, "]", sep = "" ) } else if (lhs %in% vnames$ov[[g]] && rhs %in% vnames$ov[[g]]) { TXT <- paste(TXT, "\n", t1, "itheta[", free.idx, "] ~ dgamma(9, 4)", sep = "" ) TXT <- paste(TXT, "\n", t1, "theta[", free.idx, "] <- 1/itheta[", free.idx, "]", sep = "" ) } else { lav_msg_stop(gettextf("FIXME!! parameter %s", i)) } } else if (op == "~~" && lhs != rhs) { # covariance # 1. lv.x + lv.x (skip -> multivariate) # 2. lv.y + lv.y # 3. observed + observed # 4. else -> fix (upgrade to latent?) if (lhs %in% vnames$lv.x[[g]] && rhs %in% vnames$lv.x[[g]]) { # exo lv covariance next } else if (lhs %in% vnames$lv.y[[g]] && rhs %in% vnames$lv.y[[g]]) { # lv.y lav_msg_stop(gettextf("FIXME!! parameter %s", i)) } else if (lhs %in% vnames$ov[[g]] && rhs %in% vnames$ov[[g]]) { TXT <- paste(TXT, "\n", t1, "itheta[", free.idx, "] ~ dgamma(9, 4)", sep = "" ) TXT <- paste(TXT, "\n", t1, "theta[", free.idx, "] <- 1/itheta[", free.idx, "]", sep = "" ) } else { lav_msg_stop(gettextf("FIXME!! parameter %s", i)) } } else if (op == "~1") { # intercept TXT <- paste(TXT, "\n", t1, "theta[", free.idx, "] ~ dnorm(0, 1)", sep = "" ) } else { lav_msg_stop(gettextf("op not supported yet for parameter %s", i)) } } TXT <- paste(TXT, "\n\n", t1, "# Priors free parameters (multivariate):", sep = "" ) for (g in 1:nblocks) { lv.phi.idx <- which(partable$block == g & partable$op == "~~" & partable$lhs %in% vnames$lv.x[[g]] & partable$rhs %in% vnames$lv.x[[g]]) nx <- length(vnames$lv.x[[g]]) if (nx > 0L) { TXT <- paste(TXT, "\n", t1, "iphi", g, "[1:", nx, ",1:", nx, "] ~ dwish(R", g, "[1:", nx, ",1:", nx, "], 5)", sep = "" ) TXT <- paste(TXT, "\n", t1, "phi", g, "[1:", nx, ",1:", nx, "] <- inverse(iphi", g, "[1:", nx, ",1:", nx, "])", sep = "" ) for (idx in lv.phi.idx) { TXT <- paste(TXT, "\n", t1, "theta[", partable$free[idx], "] <- phi", g, "[", match(partable$lhs[idx], vnames$lv.x[[g]]), ",", match(partable$rhs[idx], vnames$lv.x[[g]]), "]", sep = "" ) } } } # end of model TXT <- paste(TXT, "\n\n", "} # End of model\n", sep = "") # end of model if (as.function.) { out <- function() NULL formals(out) <- alist() body(out) <- parse(file = "", text = TXT) } else { out <- TXT class(out) <- c("lavaan.character", "character") } out } lavaan/R/lav_fit_srmr.R0000644000176200001440000002303414627656441014550 0ustar liggesusers# functions related to the SRMR fit measures (single level only) # lower-level functions: # - lav_fit_srmr_mplus # - lav_fit_srmr_twolevel # higher-level functions: # - lav_fit_srmr_lavobject # Y.R. 22 July 2022 # Note: for rmr/srmr/crmr, we use lav_residuals_summary() # SRMR for continuous data only # see https://www.statmodel.com/download/SRMR.pdf lav_fit_srmr_mplus <- function(lavobject) { # ngroups G <- lavobject@Data@ngroups # container per group srmr_mplus.group <- numeric(G) srmr_mplus_nomean.group <- numeric(G) # If you change how any of the observed/estimated moments below are retrieved, # please tag @TDJorgensen at the end of the commit message.
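# per group, the Mplus variant combines (i) squared off-diagonal
# correlation residuals, (ii) squared standardized mean residuals, and
# (iii) squared relative variance residuals ((s.ii - sigma.ii) / s.ii):
# srmr_mplus = sqrt((sum(r.cor^2) + sum(r.mean^2) + sum(r.var^2)) / e),
# with e = nvar * (nvar + 1) / 2 + nvar when a meanstructure is included
# (and e = nvar * (nvar + 1) / 2 without one), as computed below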
for (g in 1:G) { # observed if (!lavobject@SampleStats@missing.flag) { if (lavobject@Model@conditional.x) { S <- lavobject@SampleStats@res.cov[[g]] M <- lavobject@SampleStats@res.int[[g]] } else { S <- lavobject@SampleStats@cov[[g]] M <- lavobject@SampleStats@mean[[g]] } } else { # EM estimates S <- lavobject@SampleStats@missing.h1[[g]]$sigma M <- lavobject@SampleStats@missing.h1[[g]]$mu } nvar <- ncol(S) # estimated implied <- lavobject@implied lavmodel <- lavobject@Model Sigma.hat <- if (lavmodel@conditional.x) { implied$res.cov[[g]] } else { implied$cov[[g]] } Mu.hat <- if (lavmodel@conditional.x) { implied$res.int[[g]] } else { implied$mean[[g]] } # Bollen approach: simply using cov2cor ('correlation residuals') S.cor <- cov2cor(S) Sigma.cor <- cov2cor(Sigma.hat) R.cor <- (S.cor - Sigma.cor) # meanstructure if (lavobject@Model@meanstructure) { # standardized residual mean vector R.cor.mean <- M / sqrt(diag(S)) - Mu.hat / sqrt(diag(Sigma.hat)) e <- nvar * (nvar + 1) / 2 + nvar srmr_mplus.group[g] <- sqrt((sum(R.cor[lower.tri(R.cor, diag = FALSE)]^2) + sum(R.cor.mean^2) + sum(((diag(S) - diag(Sigma.hat)) / diag(S))^2)) / e) e <- nvar * (nvar + 1) / 2 srmr_mplus_nomean.group[g] <- sqrt((sum(R.cor[lower.tri(R.cor, diag = FALSE)]^2) + sum(((diag(S) - diag(Sigma.hat)) / diag(S))^2)) / e) } else { e <- nvar * (nvar + 1) / 2 srmr_mplus_nomean.group[g] <- srmr_mplus.group[g] <- sqrt((sum(R.cor[lower.tri(R.cor, diag = FALSE)]^2) + sum(((diag(S) - diag(Sigma.hat)) / diag(S))^2)) / e) } } # G attr(srmr_mplus.group, "nomean") <- srmr_mplus_nomean.group srmr_mplus.group } lav_fit_srmr_twolevel <- function(lavobject = NULL) { nlevels <- lavobject@Data@nlevels G <- lavobject@Data@ngroups SRMR.within <- numeric(G) SRMR.between <- numeric(G) for (g in 1:G) { b.within <- (g - 1L) * nlevels + 1L b.between <- (g - 1L) * nlevels + 2L # OBSERVED # if these change, tag @TDJorgensen in commit message S.within <- lavobject@h1$implied$cov[[b.within]] M.within <- lavobject@h1$implied$mean[[b.within]] S.between <- lavobject@h1$implied$cov[[b.between]] M.between <- lavobject@h1$implied$mean[[b.between]] # ESTIMATED # if these change, tag @TDJorgensen in commit message implied <- lav_model_implied_cond2uncond(lavobject@implied) Sigma.within <- implied$cov[[b.within]] Mu.within <- implied$mean[[b.within]] Sigma.between <- implied$cov[[b.between]] Mu.between <- implied$mean[[b.between]] # force pd for between # S.between <- lav_matrix_symmetric_force_pd(S.between) Sigma.between <- lav_matrix_symmetric_force_pd(Sigma.between) # Bollen approach: simply using cov2cor ('residual correlations') S.within.cor <- cov2cor(S.within) S.between.cor <- cov2cor(S.between) Sigma.within.cor <- cov2cor(Sigma.within) if (all(diag(Sigma.between) > 0)) { Sigma.between.cor <- cov2cor(Sigma.between) } else { Sigma.between.cor <- matrix(as.numeric(NA), nrow = nrow(Sigma.between), ncol = ncol(Sigma.between) ) } R.within.cor <- (S.within.cor - Sigma.within.cor) R.between.cor <- (S.between.cor - Sigma.between.cor) nvar.within <- NCOL(S.within) nvar.between <- NCOL(S.between) pstar.within <- nvar.within * (nvar.within + 1) / 2 pstar.between <- nvar.between * (nvar.between + 1) / 2 # SRMR SRMR.within[g] <- sqrt(sum(lav_matrix_vech(R.within.cor)^2) / pstar.within) SRMR.between[g] <- sqrt(sum(lav_matrix_vech(R.between.cor)^2) / pstar.between) } # adjust for group sizes ng <- unlist(lavobject@SampleStats@nobs) # if this changes, tag @TDJorgensen in commit message ntotal <- lavobject@SampleStats@ntotal # if this changes, tag @TDJorgensen in commit 
message SRMR_WITHIN <- sum(ng / ntotal * SRMR.within) SRMR_BETWEEN <- sum(ng / ntotal * SRMR.between) SRMR_TOTAL <- SRMR_WITHIN + SRMR_BETWEEN c(SRMR_TOTAL, SRMR_WITHIN, SRMR_BETWEEN) } lav_fit_srmr_lavobject <- function(lavobject = NULL, fit.measures = "srmr") { # check lavobject stopifnot(inherits(lavobject, "lavaan")) # categorical? categorical <- lavobject@Model@categorical # supported fit measures in this function if (categorical) { fit.srmr <- c("srmr") fit.srmr2 <- c( "rmr", "rmr_nomean", "srmr", # per default equal to srmr_bentler_nomean "srmr_bentler", "srmr_bentler_nomean", "crmr", "crmr_nomean", "srmr_mplus", "srmr_mplus_nomean" ) } else { if (lavobject@Data@nlevels > 1L) { fit.srmr <- c("srmr", "srmr_within", "srmr_between") fit.srmr2 <- c("srmr", "srmr_within", "srmr_between") } else { fit.srmr <- c("srmr") fit.srmr2 <- c( "rmr", "rmr_nomean", "srmr", # the default "srmr_bentler", "srmr_bentler_nomean", "crmr", "crmr_nomean", "srmr_mplus", "srmr_mplus_nomean" ) } } # which one do we need? if (missing(fit.measures)) { # default set fit.measures <- fit.srmr } else { # remove any not-SRMR related index from fit.measures rm.idx <- which(!fit.measures %in% fit.srmr2) if (length(rm.idx) > 0L) { fit.measures <- fit.measures[-rm.idx] } if (length(fit.measures) == 0L) { return(list()) } } # output container indices <- list() # 1. single level if (lavobject@Data@nlevels == 1L) { # RMR/SRMR/CRMR: we get it from lav_residuals_summary() out <- lav_residuals_summary(lavobject, se = FALSE, unbiased = FALSE) cov.cor <- "cov" if (categorical) { cov.cor <- "cor" } # only cov rmr_nomean.group <- sapply(lapply(out, "[[", "rmr"), "[[", cov.cor) srmr_nomean.group <- sapply(lapply(out, "[[", "srmr"), "[[", cov.cor) crmr_nomean.group <- sapply(lapply(out, "[[", "crmr"), "[[", cov.cor) # total if (lavobject@Model@meanstructure) { rmr.group <- sapply(lapply(out, "[[", "rmr"), "[[", "total") srmr.group <- sapply(lapply(out, "[[", "srmr"), "[[", "total") crmr.group <- sapply(lapply(out, "[[", "crmr"), "[[", "total") } else { # no 'total', only 'cov' rmr.group <- rmr_nomean.group srmr.group <- srmr_nomean.group crmr.group <- crmr_nomean.group } # the Mplus versions srmr_mplus.group <- lav_fit_srmr_mplus(lavobject = lavobject) srmr_mplus_nomean.group <- attr(srmr_mplus.group, "nomean") attr(srmr_mplus.group, "nomean") <- NULL # adjust for group sizes ng <- unlist(lavobject@SampleStats@nobs) # if this changes, tag @TDJorgensen in commit message ntotal <- lavobject@SampleStats@ntotal # if this changes, tag @TDJorgensen in commit message RMR <- sum(ng / ntotal * rmr.group) RMR_NOMEAN <- sum(ng / ntotal * rmr_nomean.group) SRMR_BENTLER <- sum(ng / ntotal * srmr.group) SRMR_BENTLER_NOMEAN <- sum(ng / ntotal * srmr_nomean.group) CRMR <- sum(ng / ntotal * crmr.group) CRMR_NOMEAN <- sum(ng / ntotal * crmr_nomean.group) SRMR_MPLUS <- sum(ng / ntotal * srmr_mplus.group) SRMR_MPLUS_NOMEAN <- sum(ng / ntotal * srmr_mplus_nomean.group) # srmr if (lavobject@Options$mimic %in% c("lavaan", "EQS")) { if (categorical) { indices["srmr"] <- SRMR_BENTLER_NOMEAN } else { indices["srmr"] <- SRMR_BENTLER } } else if (lavobject@Options$mimic == "Mplus") { if (lavobject@Options$information[1] == "expected") { if (categorical) { indices["srmr"] <- SRMR_BENTLER_NOMEAN } else { indices["srmr"] <- SRMR_BENTLER } } else { if (categorical) { indices["srmr"] <- SRMR_MPLUS_NOMEAN } else { indices["srmr"] <- SRMR_MPLUS } } } # Mplus only # the others indices["srmr_bentler"] <- SRMR_BENTLER indices["srmr_bentler_nomean"] <-
SRMR_BENTLER_NOMEAN indices["crmr"] <- CRMR indices["crmr_nomean"] <- CRMR_NOMEAN # only correct for non-categorical: if (lavobject@Model@categorical) { # FIXME! Compute Mplus 8.1 way to compute SRMR in the # categorical setting # See 'SRMR in Mplus (2018)' document on Mplus website indices["srmr_mplus"] <- as.numeric(NA) indices["srmr_mplus_nomean"] <- as.numeric(NA) } else { indices["srmr_mplus"] <- SRMR_MPLUS indices["srmr_mplus_nomean"] <- SRMR_MPLUS_NOMEAN } indices["rmr"] <- RMR indices["rmr_nomean"] <- RMR_NOMEAN } else { # 2. twolevel setting out <- lav_fit_srmr_twolevel(lavobject = lavobject) indices["srmr"] <- out[1] indices["srmr_within"] <- out[2] indices["srmr_between"] <- out[3] } # twolevel # return only those that were requested indices[fit.measures] } lavaan/R/lav_bvmix.R0000644000176200001440000003327614627656441014061 0ustar liggesusers# the weighted bivariate ordinal/linear model # YR 08 March 2020 (replacing the old lav_polyserial.R routines) # # - polyserial (and biserial) correlations # - bivariate ordinal/linear regression # - using sampling weights wt # polyserial correlation # # Y1 = linear # Y2 = ordinal lav_bvmix_cor_twostep_fit <- function(Y1, Y2, eXo = NULL, wt = NULL, fit.y1 = NULL, fit.y2 = NULL, Y1.name = NULL, Y2.name = NULL, optim.method = "nlminb1", # 0.6-7 optim.scale = 1.0, init.theta = NULL, control = list()) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # create cache environment cache <- lav_bvmix_init_cache(fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt) # optim.method minObjective <- lav_bvmix_min_objective minGradient <- lav_bvmix_min_gradient minHessian <- lav_bvmix_min_hessian if (optim.method == "nlminb" || optim.method == "nlminb2") { # nothing to do } else if (optim.method == "nlminb0") { minGradient <- minHessian <- NULL } else if (optim.method == "nlminb1") { minHessian <- NULL } # optimize if (is.null(control$trace)) { control$trace <- ifelse(lav_verbose(), 1, 0) } # init theta? 
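# (a user-supplied starting value for rho takes precedence; otherwise we
# keep the moment-based start (Olsson, 1982) computed in
# lav_bvmix_init_cache() and stored in cache$theta)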
if (!is.null(init.theta)) { start.x <- init.theta } else { start.x <- cache$theta } # try 1 optim <- nlminb( start = start.x, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control, scale = optim.scale, lower = -0.995, upper = +0.995, cache = cache ) # try 2 if (optim$convergence != 0L) { optim <- nlminb( start = start.x, objective = minObjective, gradient = NULL, hessian = NULL, control = control, scale = optim.scale, lower = -0.995, upper = +0.995, cache = cache ) } # try 3 if (optim$convergence != 0L) { optim <- nlminb( start = 0, objective = minObjective, gradient = NULL, hessian = NULL, control = control, scale = 10, lower = -0.995, upper = +0.995, cache = cache ) } # try 4 -- new in 0.6-8 if (optim$convergence != 0L) { optim <- optimize( f = minObjective, interval = c(-0.995, +0.995), cache = cache, tol = .Machine$double.eps ) if (is.finite(optim$minimum)) { optim$convergence <- 0L optim$par <- optim$minimum } } # check convergence if (optim$convergence != 0L) { if (!is.null(Y1.name) && !is.null(Y2.name)) { lav_msg_warn(gettextf( "estimation polyserial correlation did not converge for variables %1$s and %2$s", Y1.name, Y2.name)) } else { lav_msg_warn(gettext( "estimation polyserial correlation(s) did not always converge")) } rho <- cache$theta # starting value } else { rho <- optim$par } rho } # Y1 = linear # Y2 = ordinal lav_bvmix_init_cache <- function(fit.y1 = NULL, fit.y2 = NULL, wt = NULL, scores = FALSE, parent = parent.frame()) { # data Y1 <- fit.y1$y Y2 <- fit.y2$y eXo <- fit.y1$X # extract parameters # Y1 y1.VAR <- fit.y1$theta[fit.y1$var.idx] y1.SD <- sqrt(y1.VAR) y1.ETA <- fit.y1$yhat Z <- (Y1 - y1.ETA) / y1.SD # Y2 th.y2 <- fit.y2$theta[fit.y2$th.idx] # exo? if (is.null(eXo)) { nexo <- 0L } else { nexo <- ncol(eXo) } # nobs if (is.null(wt)) { N <- length(Y1) } else { N <- sum(wt) } # starting value -- Olsson 1982 eq 38 if (nexo > 0L) { # exo if (is.null(wt)) { COR <- cor(Z, Y2, use = "pairwise.complete.obs") SD <- sd(Y2, na.rm = TRUE) * sqrt((N - 1) / N) } else { tmp <- na.omit(cbind(Z, Y2, wt)) COR <- cov.wt(x = tmp[, 1:2], wt = tmp[, 3], cor = TRUE)$cor[2, 1] SD <- sqrt(lav_matrix_var_wt(tmp[, 2], wt = tmp[, 3])) } rho.init <- (COR * SD / sum(dnorm(th.y2))) } else { # no exo if (is.null(wt)) { COR <- cor(Y1, Y2, use = "pairwise.complete.obs") SD <- sd(Y2, na.rm = TRUE) * sqrt((N - 1) / N) } else { tmp <- na.omit(cbind(Y1, Y2, wt)) COR <- cov.wt(x = tmp[, 1:2], wt = tmp[, 3], cor = TRUE)$cor[2, 1] SD <- sqrt(lav_matrix_var_wt(tmp[, 2], wt = tmp[, 3])) } rho.init <- (COR * SD / sum(dnorm(th.y2))) } # sanity check if (is.na(rho.init)) { rho.init <- 0.0 } else if (abs(rho.init) > 0.9) { rho.init <- rho.init / 2 } # parameter vector theta <- rho.init # only # different cache if scores or not if (scores) { out <- list2env( list( nexo = nexo, theta = theta, N = N, y1.VAR = y1.VAR, eXo = eXo, y2.Y1 = fit.y2$Y1, y2.Y2 = fit.y2$Y2, Y1 = Y1, y1.SD = y1.SD, y1.ETA = y1.ETA, Z = Z, fit.y2.z1 = fit.y2$z1, fit.y2.z2 = fit.y2$z2 ), parent = parent ) } else { out <- list2env( list( nexo = nexo, theta = theta, N = N, Y1 = Y1, y1.SD = y1.SD, y1.ETA = y1.ETA, Z = Z, fit.y2.z1 = fit.y2$z1, fit.y2.z2 = fit.y2$z2 ), parent = parent ) } out } # casewise likelihoods, unweighted! 
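# each case contributes lik.i = dnorm(y1.i, eta.i, sd) * py2y1.i, where
# py2y1.i = pnorm(tauj.star) - pnorm(tauj1.star) and
# tauj.star = (tau.j - rho * z.i) / sqrt(1 - rho^2); the cache version
# below computes exactly this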
lav_bvmix_lik_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] R <- sqrt(1 - rho * rho) # p(Y2|Y1) tauj.star <- (fit.y2.z1 - rho * Z) / R tauj1.star <- (fit.y2.z2 - rho * Z) / R py2y1 <- pnorm(tauj.star) - pnorm(tauj1.star) # TODO, check when to use 1 - pnorm() py2y1[py2y1 < .Machine$double.eps] <- .Machine$double.eps # p(Y1) py1 <- dnorm(Y1, mean = y1.ETA, sd = y1.SD) # lik lik <- py1 * py2y1 # catch very small values lik.toosmall.idx <- which(lik < sqrt(.Machine$double.eps)) lik[lik.toosmall.idx] <- as.numeric(NA) return(lik) }) } lav_bvmix_logl_cache <- function(cache = NULL) { with(cache, { lik <- lav_bvmix_lik_cache(cache) # unweighted! if (!is.null(wt)) { logl <- sum(wt * log(lik), na.rm = TRUE) } else { logl <- sum(log(lik), na.rm = TRUE) } return(logl) }) } lav_bvmix_gradient_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] y.Z1 <- dnorm(tauj.star) y.Z2 <- dnorm(tauj1.star) pyx.inv.R3 <- 1 / (py2y1 * R * R * R) # rho d1 <- fit.y2.z1 * rho - Z d2 <- fit.y2.z2 * rho - Z dx <- pyx.inv.R3 * (y.Z1 * d1 - y.Z2 * d2) # to be consistent with (log)lik_cache if (length(lik.toosmall.idx) > 0L) { dx[lik.toosmall.idx] <- as.numeric(NA) } if (is.null(wt)) { dx.rho <- sum(dx, na.rm = TRUE) } else { dx.rho <- sum(wt * dx, na.rm = TRUE) } return(dx.rho) }) } # YR 29 March 2020 # obtained by using 'Deriv' (from package Deriv) on the # gradient function, and cleaning up # correct, but not good enough lav_bvmix_hessian_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] R2 <- R * R t1 <- Z - rho * tauj.star / R t2 <- Z - rho * tauj1.star / R tmp <- (y.Z1 * (d1 * ((3 * rho / R2) + tauj.star * t1 / R) + fit.y2.z1 + dx * R2 * t1) - y.Z2 * (d2 * ((3 * rho / R2) + tauj1.star * t2 / R) + fit.y2.z2 + dx * R2 * t2) ) # to be consistent with (log)lik_cache if (length(lik.toosmall.idx) > 0L) { tmp[lik.toosmall.idx] <- as.numeric(NA) } if (is.null(wt)) { H <- sum(tmp * pyx.inv.R3, na.rm = TRUE) } else { H <- sum(wt * (tmp * pyx.inv.R3), na.rm = TRUE) } dim(H) <- c(1L, 1L) # for nlminb return(H) }) } # compute total (log)likelihood, for specific 'x' (nlminb) lav_bvmix_min_objective <- function(x, cache = NULL) { cache$theta <- x -1 * lav_bvmix_logl_cache(cache = cache) / cache$N } # compute gradient, for specific 'x' (nlminb) lav_bvmix_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_bvmix_logl_cache(cache = cache) } -1 * lav_bvmix_gradient_cache(cache = cache) / cache$N } # compute hessian, for specific 'x' (nlminb) lav_bvmix_min_hessian <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { tmp <- lav_bvmix_logl_cache(cache = cache) tmp <- lav_bvmix_gradient_cache(cache = cache) } -1 * lav_bvmix_hessian_cache(cache = cache) / cache$N } lav_bvmix_cor_scores_cache <- function(cache = NULL, sigma.correction = FALSE, na.zero = FALSE) { with(cache, { rho <- theta[1L] R <- sqrt(1 - rho * rho) tauj.star <- (fit.y2.z1 - rho * Z) / R tauj1.star <- (fit.y2.z2 - rho * Z) / R y.Z1 <- dnorm(tauj.star) y.Z2 <- dnorm(tauj1.star) # p(Y2|Y1) py2y1 <- pnorm(tauj.star) - pnorm(tauj1.star) py2y1[py2y1 < .Machine$double.eps] <- .Machine$double.eps pyx.inv <- 1 / py2y1 # mu.y1 y.Z1.y.Z2 <- y.Z1 - y.Z2 dx.mu.y1 <- 1 / y1.SD * (Z + (pyx.inv * (rho / R) * y.Z1.y.Z2)) if (!is.null(wt)) { dx.mu.y1 <- wt * dx.mu.y1 } # var.y1 dx.var.y1 <- 1 / (2 * y1.VAR) * (((Z * Z) - 1) + (pyx.inv * rho * Z / R) * y.Z1.y.Z2) if (!is.null(wt)) { dx.var.y1 <- wt * dx.var.y1 } # th.y2 dx.th.y2 <- (y2.Y1 * 
y.Z1 - y2.Y2 * y.Z2) * 1 / R * pyx.inv if (!is.null(wt)) { dx.th.y2 <- wt * dx.th.y2 } # sl.y1 dx.sl.y1 <- NULL if (nexo > 0L) { dx.sl.y1 <- dx.mu.y1 * eXo # if(!is.null(wt)) { # dx.mu.y1 had already been weighted # } } # sl.y2 dx.sl.y2 <- NULL if (nexo > 0L) { dx.sl.y2 <- (y.Z2 - y.Z1) * eXo * 1 / R * pyx.inv if (!is.null(wt)) { dx.sl.y2 <- wt * dx.sl.y2 } } # rho TAUj <- y.Z1 * (fit.y2.z1 * rho - Z) TAUj1 <- y.Z2 * (fit.y2.z2 * rho - Z) dx.rho <- pyx.inv * 1 / (R * R * R) * (TAUj - TAUj1) if (!is.null(wt)) { dx.rho <- wt * dx.rho } # FIXME: only tested for non_exo! # used by pml_deriv1() if (sigma.correction) { dx.rho.orig <- dx.rho dx.var.y1.orig <- dx.var.y1 # sigma dx.rho <- dx.rho.orig / y1.SD # var COV <- rho * y1.SD dx.var.y1 <- (dx.var.y1.orig - 1 / 2 * COV / y1.VAR * 1 / y1.SD * dx.rho.orig) } out <- list( dx.mu.y1 = dx.mu.y1, dx.var.y1 = dx.var.y1, dx.th.y2 = dx.th.y2, dx.sl.y1 = dx.sl.y1, dx.sl.y2 = dx.sl.y2, dx.rho = dx.rho ) return(out) }) } # casewise scores # # Y1 = linear # Y2 = ordinal lav_bvmix_cor_scores <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, evar.y1 = NULL, beta.y1 = NULL, th.y2 = NULL, sl.y2 = NULL, sigma.correction = FALSE, na.zero = FALSE) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # update z1/z2 if needed (used in pml_deriv1() in lav_model_gradient_pml.R) fit.y1 <- lav_uvreg_update_fit( fit.y = fit.y1, evar.new = evar.y1, beta.new = beta.y1 ) fit.y2 <- lav_uvord_update_fit( fit.y = fit.y2, th.new = th.y2, sl.new = sl.y2 ) # create cache environment cache <- lav_bvmix_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho SC <- lav_bvmix_cor_scores_cache( cache = cache, sigma.correction = sigma.correction, na.zero = na.zero ) SC } # logl - no cache lav_bvmix_logl <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, evar.y1 = NULL, beta.y1 = NULL, th.y2 = NULL, sl.y2 = NULL) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # update z1/z2 if needed (used in pml_deriv1() in lav_model_gradient_pml.R) fit.y1 <- lav_uvreg_update_fit( fit.y = fit.y1, evar.new = evar.y1, beta.new = beta.y1 ) fit.y2 <- lav_uvord_update_fit( fit.y = fit.y2, th.new = th.y2, sl.new = sl.y2 ) # create cache environment cache <- lav_bvmix_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho lav_bvmix_logl_cache(cache = cache) } # lik - no cache lav_bvmix_lik <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, evar.y1 = NULL, beta.y1 = NULL, th.y2 = NULL, sl.y2 = NULL, .log = FALSE) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # update z1/z2 if needed (used in pml_deriv1() in lav_model_gradient_pml.R) fit.y1 <- lav_uvreg_update_fit( fit.y = fit.y1, evar.new = evar.y1, beta.new = beta.y1 ) fit.y2 <- lav_uvord_update_fit( fit.y = fit.y2, th.new = th.y2, sl.new = sl.y2 ) # create cache environment cache <- lav_bvmix_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho lik <- lav_bvmix_lik_cache(cache = cache) # unweighted if (.log) { lik <- log(lik) } if (!is.null(wt)) { if (.log) { lik <- wt * lik } else { tmp <- wt * log(lik) lik <- exp(tmp) } } lik } 
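# illustrative usage sketch (not run; these are internal, unexported
# helpers, and the data below are invented for illustration only):
# set.seed(123)
# Y1 <- rnorm(500)                                      # continuous variable
# Y2 <- findInterval(Y1 + rnorm(500), c(-1, 0, 1)) + 1L # 4-category ordinal
# rho <- lav_bvmix_cor_twostep_fit(Y1 = Y1, Y2 = Y2)    # polyserial correlation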
lavaan/R/lav_lavaan_step14_test.R0000644000176200001440000000450414627656441016425 0ustar liggesuserslav_lavaan_step14_test <- function(lavoptions = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavpartable = NULL, lavcache = NULL, lavimplied = NULL, lavh1 = NULL, x = NULL, VCOV = NULL, # nolint lavloglik = NULL) { # # # # # # # # # # # # # 14. lavtest # # # # # # # # # # # # # # if lavoptions$test != "none" and x converged # compute lavtest via lav_model_test(...) # else # lavtest <- list(list(test = "none", stat = NA, # stat.group = rep(NA, lavdata@ngroups), # df = NA, refdistr = "unknown", pvalue = NA)) lavtest <- NULL if (!(length(lavoptions$test) == 1L && lavoptions$test == "none") && attr(x, "converged")) { if (lav_verbose()) { cat("computing TEST for test(s) =", lavoptions$test, "...") } lavtest <- lav_model_test( lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, x = x, VCOV = VCOV, lavdata = lavdata, lavcache = lavcache, lavloglik = lavloglik ) if (lav_verbose()) { cat(" done.\n") } } else { lavtest <- list(list( test = "none", stat = NA, stat.group = rep(NA, lavdata@ngroups), df = NA, refdistr = "unknown", pvalue = NA )) } lavtest } lav_lavaan_step14_fit <- function(lavpartable = NULL, lavmodel = NULL, lavimplied = NULL, x = NULL, VCOV = NULL, # nolint lavtest = NULL) { # # # # # # # # # # # # # # 14bis. lavfit # # -> remove if the offending packages are fixed!! # # # # # # # # # # # # lavfit <- lav_model_fit( lavpartable = lavpartable, lavmodel = lavmodel, lavimplied = lavimplied, x = x, VCOV = VCOV, TEST = lavtest ) # lavfit <- new("Fit") lavfit } lavaan/R/lav_optim_nlminb_constr.R0000644000176200001440000001640014627656441017001 0ustar liggesusers# constrained optimization # - references: * Nocedal & Wright (2006) Chapter 17 # * Optimization with constraints by Madsen, Nielsen & Tingleff # * original papers: Powell, 1969 and Rockafeller, 1974 # - using 'nlminb' for the unconstrained subproblem # - convergence scheme is based on the auglag function in the alabama package nlminb.constr <- function(start, objective, gradient = NULL, hessian = NULL, ..., scale = 1, control = list(), lower = -Inf, upper = Inf, ceq = NULL, ceq.jac = NULL, cin = NULL, cin.jac = NULL, control.outer = list()) { # we need a gradient stopifnot(!is.null(gradient)) # if no 'ceq' or 'cin' function, we create a dummy one if (is.null(ceq)) { ceq <- function(x, ...) { return(numeric(0)) } } if (is.null(cin)) { cin <- function(x, ...) { return(numeric(0)) } } # if no user-supplied jacobian functions, create them if (is.null(ceq.jac)) { if (is.null(ceq)) { ceq.jac <- function(x, ...) { matrix(0, nrow = 0L, ncol = length(x)) } } else { ceq.jac <- function(x, ...) { numDeriv::jacobian(func = ceq, x = x, ...) } } } if (is.null(cin.jac)) { if (is.null(cin)) { cin.jac <- function(x, ...) { matrix(0, nrow = 0L, ncol = length(x)) } } else { cin.jac <- function(x, ...) { numDeriv::jacobian(func = cin, x = x, ...) } } } # how many ceq and cin constraints? 
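# (evaluated once at the starting values; the dummy functions defined
# above return numeric(0), so nceq and ncin are zero when no constraint
# functions were supplied)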
nceq <- length(ceq(start)) ncin <- length(cin(start)) ncon <- nceq + ncin ceq.idx <- cin.idx <- integer(0) if (nceq > 0L) ceq.idx <- 1:nceq if (ncin > 0L) cin.idx <- nceq + 1:ncin cin.flag <- rep(FALSE, ncon) if (ncin > 0L) cin.flag[cin.idx] <- TRUE # control outer default values control.outer.default <- list( mu0 = 100, lambda0 = 10, tol = 1e-06, # changed this in 0.4-12 itmax = 100L, verbose = FALSE ) control.outer <- modifyList(control.outer.default, control.outer) # construct augmented lagrangian function auglag <- function(x, ...) { # apply constraints ceq0 <- ceq(x, ...) cin0 <- cin(x, ...) con0 <- c(ceq0, cin0) # 'release' inactive constraints if (ncin > 0L) { slack <- lambda / mu inactive.idx <- which(cin.flag & con0 > slack) con0[inactive.idx] <- slack[inactive.idx] } objective(x, ...) - sum(lambda * con0) + (mu / 2) * sum(con0 * con0) } fgrad <- function(x, ...) { # apply constraints ceq0 <- ceq(x, ...) cin0 <- cin(x, ...) con0 <- c(ceq0, cin0) # jacobian JAC <- rbind(ceq.jac(x, ...), cin.jac(x, ...)) lambda.JAC <- lambda * JAC # handle inactive constraints if (ncin > 0L) { slack <- lambda / mu inactive.idx <- which(cin.flag & con0 > slack) if (length(inactive.idx) > 0L) { JAC <- JAC[-inactive.idx, , drop = FALSE] lambda.JAC <- lambda.JAC[-inactive.idx, , drop = FALSE] con0 <- con0[-inactive.idx] } } if (nrow(JAC) > 0L) { (gradient(x, ...) - colSums(lambda.JAC) + mu * as.numeric(t(JAC) %*% con0)) } else { gradient(x, ...) } } # initialization ceq0 <- ceq(start, ...) cin0 <- cin(start, ...) con0 <- c(ceq0, cin0) lambda <- rep(control.outer$lambda0, length(con0)) mu <- control.outer$mu0 inactive.idx <- integer(0) if (ncin > 0L) { slack <- lambda / mu inactive.idx <- which(cin.flag & con0 > slack) con0[inactive.idx] <- slack[inactive.idx] } K <- max(abs(con0)) if (control.outer$verbose) { cat("init cin0 values: ", cin0, "\n") cat("init ceq0 values: ", ceq0, "\n") cat("init slack values: ", lambda / mu, "\n") cat("init inactive idx: ", inactive.idx, "\n") cat("init con0 values: ", con0, "\n") cat("K = max con0: ", K, "\n") } r <- obj <- objective(start, ...) feval <- 0L geval <- 0L niter <- 0L ilack <- 0L Kprev <- K mu0 <- control.outer$mu0 / Kprev if (is.infinite(mu0)) mu0 <- 1.0 mu <- mu0 K <- Inf x.par <- start for (i in 1:control.outer$itmax) { x.old <- x.par r.old <- r ############################################################ if (control.outer$verbose) { cat("\nStarting inner optimization [", i, "]:\n") cat("lambda: ", lambda, "\n") cat("mu: ", mu, "\n") } optim.out <- nlminb( start = x.par, objective = auglag, gradient = fgrad, control = control, lower = lower, upper = upper, scale = scale, ... ) ############################################################ x.par <- optim.out$par r <- optim.out$objective feval <- feval + optim.out$evaluations[1] geval <- geval + optim.out$evaluations[2] niter <- niter + optim.out$iterations # check constraints ceq0 <- ceq(x.par, ...) cin0 <- cin(x.par, ...)
con0 <- c(ceq0, cin0) if (ncin > 0L) { slack <- lambda / mu inactive.idx <- which(cin.flag & con0 > slack) con0[inactive.idx] <- slack[inactive.idx] } K <- max(abs(con0)) if (control.outer$verbose) { cat("cin0 values: ", cin0, "\n") cat("ceq0 values: ", ceq0, "\n") cat("active threshold: ", lambda / mu, "\n") cat("inactive idx: ", inactive.idx, "\n") cat("con0 values: ", con0, "\n") cat("K = max con0: ", K, " Kprev = ", Kprev, "\n") } # update K or mu (see Powell, 1969) if (K <= Kprev / 4) { lambda <- lambda - (mu * con0) Kprev <- K } else { mu <- 10 * mu } # check convergence pconv <- max(abs(x.par - x.old)) if (pconv < control.outer$tol) { ilack <- ilack + 1L } else { ilack <- 0L } if ((is.finite(r) && is.finite(r.old) && abs(r - r.old) < control.outer$tol && K < control.outer$tol) | ilack >= 3) { break } } # output a <- list() if (i == control.outer$itmax) { a$convergence <- 10L a$message <- "nlminb.constr ran out of iterations and did not converge" } else if (K > control.outer$tol) { a$convergence <- 11L a$message <- "Convergence due to lack of progress in parameter updates" } else { a$convergence <- 0L a$message <- "converged" } a$par <- optim.out$par a$outer.iterations <- i a$lambda <- lambda a$mu <- mu # a$value <- objective(a$start, ...) # a$cin <- cin(a$start, ...) # a$ceq <- ceq(a$start, ...) a$evaluations <- c(feval, geval) a$iterations <- niter # a$kkt1 <- max(abs(a$fgrad)) <= 0.01 * (1 + abs(a$value)) # a$kkt2 <- any(eigen(a$hessian)$value * control.optim$objectivescale> 0) # jacobian of ceq and 'active' cin ceq0 <- ceq(a$par, ...) cin0 <- cin(a$par, ...) con0 <- c(ceq0, cin0) JAC <- rbind(ceq.jac(a$par, ...), cin.jac(a$par, ...)) inactive.idx <- integer(0L) cin.idx <- which(cin.flag) # ceq.idx <- which(!cin.flag) if (ncin > 0L) { # FIXME: slack value not too strict?? slack <- 1e-05 # cat("DEBUG:\n"); print(con0) inactive.idx <- which(cin.flag & con0 > slack) # if(length(inactive.idx) > 0L) { # JAC <- JAC[-inactive.idx,,drop=FALSE] # } } attr(JAC, "inactive.idx") <- inactive.idx attr(JAC, "cin.idx") <- cin.idx attr(JAC, "ceq.idx") <- ceq.idx a$con.jac <- JAC a } lavaan/R/lav_optim_noniter.R0000644000176200001440000000701114627656441015606 0ustar liggesusers# YR 19 September 2022 # # Entry function to handle noniterative estimators lav_optim_noniter <- function(lavmodel = NULL, lavsamplestats = NULL, lavpartable = NULL, lavdata = NULL, lavoptions = NULL) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) # no support for many things: if (lavmodel@ngroups > 1L) { lav_msg_stop(gettext( "multiple groups not supported (yet) with optim.method = 'NONITER'.")) } if (lavdata@nlevels > 1L) { lav_msg_stop(gettext( "multilevel not supported (yet) with optim.method = 'NONITER'.")) } # no support (yet) for nonlinear constraints nonlinear.idx <- c( lavmodel@ceq.nonlinear.idx, lavmodel@cin.nonlinear.idx ) if (length(nonlinear.idx) > 0L) { lav_msg_stop(gettext( "nonlinear constraints not supported (yet) with optim.method = 'NONITER'." )) } # no support (yet) for inequality constraints if (!is.null(body(lavmodel@cin.function))) { lav_msg_stop(gettext( "inequality constraints not supported (yet) with optim.method = 'NONITER'." )) } # no support (yet) for equality constraints if (length(lavmodel@ceq.linear.idx) > 0L) { lav_msg_stop(gettext( "equality constraints not supported (yet) with optim.method = 'NONITER'." )) } # extract current set of free parameters x.old <- lav_model_get_parameters(lavmodel) npar <- length(x.old) # fabin? 
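# (dispatch on the requested noniterative estimator below; the FABIN2 and
# FABIN3 labels presumably refer to Hagglund's (1982) instrumental-variable
# estimators implemented in lav_cfa_fabin.R)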
ok.flag <- FALSE if (lavoptions$estimator %in% c("FABIN2", "FABIN3")) { x <- try(lav_cfa_fabin_internal( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavpartable = lavpartable, lavoptions = lavoptions ), silent = TRUE) } else if (lavoptions$estimator == "MGM") { x <- try(lav_cfa_guttman1952_internal( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavpartable = lavpartable, lavoptions = lavoptions ), silent = TRUE) } else if (lavoptions$estimator == "BENTLER1982") { x <- try(lav_cfa_bentler1982_internal( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavpartable = lavpartable, lavoptions = lavoptions ), silent = TRUE) } else if (lavoptions$estimator %in% c("JS", "JSA")) { x <- try(lav_cfa_jamesstein_internal( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavpartable = lavpartable, lavdata = lavdata, lavoptions = lavoptions ), silent = TRUE) } else if (lavoptions$estimator == "MIIV") { x <- try(lav_sem_miiv_internal( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavpartable = lavpartable, lavoptions = lavoptions ), silent = TRUE) } else { lav_msg_warn( gettextf("unknown (noniterative) estimator: %s (returning starting values)", lavoptions$estimator) ) } if (inherits(x, "try-error")) { x <- x.old } else { ok.flag <- TRUE } # closing fx <- 0 attr(fx, "fx.group") <- rep(0, lavmodel@ngroups) if (ok.flag) { attr(x, "converged") <- TRUE attr(x, "warn.txt") <- "" } else { attr(x, "converged") <- FALSE attr(x, "warn.txt") <- "noniterative estimation failed" } attr(x, "iterations") <- 1L attr(x, "control") <- list() attr(x, "fx") <- fx x } lavaan/R/lav_test_print.R0000644000176200001440000002257714627656441015115 0ustar liggesusers# print 'blocks' of test statistics # - blocks with 'scaling.factors' come first (in 'two columns') # - then come the 'single-column' test statistics (eg browne.residual.adf) # - print additional information (eg information matrix, h1.information, ...) # if they deviate from what is used for the standard errors # this is used by the summary() function and lavTest(, output = "text") lav_test_print <- function(object, nd = 3L) { # object is list of tests TEST <- object # empty list? if (is.null(TEST) || length(TEST) == 0L || !is.list(TEST)) { return(character(0L)) } # test = "none"? if (TEST[[1]]$test == "none") { return(character(0L)) } # meta data info <- attr(object, "info") ngroups <- info$ngroups group.label <- info$group.label information <- info$information h1.information <- info$h1.information observed.information <- info$observed.information # num format num.format <- paste("%", max(8L, nd + 5L), ".", nd, "f", sep = "") # header cat("Model Test User Model:\n") # locate 'robust' tests (here: having a scaling factor) has.no.scaling <- unname(sapply( (lapply(TEST, "[[", "scaling.factor")), is.null )) robust.idx <- which(!has.no.scaling) non.robust.idx <- which(has.no.scaling) scaled.idx <- 1L if (length(robust.idx) > 0L) { scaled.idx <- which(names(TEST) == TEST[[robust.idx[1]]]$scaled.test) if (length(scaled.idx) == 0L) { scaled.idx <- 1L } # remove 'scaled.test', because it is shown together with robust non.robust.idx <- non.robust.idx[-scaled.idx] } BLOCKS <- c(robust.idx, non.robust.idx) nBlocks <- length(BLOCKS) # print out blocks for (block in BLOCKS) { # one or two-columns for this block?
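# (tests that carry a scaling factor are shown next to their 'standard'
# counterpart in a two-column 'Standard'/'Scaled' layout; all remaining
# tests are printed in a single column)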
if (length(robust.idx) > 0L && block %in% robust.idx) { twocolumn <- TRUE } else { twocolumn <- FALSE } if (!twocolumn) { # print label c1 <- c2 <- c3 <- character(0L) if (!is.null(TEST[[block]]$label)) { c1 <- c(c1, TEST[[block]]$label) c2 <- c(c2, "") c3 <- c(c3, "") } if (is.na(TEST[[block]]$df) || TEST[[block]]$df == 0L) { c1 <- c(c1, c("Test statistic", "Degrees of freedom")) c2 <- c(c2, c( sprintf(num.format, TEST[[block]]$stat), ifelse(TEST[[block]]$df %% 1 == 0, # integer TEST[[block]]$df, sprintf(num.format, TEST[[block]]$df) ) )) c3 <- c(c3, c("", "")) } else { PLABEL <- "P-value" if (!is.null(TEST[[block]]$refdistr)) { if (TEST[[block]]$refdistr == "chisq") { PLABEL <- "P-value (Chi-square)" } else if (TEST[[block]]$refdistr == "unknown") { PLABEL <- "P-value (Unknown)" } else if (TEST[[block]]$refdistr == "bootstrap") { PLABEL <- "P-value (Bollen-Stine bootstrap)" } } c1 <- c(c1, c("Test statistic", "Degrees of freedom", PLABEL)) c2 <- c(c2, c( sprintf(num.format, TEST[[block]]$stat), ifelse(TEST[[block]]$df %% 1 == 0, # integer TEST[[block]]$df, sprintf(num.format, TEST[[block]]$df) ), sprintf(num.format, TEST[[block]]$pvalue) )) c3 <- c(c3, c("", "", "")) } # two-column } else { # print label c1 <- c2 <- c3 <- character(0L) if (!is.null(TEST[[scaled.idx]]$label)) { c1 <- c(c1, TEST[[scaled.idx]]$label) c2 <- c(c2, "") c3 <- c(c3, "") } if (is.na(TEST[[block]]$df) || TEST[[block]]$df == 0L) { c1 <- c(c1, c("Test Statistic", "Degrees of freedom")) c2 <- c( c2, c( sprintf(num.format, TEST[[scaled.idx]]$stat), ifelse(TEST[[scaled.idx]]$df %% 1 == 0, # integer TEST[[scaled.idx]]$df, sprintf(num.format, TEST[[scaled.idx]]$df) ) ) ) c3 <- c( c3, c( sprintf(num.format, TEST[[block]]$stat), ifelse(TEST[[block]]$df %% 1 == 0, # integer TEST[[block]]$df, sprintf(num.format, TEST[[block]]$df) ) ) ) } else { if (!is.null(TEST[[scaled.idx]]$refdistr)) { if (TEST[[scaled.idx]]$refdistr == "chisq") { PLABEL <- "P-value (Chi-square)" } else if (TEST[[scaled.idx]]$refdistr == "unknown") { PLABEL <- "P-value (Unknown)" } else { PLABEL <- "P-value" } } c1 <- c(c1, c( "Test Statistic", "Degrees of freedom", PLABEL, "Scaling correction factor" )) c2 <- c( c2, c( sprintf(num.format, TEST[[scaled.idx]]$stat), ifelse(TEST[[scaled.idx]]$df %% 1 == 0, # integer TEST[[scaled.idx]]$df, sprintf(num.format, TEST[[scaled.idx]]$df) ), sprintf(num.format, TEST[[scaled.idx]]$pvalue), "" ) ) c3 <- c( c3, c( sprintf(num.format, TEST[[block]]$stat), ifelse(TEST[[block]]$df %% 1 == 0, # integer TEST[[block]]$df, sprintf(num.format, TEST[[block]]$df) ), sprintf(num.format, TEST[[block]]$pvalue), sprintf(num.format, TEST[[block]]$scaling.factor) ) ) if (TEST[[block]]$test == "scaled.shifted") { if (ngroups == 1L || length(TEST[[block]]$shift.parameter) == 1L) { c1 <- c(c1, "Shift parameter") c2 <- c(c2, "") c3 <- c( c3, sprintf(num.format, TEST[[block]]$shift.parameter) ) } else { c1 <- c(c1, "Shift parameter for each group:") c2 <- c(c2, "") c3 <- c(c3, "") for (g in 1:ngroups) { c1 <- c(c1, sprintf(" %-38s", group.label[[g]])) c2 <- c(c2, "") c3 <- c(c3, sprintf( num.format, TEST[[block]]$shift.parameter[g] )) } } } # shift # which correction factor? 
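# (the robust test's label, naming the scaling correction that was
# applied, is appended as an extra indented row)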
c1 <- c(c1, paste(" ", TEST[[block]]$label, sep = "")) c2 <- c(c2, "") c3 <- c(c3, "") } } # if twocolumn, label first row if (twocolumn && block == BLOCKS[1]) { c1 <- c("", c1) c2 <- c("Standard", c2) c3 <- c("Scaled", c3) } else { # empty row c1 <- c("", c1) c2 <- c("", c2) c3 <- c("", c3) } # if information type is different from 'se', print it if (length(information) > 1L && information[1] != information[2]) { c1 <- c(c1, "Information") tmp.txt <- information[2] c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) c3 <- c(c3, "") } # if h1.information type is different from 'se', print it if (length(h1.information) > 1L && h1.information[1] != h1.information[2]) { c1 <- c(c1, "Information saturated (h1) model") tmp.txt <- h1.information[2] c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) c3 <- c(c3, "") } # if observed.information type is different from 'se', print it if (length(observed.information) > 1L && information[2] == "observed" && (observed.information[1] != observed.information[2])) { c1 <- c(c1, "Observed information based on") tmp.txt <- observed.information[2] c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) c3 <- c(c3, "") } # format c1/c2/c3 (note: fitMeasures uses 35/16/8) c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (twocolumn) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) # multiple groups? ngroups <- ngroups if (ngroups > 1L) { c1 <- c2 <- c3 <- character(ngroups) for (g in 1:ngroups) { tmp <- sprintf(" %-40s", group.label[[g]]) c1[g] <- format(tmp, width = 43L) if (!twocolumn) { tmp <- sprintf(num.format, TEST[[block]]$stat.group[g]) c2[g] <- format(tmp, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right" ) } else { tmp <- sprintf(num.format, TEST[[scaled.idx]]$stat.group[g]) c2[g] <- format(tmp, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right" ) tmp <- sprintf(num.format, TEST[[block]]$stat.group[g]) c3[g] <- format(tmp, width = 8L + nd, justify = "right") } } if (twocolumn) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) cat(" Test statistic for each group:\n") write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } } # blocks # invisible(M) } lavaan/R/lav_partable_flat.R0000644000176200001440000006365214627656441015535 0ustar liggesuserslav_partable_flat <- function(FLAT = NULL, # nolint blocks = "group", block.id = NULL, meanstructure = FALSE, int.ov.free = FALSE, int.lv.free = FALSE, orthogonal = FALSE, orthogonal.y = FALSE, orthogonal.x = FALSE, orthogonal.efa = FALSE, std.lv = FALSE, correlation = FALSE, conditional.x = FALSE, fixed.x = TRUE, parameterization = "delta", auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = FALSE, auto.cov.lv.x = FALSE, auto.cov.y = FALSE, auto.th = FALSE, auto.delta = FALSE, auto.efa = FALSE, varTable = NULL, # nolint group.equal = NULL, group.w.free = FALSE, ngroups = 1L, nthresholds = NULL, ov.names.x.block = NULL) { categorical <- FALSE ### tmp.default elements: parameters that are typically not specified by ### users, but should typically be considered, 
### either free or fixed # extract `names' of various types of variables: lv.names <- lav_partable_vnames(FLAT, type = "lv") # latent variables # lv.names.r <- lav_partable_vnames(FLAT, type="lv.regular") # regular latent variables lv.names.f <- lav_partable_vnames(FLAT, type = "lv.formative") # formative latent variables ov.names <- lav_partable_vnames(FLAT, type = "ov") # observed variables ov.names.x <- lav_partable_vnames(FLAT, type = "ov.x") # exogenous x covariates lv.names.int <- lav_partable_vnames(FLAT, type = "lv.interaction") # lv interactions if (is.null(ov.names.x.block)) { ov.names.x.block <- ov.names.x } ov.names.nox <- lav_partable_vnames(FLAT, type = "ov.nox") lv.names.x <- lav_partable_vnames(FLAT, type = "lv.x") # exogenous lv ov.names.y <- lav_partable_vnames(FLAT, type = "ov.y") # dependent ov lv.names.y <- lav_partable_vnames(FLAT, type = "lv.y") # dependent lv lv.names.efa <- lav_partable_vnames(FLAT, type = "lv.efa") # lvov.names.y <- c(ov.names.y, lv.names.y) lvov.names.y <- c(lv.names.y, ov.names.y) # get 'ordered' variables, either from FLAT or varTable ov.names.ord1 <- lav_partable_vnames(FLAT, type = "ov.ord") # check if we have "|" for exogenous variables if (length(ov.names.ord1) > 0L) { idx <- which(ov.names.ord1 %in% ov.names.x) if (length(idx) > 0L) { lav_msg_warn(gettext("thresholds are defined for exogenous variables:"), lav_msg_view(ov.names.ord1[idx], "none")) } } # check data if (!is.null(varTable)) { ov.names.ord2 <- as.character(varTable$name[varTable$type == "ordered"]) # remove fixed.x variables idx <- which(ov.names.ord2 %in% ov.names.x) if (length(idx) > 0L) { ov.names.ord2 <- ov.names.ord2[-idx] } # remove those that do not appear in the model syntax idx <- which(!ov.names.ord2 %in% ov.names) if (length(idx) > 0L) { ov.names.ord2 <- ov.names.ord2[-idx] } } else { ov.names.ord2 <- character(0L) } # check nthresholds, if it is a named vector ov.names.ord3 <- character(0L) if (!is.null(nthresholds)) { if (!is.null(varTable)) { lav_msg_stop(gettext( "the varTable and nthresholds arguments should not be used together.")) } if (!is.numeric(nthresholds)) { lav_msg_stop(gettext("nthresholds should be a named vector of integers.")) } nth.names <- names(nthresholds) if (!is.null(nth.names)) { ov.names.ord3 <- nth.names } else { # if nthresholds is just a number, all is good; otherwise it # should be a named vector if (length(nthresholds) > 1L) { lav_msg_warn(gettext("nthresholds must be a named vector of integers.")) } # just a single number -> assume ALL y variables are ordered ov.names.ord3 <- ov.names.nox } } # final ov.names.ord tmp <- unique(c(ov.names.ord1, ov.names.ord2, ov.names.ord3)) ov.names.ord <- ov.names[ov.names %in% tmp] # if we have the "|" in the model syntax, check the number of thresholds # if(!is.null(varTable) && length(ov.names.ord1) > 0L) { # for(o in ov.names.ord1) { # nth <- varTable$nlev[ varTable$name == o ] - 1L # nth.in.partable <- sum(FLAT$op == "|" & FLAT$lhs == o) # if(nth != nth.in.partable) { # stop("lavaan ERROR: expected ", max(0,nth), # " threshold(s) for variable ", # sQuote(o), "; syntax contains ", nth.in.partable, "\n") # } # } # } if (length(ov.names.ord) > 0L) { categorical <- TRUE } # std.lv = TRUE, group.equal includes "loadings" # if(ngroups > 1L && std.lv && "loadings" %in% group.equal) { # suggested by Michael Hallquist # in 0.6.3, we gave a warning, # warning("lavaan WARNING: std.lv = TRUE forces all variances to be unity", # " in all groups, despite group.equal = \"loadings\"") # in >0.6.4, we free the lv
variances in all but the first group, # } # do we have any EFA lv's? they need special treatment if auto.efa = TRUE if (!is.null(FLAT$efa) && auto.efa) { lv.names.efa <- unique(FLAT$lhs[FLAT$op == "=~" & nchar(FLAT$efa) > 0L]) # remove them from lv.names.x # if(length(lv.names.x) > 0L) { # both.idx <- which(lv.names.x %in% lv.names.efa) # if(length(both.idx) > 0L) { # lv.names.x <- lv.names.x[ -both.idx ] # } # } # remove them from lvov.names.y # if(length(lvov.names.y) > 0L) { # both.idx <- which(lvov.names.y %in% lv.names.efa) # if(length(both.idx) > 0L) { # lvov.names.y <- lvov.names.y[ -both.idx ] # } # } } else { lv.names.efa <- character(0) } lhs <- rhs <- character(0) # 1. THRESHOLDS (based on varTable) # NOTE: - new in 0.5-18: ALWAYS include threshold parameters in partable, # but only free them if auto.th = TRUE # - [only ov.names.ord2, because ov.names.ord1 are already # in tmp.user and we only need to add 'default' parameters here] # (not any longer: we create them for ALL ordered var (0.6-12) nth <- 0L # if(auto.th && length(ov.names.ord2) > 0L) { # if(length(ov.names.ord2) > 0L) { if (length(ov.names.ord) > 0L) { # for(o in ov.names.ord2) { for (o in ov.names.ord) { if (!is.null(varTable)) { nth <- varTable$nlev[varTable$name == o] - 1L } else if (!is.null(nthresholds)) { if (length(nthresholds) == 1L && is.null(nth.names)) { nth <- nthresholds } else { # we can assume nthresholds is a named vector nth <- unname(nthresholds[o]) if (is.na(nth)) { lav_msg_stop(gettextf("ordered variable %s not found in the named vector nthresholds.", o)) } } } if (nth < 1L) next lhs <- c(lhs, rep(o, nth)) rhs <- c(rhs, paste("t", seq_len(nth), sep = "")) } nth <- length(lhs) } # 2. default (residual) variances and covariances # a) (residual) VARIANCES (all ov's except exo, and all lv's) # NOTE: change since 0.5-17: we ALWAYS include the vars in the # parameter table; but only if auto.var = TRUE, we set them free # if(auto.var) { ov.var <- ov.names.nox # auto-remove ordinal variables # idx <- match(ov.names.ord, ov.var) # if(length(idx)) ov.var <- ov.var[-idx] lhs <- c(lhs, ov.var, lv.names) rhs <- c(rhs, ov.var, lv.names) # } # b) `independent` latent variable COVARIANCES (lv.names.x) if (auto.cov.lv.x && length(lv.names.x) > 1L) { tmp <- utils::combn(lv.names.x, 2) lhs <- c(lhs, tmp[1, ]) # to fill upper.tri rhs <- c(rhs, tmp[2, ]) } # c) `dependent` latent variables COVARIANCES (lv.y.idx + ov.y.lv.idx) if (auto.cov.y && length(lvov.names.y) > 1L) { tmp <- utils::combn(lvov.names.y, 2L) lhs <- c(lhs, tmp[1, ]) # to fill upper.tri rhs <- c(rhs, tmp[2, ]) } # d) exogenous x covariates: VARIANCES + COVARIANCES if ((nx <- length(ov.names.x)) > 0L) { if (conditional.x) { # new in 0.6-12: we make a distinction between ov.names.x and # ov.names.x.block: we treat them 'separately' (with no covariances # among them) # but we add 'regressions' instead (see below) ov.names.x1 <- ov.names.x[!ov.names.x %in% ov.names.x.block] ov.names.x2 <- ov.names.x.block nx1 <- length(ov.names.x1) # splitted x nx2 <- length(ov.names.x2) # regular x if (nx1 > 0L) { idx <- lower.tri(matrix(0, nx1, nx1), diag = TRUE) lhs <- c(lhs, rep(ov.names.x1, each = nx1)[idx]) # fill upper.tri rhs <- c(rhs, rep(ov.names.x1, times = nx1)[idx]) } if (nx2 > 0L) { idx <- lower.tri(matrix(0, nx2, nx2), diag = TRUE) lhs <- c(lhs, rep(ov.names.x2, each = nx2)[idx]) # fill upper.tri rhs <- c(rhs, rep(ov.names.x2, times = nx2)[idx]) } } else { idx <- lower.tri(matrix(0, nx, nx), diag = TRUE) lhs <- c(lhs, rep(ov.names.x, each = nx)[idx]) # fill 
upper.tri rhs <- c(rhs, rep(ov.names.x, times = nx)[idx]) } } # e) efa latent variables COVARIANCES; only needed for 'mediators' # (not in lv.names.x, not in lv.names.y) -- added in 0.6-18 if (auto.efa && length(lv.names.efa) > 1L) { efa.values <- lav_partable_efa_values(FLAT) for (set in efa.values) { # correlated factors within each set this.set.lv <- unique(FLAT$lhs[FLAT$op == "=~" & !FLAT$lhs %in% lv.names.x & !FLAT$lhs %in% lv.names.y & FLAT$efa == set]) if (length(this.set.lv) > 0L) { tmp <- utils::combn(this.set.lv, 2) lhs <- c(lhs, tmp[1, ]) # to fill upper.tri rhs <- c(rhs, tmp[2, ]) } } } # create 'op' (thresholds come first, then variances) op <- rep("~~", length(lhs)) op[seq_len(nth)] <- "|" # LATENT RESPONSE SCALES (DELTA) # NOTE: - new in 0.5-19: ALWAYS include scaling parameters in partable, # but only free them if auto.delta = TRUE (and parameterization # is "delta" # if(auto.delta && auto.th && length(ov.names.ord) > 0L && # # length(lv.names) > 0L && # (ngroups > 1L || any(FLAT$op == "~*~") || parameterization == "theta")) { if (length(ov.names.ord) > 0L) { lhs <- c(lhs, ov.names.ord) rhs <- c(rhs, ov.names.ord) op <- c(op, rep("~*~", length(ov.names.ord))) } # same for correlation structures, but now for ALL variables if (!categorical && correlation) { lhs <- c(lhs, ov.names) rhs <- c(rhs, ov.names) op <- c(op, rep("~*~", length(ov.names))) } # 3. INTERCEPTS if (meanstructure) { # if(conditional.x) { # ov.int <- ov.names.nox # } else { ov.int <- ov.names # } # auto-remove ordinal variables # idx <- which(ov.int %in% ov.names.ord) # if(length(idx)) ov.int <- ov.int[-idx] int.lhs <- c(ov.int, lv.names) lhs <- c(lhs, int.lhs) rhs <- c(rhs, rep("", length(int.lhs))) op <- c(op, rep("~1", length(int.lhs))) } # 4. REGRESSIONS if (conditional.x) { # new in 0.6-12: we make a distinction between ov.names.x and # ov.names.x.block: we treat them 'separately' (with no covariances # among them) # but we add 'regressions' instead! ov.names.x1 <- ov.names.x[!ov.names.x %in% ov.names.x.block] ov.names.x2 <- ov.names.x.block nx1 <- length(ov.names.x1) # splitted x nx2 <- length(ov.names.x2) # regular x if (nx1 > 0L && nx2 > 0L) { # add regressions for splitted-x ~ regular-x lhs <- c(lhs, rep(ov.names.x1, times = nx2)) op <- c(op, rep("~", nx2 * nx1)) rhs <- c(rhs, rep(ov.names.x2, each = nx1)) } } # free group weights if (group.w.free) { lhs <- c(lhs, "group") rhs <- c(rhs, "w") op <- c(op, "%") } tmp.default <- data.frame( lhs = lhs, op = op, rhs = rhs, mod.idx = rep(0L, length(lhs)), stringsAsFactors = FALSE ) # 4. USER: user-specified elements lhs <- FLAT$lhs op <- FLAT$op rhs <- FLAT$rhs mod.idx <- FLAT$mod.idx lv.names <- lav_partable_vnames(FLAT, type = "lv") # latent variables ov.names <- lav_partable_vnames(FLAT, type = "ov") # observed variables tmp.user <- data.frame( lhs = lhs, op = op, rhs = rhs, mod.idx = mod.idx, stringsAsFactors = FALSE ) # check for duplicated elements in tmp.user tmp.tmp <- tmp.user[, 1:3] idx <- which(duplicated(tmp.tmp)) if (length(idx) > 0L) { txt <- sapply(seq_along(idx), function(i) { paste( " ", tmp.tmp[idx[i], "lhs"], tmp.tmp[idx[i], "op"], tmp.tmp[idx[i], "rhs"] ) }) lav_msg_warn(gettext( "duplicated elements in model syntax have been ignored:"), lav_msg_view(txt, "none")) tmp.user <- tmp.user[-idx, ] } # check for duplicated elements in tmp.default # - FIXME: can we not avoid this somehow?? 
# - for example, if the user model includes 'x1 ~~ x1' # or 'x1 ~ 1' # - remove them from tmp.default tmp.tmp <- rbind(tmp.default[, 1:3], tmp.user[, 1:3]) idx <- which(duplicated(tmp.tmp, fromLast = TRUE)) # idx should be in tmp.default if (length(idx)) { for (i in idx) { flat.idx <- which(tmp.user$lhs == tmp.default$lhs[i] & tmp.user$op == tmp.default$op[i] & tmp.user$rhs == tmp.default$rhs[i]) if (length(flat.idx) != 1L) { cat("[lavaan DEBUG] idx in tmp.tmp: i = ", i, "\n") print(tmp.tmp[i, ]) cat("[lavaan DEBUG] idx in tmp.default: i = ", i, "\n") print(tmp.default[i, ]) cat("[lavaan DEBUG] flat.idx:") print(flat.idx) } } tmp.default <- tmp.default[-idx, ] } # now that we have removed all duplicated elements, we can construct # the tmp.list for a single group/block lhs <- c(tmp.user$lhs, tmp.default$lhs) op <- c(tmp.user$op, tmp.default$op) rhs <- c(tmp.user$rhs, tmp.default$rhs) user <- c( rep(1L, length(tmp.user$lhs)), rep(0L, length(tmp.default$lhs)) ) mod.idx <- c(tmp.user$mod.idx, tmp.default$mod.idx) free <- rep(1L, length(lhs)) ustart <- rep(as.numeric(NA), length(lhs)) # label <- paste(lhs, op, rhs, sep="") label <- rep(character(1), length(lhs)) exo <- rep(0L, length(lhs)) # 0a. if auto.th = FALSE, set fix the thresholds if (!auto.th) { th.idx <- which(op == "|" & user == 0L) free[th.idx] <- 0L } # 0b. if auto.var = FALSE, set the unspecified variances to zero if (!auto.var) { var.idx <- which(op == "~~" & lhs == rhs & user == 0L) ustart[var.idx] <- 0.0 free[var.idx] <- 0L } else if (length(lv.names.f) > 0L) { # 'formative' (residual) variances are set to zero by default var.idx <- which(op == "~~" & lhs == rhs & lhs %in% lv.names.f & user == 0L) ustart[var.idx] <- 0.0 free[var.idx] <- 0L } # 1. fix metric of regular latent variables if (std.lv) { # fix metric by fixing the variance of the latent variable lv.var.idx <- which(op == "~~" & lhs %in% lv.names & lhs == rhs) ustart[lv.var.idx] <- 1.0 free[lv.var.idx] <- 0L } if (auto.efa && length(lv.names.efa) > 0L) { # fix lv variances of efa blocks to unity lv.var.idx <- which(op == "~~" & lhs %in% lv.names.efa & lhs == rhs) ustart[lv.var.idx] <- 1.0 free[lv.var.idx] <- 0L } if (auto.fix.first) { # fix metric by fixing the loading of the first indicator # (but not for efa factors) mm.idx <- which(op == "=~" & !(lhs %in% lv.names.efa)) first.idx <- mm.idx[which(!duplicated(lhs[mm.idx]))] ustart[first.idx] <- 1.0 free[first.idx] <- 0L } # 2. fix residual variance of single indicators to zero if (auto.var && auto.fix.single) { mm.idx <- which(op == "=~") tmp.t <- table(lhs[mm.idx]) if (any(tmp.t == 1L)) { # ok, we have a LV with only a single indicator lv.names.single <- names(tmp.t)[tmp.t == 1L] # get corresponding indicator if unique lhs.mm <- lhs[mm.idx] rhs.mm <- rhs[mm.idx] single.ind <- rhs.mm[which(lhs.mm %in% lv.names.single & lhs.mm != rhs.mm & # exclude phantom !(duplicated(rhs.mm) | duplicated(rhs.mm, fromLast = TRUE)))] # is the indicator unique? if (length(single.ind) > 0L) { var.idx <- which(op == "~~" & lhs %in% single.ind & rhs %in% single.ind & lhs == rhs & user == 0L) ustart[var.idx] <- 0.0 free[var.idx] <- 0L } } } # 3. orthogonal = TRUE? if (orthogonal) { lv.cov.idx <- which(op == "~~" & lhs %in% lv.names & rhs %in% lv.names & lhs != rhs & user == 0L) ustart[lv.cov.idx] <- 0.0 free[lv.cov.idx] <- 0L } # 3b. orthogonal.y = TRUE? if (orthogonal.y) { lv.cov.idx <- which(op == "~~" & lhs %in% lv.names.y & rhs %in% lv.names.y & lhs != rhs & user == 0L) ustart[lv.cov.idx] <- 0.0 free[lv.cov.idx] <- 0L } # 3c. 
orthogonal.x = TRUE? if (orthogonal.x) { lv.cov.idx <- which(op == "~~" & lhs %in% lv.names.x & rhs %in% lv.names.x & lhs != rhs & user == 0L) ustart[lv.cov.idx] <- 0.0 free[lv.cov.idx] <- 0L } # 3d. orthogonal.efa = TRUE? if (orthogonal.efa) { lv.cov.idx <- which(op == "~~" & lhs %in% lv.names.efa & rhs %in% lv.names.efa & lhs != rhs & user == 0L) ustart[lv.cov.idx] <- 0.0 free[lv.cov.idx] <- 0L } # 4. intercepts if (meanstructure) { if (categorical) { # zero intercepts/means ordinal variables ov.int.idx <- which(op == "~1" & lhs %in% ov.names.ord & user == 0L) ustart[ov.int.idx] <- 0.0 free[ov.int.idx] <- 0L } if (int.ov.free == FALSE) { # zero intercepts/means observed variables ov.int.idx <- which(op == "~1" & lhs %in% ov.names & user == 0L) ustart[ov.int.idx] <- 0.0 free[ov.int.idx] <- 0L } if (int.lv.free == FALSE) { # zero intercepts/means latent variables lv.int.idx <- which(op == "~1" & lhs %in% lv.names & user == 0L) ustart[lv.int.idx] <- 0.0 free[lv.int.idx] <- 0L } # 4b. fixed effect (only if we have random slopes) if (!is.null(FLAT$rv) && any(nchar(FLAT$rv) > 0L)) { lv.names.rv <- lav_partable_vnames(FLAT, "lv.rv") lv.rv.idx <- which(op == "~1" & lhs %in% lv.names.rv & user == 0L) ustart[lv.rv.idx] <- as.numeric(NA) free[lv.rv.idx] <- 1L } if (length(lv.names.int) > 0L) { lv.int.idx <- which(op == "~1" & lhs %in% lv.names.int & user == 0L) ustart[lv.int.idx] <- as.numeric(NA) free[lv.int.idx] <- 1L } } # 4b. fixed effect (only if we have random slopes) # if(!is.null(FLAT$rv)) { # } # 5. handle exogenous `x' covariates # usually, ov.names.x.block == ov.names.x # except if multilevel, where 'splitted' ov.x are treated as endogenous # 5a conditional.x = FALSE if (!conditional.x && fixed.x && length(ov.names.x.block) > 0) { # 1. variances/covariances exo.var.idx <- which(op == "~~" & rhs %in% ov.names.x.block & lhs %in% ov.names.x.block & user == 0L) ustart[exo.var.idx] <- as.numeric(NA) # should be overriden later! free[exo.var.idx] <- 0L exo[exo.var.idx] <- 1L # 2. intercepts exo.int.idx <- which(op == "~1" & lhs %in% ov.names.x.block & user == 0L) ustart[exo.int.idx] <- as.numeric(NA) # should be overriden later! free[exo.int.idx] <- 0L exo[exo.int.idx] <- 1L } # 5a-bis. conditional.x = TRUE if (conditional.x && length(ov.names.x) > 0L) { # 1. variances/covariances exo.var.idx <- which(op == "~~" & rhs %in% ov.names.x & lhs %in% ov.names.x & user == 0L) if (fixed.x) { ustart[exo.var.idx] <- as.numeric(NA) # should be overriden later! free[exo.var.idx] <- 0L } exo[exo.var.idx] <- 1L # 2. intercepts exo.int.idx <- which(op == "~1" & lhs %in% ov.names.x & user == 0L) if (fixed.x) { ustart[exo.int.idx] <- as.numeric(NA) # should be overriden later! free[exo.int.idx] <- 0L } exo[exo.int.idx] <- 1L # 3. regressions ov + lv exo.reg.idx <- which(op %in% c("~", "<~") & lhs %in% c(lv.names, ov.names.nox) & rhs %in% ov.names.x) exo[exo.reg.idx] <- 1L # 3b regression splitted.x ~ regular.x exo.reg2.idx <- which(op %in% c("~", "<~") & lhs %in% ov.names.x & rhs %in% ov.names.x) if (fixed.x) { ustart[exo.reg2.idx] <- as.numeric(NA) # should be overriden later! free[exo.reg2.idx] <- 0L } exo[exo.reg2.idx] <- 1L } # 5b. residual variances of ordinal variables? if (length(ov.names.ord) > 0L) { ord.idx <- which(lhs %in% ov.names.ord & op == "~~" & user == 0L & ## New in 0.6-1 lhs == rhs) ustart[ord.idx] <- 1L ## FIXME!! or 0?? (0 breaks ex3.12) free[ord.idx] <- 0L } # 5c latent response scales of ordinal variables? 
# by default, all fixed to 1.0 if (length(ov.names.ord) > 0L) { delta.idx <- which(op == "~*~" & user == 0L) ## New in 0.6-1 ustart[delta.idx] <- 1.0 free[delta.idx] <- 0L } # correlation structure (new in 0.6-13) if (correlation) { var.idx <- which(lhs %in% ov.names & op == "~~" & user == 0L & lhs == rhs) ustart[var.idx] <- 1L free[var.idx] <- 0L delta.idx <- which(op == "~*~" & user == 0L) ustart[delta.idx] <- 1.0 free[delta.idx] <- 0L } # group proportions (group 1L) if (group.w.free) { group.idx <- which(lhs == "group" & op == "%") # if(ngroups > 1L) { free[group.idx] <- 1L ustart[group.idx] <- as.numeric(NA) # } else { # free[ group.idx ] <- 0L # ustart[ group.idx ] <- 0.0 # last group # } } # 6. multiple groups? group <- rep(1L, length(lhs)) if (ngroups > 1) { group <- rep(1:ngroups, each = length(lhs)) user <- rep(user, times = ngroups) lhs <- rep(lhs, times = ngroups) op <- rep(op, times = ngroups) rhs <- rep(rhs, times = ngroups) free <- rep(free, times = ngroups) ustart <- rep(ustart, times = ngroups) mod.idx <- rep(mod.idx, times = ngroups) label <- rep(label, times = ngroups) exo <- rep(exo, times = ngroups) # specific changes per group for (g in 2:ngroups) { # label # label[group == g] <- paste(label[group == 1], ".g", g, sep="") # free/fix intercepts if (meanstructure) { int.idx <- which(op == "~1" & lhs %in% lv.names & user == 0L & group == g) if (int.lv.free == FALSE && g > 1 && ("intercepts" %in% group.equal || "thresholds" %in% group.equal) && !("means" %in% group.equal)) { free[int.idx] <- 1L ustart[int.idx] <- as.numeric(NA) } } # latent variances if std.lv = TRUE (new in 0.6-4) if (std.lv && "loadings" %in% group.equal && !"lv.variances" %in% group.equal) { lv.var.idx <- which(op == "~~" & lhs %in% lv.names & !lhs %in% lv.names.efa & lhs == rhs & user == 0L & group == g) if (length(lv.var.idx) > 0L) { free[lv.var.idx] <- 1L ustart[lv.var.idx] <- as.numeric(NA) } } # latent variances if efa = TRUE (new in 0.6-5) if (auto.efa && "loadings" %in% group.equal && !"lv.variances" %in% group.equal) { lv.var.idx <- which(op == "~~" & lhs %in% lv.names.efa & lhs == rhs & user == 0L & group == g) if (length(lv.var.idx) > 0L) { free[lv.var.idx] <- 1L ustart[lv.var.idx] <- as.numeric(NA) } } # latent response scaling if (auto.delta && parameterization == "delta") { if (any(op == "~*~" & group == g) && ("thresholds" %in% group.equal)) { delta.idx <- which(op == "~*~" & group == g) free[delta.idx] <- 1L ustart[delta.idx] <- as.numeric(NA) } } else if (parameterization == "theta") { if (any(op == "~*~" & group == g) && ("thresholds" %in% group.equal)) { var.ord.idx <- which(op == "~~" & group == g & lhs %in% ov.names.ord & lhs == rhs) free[var.ord.idx] <- 1L ustart[var.ord.idx] <- as.numeric(NA) } } # group proportions if (group.w.free) { group.idx <- which(lhs == "group" & op == "%" & group == g) # if(g == ngroups) { # free[ group.idx ] <- 0L # ustart[ group.idx ] <- 0.0 # last group # } else { free[group.idx] <- 1L ustart[group.idx] <- as.numeric(NA) # } } } # g } # ngroups # construct tmp.list tmp.list <- list( id = seq_along(lhs), lhs = lhs, op = op, rhs = rhs, user = user ) # add block column (before group/level columns) if (!is.null(block.id)) { # only one block tmp.list$block <- rep(block.id, length(lhs)) } else { # block is a combination of at least group, level, ... 
# for now, only group tmp.list$block <- group } # block columns (typically only group) for (block in blocks) { if (block == "group") { tmp.list[[block]] <- group } else { tmp.list[[block]] <- rep(0L, length(lhs)) } } # other columns tmp.list2 <- list( mod.idx = mod.idx, free = free, ustart = ustart, exo = exo, label = label ) tmp.list <- c(tmp.list, tmp.list2) } lavaan/R/lav_cfa_guttman1952.R0000644000176200001440000002424114627656441015535 0ustar liggesusers# the 'multiple group' method as described in Guttman, 1952 # # Guttman, L. (1952). Multiple group methods for common-factor analysis, # their basis, computation, and interpretation. Psychometrika, 17(2) 209--222 # # YR 02 Feb 2023: - first version in lavaan, using quadprog (no std.lv yet) lav_cfa_guttman1952 <- function(S, marker.idx = NULL, lambda.nonzero.idx = NULL, theta = NULL, # vector! theta.bounds = FALSE, force.pd = FALSE, zero.after.efa = FALSE, quadprog = FALSE, psi.mapping = FALSE, nobs = 20L) { # for cutoff # dimensions nvar <- ncol(S) nfac <- length(marker.idx) stopifnot(length(theta) == nvar) # overview of lambda structure B <- matrix(0, nvar, nfac) lambda.marker.idx <- (seq_len(nfac) - 1L) * nvar + marker.idx B[lambda.marker.idx] <- 1L B[lambda.nonzero.idx] <- 1L # if we wish to keep SminTheta PD, we must keep theta within bounds if (force.pd) { theta.bounds <- TRUE } if (psi.mapping) { theta.bounds <- TRUE force.pd <- TRUE } # do we first 'clip' the theta values so they are within standard bounds? # (Question: do we need the 0.01 and 0.99 multipliers?) diagS <- diag(S) if (theta.bounds) { # lower bound lower.bound <- diagS * 0 # * 0.01 too.small.idx <- which(theta < lower.bound) if (length(too.small.idx) > 0L) { theta[too.small.idx] <- lower.bound[too.small.idx] } # upper bound upper.bound <- diagS * 1 # * 0.99 too.large.idx <- which(theta > upper.bound) if (length(too.large.idx) > 0L) { theta[too.large.idx] <- upper.bound[too.large.idx] } } # compute SminTheta: S where we replace diagonal with 'communalities' diag.theta <- diag(theta, nvar) SminTheta <- S - diag.theta if (force.pd) { lambda <- try(lav_matrix_symmetric_diff_smallest_root(S, diag.theta), silent = TRUE ) if (inherits(lambda, "try-error")) { lav_msg_warn(gettext("failed to compute lambda")) SminTheta <- S - diag.theta # and hope for the best } else { cutoff <- 1 + 1 / (nobs - 1) if (lambda < cutoff) { lambda.star <- lambda - 1 / (nobs - 1) SminTheta <- S - lambda.star * diag.theta } else { SminTheta <- S - diag.theta } } } else { # at least we force the diagonal elements of SminTheta to be nonnegative lower.bound <- diagS * 0.001 too.small.idx <- which(diag(SminTheta) < lower.bound) if (length(too.small.idx) > 0L) { diag(SminTheta)[too.small.idx] <- lower.bound[too.small.idx] } } # compute covariances among 1) (corrected) variables, and # 2) (corrected) sum-scores YS.COV <- SminTheta %*% B # compute covariance matrix of corrected sum-scores # SS.COV <- t(B) %*% SminTheta %*% B SS.COV <- crossprod(B, YS.COV) # scaling factors # D.inv.sqrt <- diag(1/sqrt(diag(SS.COV))) d.inv.sqrt <- 1 / sqrt(diag(SS.COV)) # factor correlation matrix # PHI <- D.inv.sqrt %*% SS.COV %*% D.inv.sqrt PHI <- t(SS.COV * d.inv.sqrt) * d.inv.sqrt # factor *structure* matrix # (covariances corrected Y & corrected normalized sum-scores) # YS.COR <- YS.COV %*% D.inv.sqrt YS.COR <- t(YS.COV) * d.inv.sqrt # transposed! 
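  # note: the two scaled crossproducts above avoid forming the diagonal
  # scaling matrix explicitly; as an illustration (not run; 'D' is only
  # defined for this check), for a symmetric SS.COV we should have:
  #   D <- diag(d.inv.sqrt)
  #   all.equal(t(SS.COV * d.inv.sqrt) * d.inv.sqrt,      # PHI above
  #             D %*% SS.COV %*% D, check.attributes = FALSE)
  #   all.equal(t(YS.COV) * d.inv.sqrt,                   # YS.COR above
  #             D %*% t(YS.COV), check.attributes = FALSE)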
if (zero.after.efa) { # we initially assume a saturated LAMBDA (like EFA) # then, we just fix the zero-elements to zero LAMBDA <- t(solve(PHI, YS.COR)) # = unconstrained EFA version # force zeroes LAMBDA <- LAMBDA * B } else if (quadprog) { # constrained version using quadprog # only useful if (in)equality constraints are needed (TODO) # PHI MUST be positive-definite PHI <- cov2cor(lav_matrix_symmetric_force_pd(PHI, tol = 1e-04 )) # option? Dmat <- lav_matrix_bdiag(rep(list(PHI), nvar)) dvec <- as.vector(YS.COR) eq.idx <- which(t(B) != 1) # these must be zero (row-wise!) Rmat <- diag(nrow(Dmat))[eq.idx, , drop = FALSE] bvec <- rep(0, length(eq.idx)) # optional, 0=default out <- try(quadprog::solve.QP( Dmat = Dmat, dvec = dvec, Amat = t(Rmat), meq = length(eq.idx), bvec = bvec ), silent = TRUE) if (inherits(out, "try-error")) { lav_msg_warn(gettext("solve.QP failed to find a solution")) Lambda <- B Lambda[lambda.nonzero.idx] <- as.numeric(NA) Theta <- diag(rep(as.numeric(NA), nvar), nvar) Psi <- matrix(as.numeric(NA), nfac, nfac) return(list(lambda = Lambda, theta = Theta, psi = Psi)) } else { LAMBDA <- matrix(out$solution, nrow = nvar, ncol = nfac, byrow = TRUE) # zap almost zero elements LAMBDA[abs(LAMBDA) < sqrt(.Machine$double.eps)] <- 0 } } else { # default, if no (in)equality constraints YS.COR0 <- YS.COR YS.COR0[t(B) != 1] <- 0 LAMBDA <- t(YS.COR0) } # rescale LAMBDA, so that 'marker' indicator == 1 marker.lambda <- LAMBDA[lambda.marker.idx] Lambda <- t(t(LAMBDA) * (1 / marker.lambda)) # rescale PHI, covariance metric Psi <- t(PHI * marker.lambda) * marker.lambda # redo psi using ML mapping function? if (psi.mapping) { Ti <- 1 / theta zero.theta.idx <- which(abs(theta) < 0.01) # be conservative if (length(zero.theta.idx) > 0L) { Ti[zero.theta.idx] <- 1 } # ML mapping function M <- solve(t(Lambda) %*% diag(Ti, nvar) %*% Lambda) %*% t(Lambda) %*% diag(Ti, nvar) Psi <- M %*% SminTheta %*% t(M) } list(lambda = Lambda, theta = theta, psi = Psi) } # internal function to be used inside lav_optim_noniter # return 'x', the estimated vector of free parameters lav_cfa_guttman1952_internal <- function(lavobject = NULL, # convenience # internal slot lavmodel = NULL, lavsamplestats = NULL, lavpartable = NULL, lavdata = NULL, lavoptions = NULL, theta.bounds = TRUE, force.pd = TRUE, zero.after.efa = FALSE, quadprog = FALSE, psi.mapping = TRUE) { lavpta <- NULL if (!is.null(lavobject)) { stopifnot(inherits(lavobject, "lavaan")) # extract slots lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavpartable <- lav_partable_set_cache(lavobject@ParTable, lavobject@pta) lavpta <- lavobject@pta lavdata <- lavobject@Data lavoptions <- lavobject@Options } if (is.null(lavpta)) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } if (missing(psi.mapping) && !is.null(lavoptions$estimator.args$psi.mapping)) { psi.mapping <- lavoptions$estimator.args$psi.mapping } if (missing(quadprog) && !is.null(lavoptions$estimator.args$quadprog)) { quadprog <- lavoptions$estimator.args$quadprog } # no structural part! if (any(lavpartable$op == "~")) { lav_msg_stop(gettext("GUTTMAN1952 estimator only available for CFA models")) } # no BETA matrix!
(i.e., no higher-order factors) if (!is.null(lavmodel@GLIST$beta)) { lav_msg_stop(gettext( "GUTTMAN1952 estimator not available for models that require a BETA matrix")) } # no std.lv = TRUE for now if (lavoptions$std.lv) { lav_msg_stop(gettext( "GUTTMAN1952 estimator not available if std.lv = TRUE")) } nblocks <- lav_partable_nblocks(lavpartable) stopifnot(nblocks == 1L) # for now b <- 1L sample.cov <- lavsamplestats@cov[[b]] nvar <- nrow(sample.cov) lv.names <- lavpta$vnames$lv.regular[[b]] nfac <- length(lv.names) marker.idx <- lavpta$vidx$lv.marker[[b]] lambda.idx <- which(names(lavmodel@GLIST) == "lambda") lambda.nonzero.idx <- lavmodel@m.free.idx[[lambda.idx]] # only diagonal THETA for now... # because if we have correlated residuals, we should remove the # corresponding variables as instruments before we estimate lambda... # (see MIIV) theta.idx <- which(names(lavmodel@GLIST) == "theta") # usually '2' m.theta <- lavmodel@m.free.idx[[theta.idx]] nondiag.idx <- m.theta[!m.theta %in% lav_matrix_diag_idx(nvar)] if (length(nondiag.idx) > 0L) { lav_msg_warn(gettext( "this implementation of GUTTMAN1952 does not handle correlated residuals yet!")) } # 1. obtain estimate for (diagonal elements of) THETA # for now we use Spearman per factor B <- matrix(0, nvar, nfac) lambda.marker.idx <- (seq_len(nfac) - 1L) * nvar + marker.idx B[lambda.marker.idx] <- 1L B[lambda.nonzero.idx] <- 1L theta <- numeric(nvar) for (f in seq_len(nfac)) { ov.idx <- which(B[, f] == 1L) S.fac <- sample.cov[ov.idx, ov.idx, drop = FALSE] theta[ov.idx] <- lav_cfa_theta_spearman(S.fac, bounds = "wide") } # 2. run Guttman1952 'Multiple Groups' algorithm out <- lav_cfa_guttman1952( S = sample.cov, marker.idx = marker.idx, lambda.nonzero.idx = lambda.nonzero.idx, theta = theta, # experimental theta.bounds = theta.bounds, force.pd = force.pd, zero.after.efa = zero.after.efa, quadprog = quadprog, psi.mapping = psi.mapping, # nobs = lavsamplestats@ntotal ) LAMBDA <- out$lambda THETA <- diag(out$theta, nvar) PSI <- out$psi # store matrices in lavmodel@GLIST lavmodel@GLIST$lambda <- LAMBDA lavmodel@GLIST$theta <- THETA lavmodel@GLIST$psi <- PSI # extract free parameters only x <- lav_model_get_parameters(lavmodel) # apply bounds (if any) if (!is.null(lavpartable$lower)) { lower.x <- lavpartable$lower[lavpartable$free > 0] too.small.idx <- which(x < lower.x) if (length(too.small.idx) > 0L) { x[too.small.idx] <- lower.x[too.small.idx] } } if (!is.null(lavpartable$upper)) { upper.x <- lavpartable$upper[lavpartable$free > 0] too.large.idx <- which(x > upper.x) if (length(too.large.idx) > 0L) { x[too.large.idx] <- upper.x[too.large.idx] } } x } lavaan/R/ctr_pml_plrt2.R0000644000176200001440000002002314627656441014637 0ustar liggesusersctr_pml_plrt2 <- function(lavobject = NULL, lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavpartable = NULL, lavoptions = NULL, x = NULL, VCOV = NULL, lavcache = NULL) { lavpta <- NULL if (!is.null(lavobject)) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavoptions <- lavobject@Options lavsamplestats <- lavobject@SampleStats lavcache <- lavobject@Cache lavpartable <- lav_partable_set_cache(lavobject@ParTable, lavobject@pta) lavpta <- lavobject@pta } if (is.null(lavpta)) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } if (is.null(x)) { # compute 'fx' = objective function value # (NOTE: since 0.5-18, NOT divided by N!!)
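  # (H0.fx below is the total PML objective value under the fitted H0 model;
  #  the per-group contributions are retrieved from attr(fx, "fx.group"))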
fx <- lav_model_objective( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache ) H0.fx <- as.numeric(fx) H0.fx.group <- attr(fx, "fx.group") } else { H0.fx <- attr(attr(x, "fx"), "fx.pml") H0.fx.group <- attr(attr(x, "fx"), "fx.group") } # fit a saturated model 'fittedSat' ModelSat <- lav_partable_unrestricted( lavobject = NULL, lavdata = lavdata, lavoptions = lavoptions, lavpta = lavpta, lavsamplestats = lavsamplestats ) # FIXME: se="none", test="none"?? Options <- lavoptions Options$se <- "none" Options$test <- "none" Options$baseline <- FALSE Options$h1 <- FALSE fittedSat <- lavaan(ModelSat, slotOptions = Options, verbose = FALSE, slotSampleStats = lavsamplestats, slotData = lavdata, slotCache = lavcache ) fx <- lav_model_objective( lavmodel = fittedSat@Model, lavsamplestats = fittedSat@SampleStats, lavdata = fittedSat@Data, lavcache = fittedSat@Cache ) SAT.fx <- as.numeric(fx) SAT.fx.group <- attr(fx, "fx.group") # we also need a `saturated model', but where the moments are based # on the model-implied sample statistics under H0 ModelSat2 <- lav_partable_unrestricted( lavobject = NULL, lavdata = lavdata, lavoptions = lavoptions, lavpta = lavpta, lavsamplestats = NULL, sample.cov = computeSigmaHat(lavmodel), sample.mean = computeMuHat(lavmodel), sample.th = computeTH(lavmodel), sample.th.idx = lavsamplestats@th.idx ) Options2 <- Options Options2$optim.method <- "none" Options2$optim.force.converged <- TRUE fittedSat2 <- lavaan(ModelSat2, slotOptions = Options2, verbose = FALSE, slotSampleStats = lavsamplestats, slotData = lavdata, slotCache = lavcache ) # the code below was contributed by Myrsini Katsikatsou (Jan 2015) # for now, only a single group is supported: # g = 1L ########################### The code for PLRT for overall goodness of fit ##### Section 1. Compute the asymptotic mean and variance ##### of the first quadratic quantity # if(is.null(VCOV)) { # VCOV <- lav_model_vcov(lavmodel = lavmodel, # lavsamplestats = lavsamplestats, # lavoptions = lavoptions, # lavdata = lavdata, # lavpartable = lavpartable, # lavcache = lavcache) # } # G.inv # InvG_attheta0 <- lavsamplestats@ntotal * VCOV[,] # Hessian # H_attheta0 <- solve(attr(VCOV, "E.inv")) # inverted observed information ('H.inv') if (is.null(VCOV)) { H0.inv <- lav_model_information_observed( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, augmented = TRUE, inverted = TRUE ) } else { H0.inv <- attr(VCOV, "E.inv") } # first order information ('J') if (is.null(VCOV)) { J0 <- lav_model_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache )[, ] } else { # we do not get J, but J.group, FIXME? J0 <- lav_model_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache )[, ] } # inverted Godambe information G0.inv <- H0.inv %*% J0 %*% H0.inv H0tmp_prod1 <- H0.inv %*% J0 # H0tmp_prod1 <- InvG_attheta0 %*% H_attheta0 H0tmp_prod2 <- H0tmp_prod1 %*% H0tmp_prod1 E_tww <- sum(diag(H0tmp_prod1)) var_tww <- 2 * sum(diag(H0tmp_prod2)) ##### Section 2: Compute the asymptotic mean and variance ##### of the second quadratic quantity. 
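  # (both quadratic quantities are asymptotically weighted sums of
  #  chi-square(1) variables: for T = sum_i lambda_i * z_i^2 with
  #  z_i ~ N(0,1), E[T] = sum(lambda_i) = tr(M) and
  #  Var[T] = 2 * sum(lambda_i^2) = 2 * tr(M %*% M), where M is the relevant
  #  matrix product -- exactly the sum(diag(...)) quantities computed in
  #  Section 1 above and in Section 2 below)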
tmp.options <- fittedSat2@Options tmp.options$se <- "robust.huber.white" VCOV.Sat2 <- lav_model_vcov( lavmodel = fittedSat2@Model, lavsamplestats = fittedSat2@SampleStats, lavoptions = tmp.options, lavdata = fittedSat2@Data, lavpartable = fittedSat2@ParTable, lavcache = fittedSat2@Cache ) # G.inv at vartheta_0 InvG_at_vartheta0 <- lavsamplestats@ntotal * VCOV.Sat2[, ] # Hessian at vartheta_0 H_at_vartheta0 <- solve(attr(VCOV.Sat2, "E.inv")) # should always work # H1.inv <- lavTech(fittedSat2, "inverted.information.observed") # J1 <- lavTech(fittedSat2, "information.first.order") # H1tmp_prod1 <- H1.inv %*% J1 H1tmp_prod1 <- InvG_at_vartheta0 %*% H_at_vartheta0 H1tmp_prod2 <- H1tmp_prod1 %*% H1tmp_prod1 E_tzz <- sum(diag(H1tmp_prod1)) var_tzz <- 2 * sum(diag(H1tmp_prod2)) ##### Section 3: Compute the asymptotic covariance ##### of the two quadratic quantities drhodpsi_MAT <- vector("list", length = lavsamplestats@ngroups) group.values <- lav_partable_group_values(fittedSat2@ParTable) for (g in 1:lavsamplestats@ngroups) { delta.g <- computeDelta(lavmodel)[[g]] # order of the rows: first the thresholds, then the correlations # we need to map the rows of delta.g to the rows/cols of H_at_vartheta0 # of H1 PT <- fittedSat2@ParTable PT$label <- lav_partable_labels(PT) free.idx <- which(PT$free > 0 & PT$group == group.values[g]) PARLABEL <- PT$label[free.idx] # for now, we can assume that computeDelta will always return # the thresholds first, then the correlations # # later, we should add a (working) add.labels = TRUE option to # computeDelta th.names <- lavpta$vnames$th[[g]] ov.names <- lavpta$vnames$ov[[g]] tmp <- utils::combn(ov.names, 2) cor.names <- paste(tmp[1, ], "~~", tmp[2, ], sep = "") NAMES <- c(th.names, cor.names) if (g > 1L) { NAMES <- paste(NAMES, ".g", g, sep = "") } par.idx <- match(PARLABEL, NAMES) drhodpsi_MAT[[g]] <- delta.g[par.idx, , drop = FALSE] } drhodpsi_mat <- do.call(rbind, drhodpsi_MAT) # tmp_prod <- ( t(drhodpsi_mat) %*% H_at_vartheta0 %*% # drhodpsi_mat %*% InvG_attheta0 %*% # H_attheta0 %*% InvG_attheta0 ) tmp_prod <- (t(drhodpsi_mat) %*% H_at_vartheta0 %*% drhodpsi_mat %*% H0.inv %*% J0 %*% G0.inv) cov_tzztww <- 2 * sum(diag(tmp_prod)) ##### Section 4: compute the adjusted PLRT and its p-value PLRTH0Sat <- 2 * (H0.fx - SAT.fx) PLRTH0Sat.group <- 2 * (H0.fx.group - SAT.fx.group) asym_mean_PLRTH0Sat <- E_tzz - E_tww asym_var_PLRTH0Sat <- var_tzz + var_tww - 2 * cov_tzztww scaling.factor <- (asym_mean_PLRTH0Sat / (asym_var_PLRTH0Sat / 2)) FSA_PLRT_SEM <- (asym_mean_PLRTH0Sat / (asym_var_PLRTH0Sat / 2)) * PLRTH0Sat adjusted_df <- (asym_mean_PLRTH0Sat * asym_mean_PLRTH0Sat) / (asym_var_PLRTH0Sat / 2) # In some very few cases (simulations show very few cases # in small sample sizes) # the adjusted_df is a negative number, we should then # print a warning like: "The adjusted df is computed to be a negative number # and for this the first and second moment adjusted PLRT is not computed." . 
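  # (moment matching: with E = asym_mean_PLRTH0Sat and V = asym_var_PLRTH0Sat,
  #  the rescaled statistic c * PLRTH0Sat with c = E / (V/2) has mean
  #  E^2 / (V/2) and variance 2 * E^2 / (V/2) -- the first two moments of a
  #  chi-square variate with adjusted_df = E^2 / (V/2) degrees of freedom)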
pvalue <- 1 - pchisq(FSA_PLRT_SEM, df = adjusted_df) list( PLRTH0Sat = PLRTH0Sat, PLRTH0Sat.group = PLRTH0Sat.group, stat = FSA_PLRT_SEM, df = adjusted_df, p.value = pvalue, scaling.factor = scaling.factor ) } ############################################################################ lavaan/R/lav_matrix_rotate.R0000644000176200001440000005456714627656440015613 0ustar liggesusers# rotation algorithms # # YR 3 April 2019 -- gradient projection algorithm # YR 21 April 2019 -- pairwise rotation algorithm # YR 11 May 2020 -- order.idx is done in rotation matrix # (suggested by Florian Scharf) # YR 02 June 2024 -- add group argument, so target and target.mask can # be a list # main function to rotate a single matrix 'A' lav_matrix_rotate <- function(A = NULL, # original matrix orthogonal = FALSE, # default is oblique method = "geomin", # default rot method method.args = list( geomin.epsilon = 0.01, orthomax.gamma = 1, cf.gamma = 0, oblimin.gamma = 0, promax.kappa = 4, target = matrix(0, 0, 0), target.mask = matrix(0, 0, 0) ), init.ROT = NULL, # initial rotation matrix init.ROT.check = TRUE, # check if init ROT is ok rstarts = 100L, # number of random starts row.weights = "default", # row weighting std.ov = FALSE, # rescale ov ov.var = NULL, # ov variances algorithm = "gpa", # rotation algorithm reflect = TRUE, # reflect sign order.lv.by = "index", # how to order the lv's gpa.tol = 0.00001, # stopping tol gpa tol = 1e-07, # stopping tol others keep.rep = FALSE, # store replications max.iter = 10000L, # max gpa iterations group = 1L) { # group number # check A if (!inherits(A, "matrix")) { lav_msg_stop(gettext("A does not seem to be a matrix")) } P <- nrow(A) M <- ncol(A) if (M < 2L) { # single dimension res <- list( LAMBDA = A, PHI = matrix(1, 1, 1), ROT = matrix(1, 1, 1), orthogonal = orthogonal, method = "none", method.args = list(), row.weights = "none", algorithm = "none", iter = 0L, converged = TRUE, method.value = 0 ) return(res) } # method method <- tolower(method) # if promax, skip everything, then call promax() later if (method == "promax") { # orig.algorithm <- algorithm # orig.rstarts <- rstarts algorithm <- "none" rstarts <- 0L init.ROT <- NULL ROT <- diag(M) } # check init.ROT if (!is.null(init.ROT) && init.ROT.check) { if (!inherits(init.ROT, "matrix")) { lav_msg_stop(gettext("init.ROT does not seem to be a matrix")) } if (nrow(init.ROT) != M) { lav_msg_stop(gettextf( "nrow(init.ROT) = %1$s does not equal ncol(A) = %2$s", nrow(init.ROT), M)) } if (nrow(init.ROT) != ncol(init.ROT)) { lav_msg_stop(gettextf( "nrow(init.ROT) = %1$s does not equal ncol(init.ROT) = %2$s", nrow(init.ROT), ncol(init.ROT))) } # rotation matrix?
init.ROT^T %*% init.ROT = I RR <- crossprod(init.ROT) if (!lav_matrix_rotate_check(init.ROT, orthogonal = orthogonal)) { lav_msg_stop(gettext("init.ROT does not look like a rotation matrix")) } } # determine method function name if (method %in% c( "cf-quartimax", "cf-varimax", "cf-equamax", "cf-parsimax", "cf-facparsim" )) { method.fname <- "lav_matrix_rotate_cf" method.args$cf.gamma <- switch(method, "cf-quartimax" = 0, "cf-varimax" = 1 / P, "cf-equamax" = M / (2 * P), "cf-parsimax" = (M - 1) / (P + M - 2), "cf-facparsim" = 1 ) } else if (method %in% c("bi-quartimin", "biquartimin")) { method.fname <- "lav_matrix_rotate_biquartimin" } else if (method %in% c("bi-geomin", "bigeomin")) { method.fname <- "lav_matrix_rotate_bigeomin" } else { method.fname <- paste("lav_matrix_rotate_", method, sep = "") } # check if rotation method exists check <- try(get(method.fname), silent = TRUE) if (inherits(check, "try-error")) { lav_msg_stop(gettext("unknown rotation method:"), method.fname) } # if target, check target matrix if (method == "target" || method == "pst") { target <- method.args$target if (is.list(target)) { method.args$target <- target <- target[[group]] } # check dimension of target/A if (nrow(target) != nrow(A)) { lav_msg_stop(gettext("nrow(target) != nrow(A)")) } if (ncol(target) != ncol(A)) { lav_msg_stop(gettext("ncol(target) != ncol(A)")) } } if (method == "pst") { target.mask <- method.args$target.mask if (is.list(target.mask)) { method.args$target.mask <- target.mask <- target.mask[[group]] } # check dimension of target.mask/A if (nrow(target.mask) != nrow(A)) { lav_msg_stop(gettext("nrow(target.mask) != nrow(A)")) } if (ncol(target.mask) != ncol(A)) { lav_msg_stop(gettext("col(target.mask) != ncol(A)")) } } # we keep this here, so lav_matrix_rotate() can be used independently if (method == "target" && anyNA(target)) { method <- "pst" method.fname <- "lav_matrix_rotate_pst" target.mask <- matrix(1, nrow = nrow(target), ncol = ncol(target)) target.mask[is.na(target)] <- 0 method.args$target.mask <- target.mask } # set orthogonal option if (missing(orthogonal)) { # the default is oblique, except for varimax, entropy and a few others if (method %in% c( "varimax", "entropy", "mccammon", "tandem1", "tandem2" )) { orthogonal <- TRUE } else { orthogonal <- FALSE } } else { if (!orthogonal && method %in% c( "varimax", "entropy", "mccammon", "tandem1", "tandem2" )) { lav_msg_warn(gettextf( "rotation method %s may not work with oblique rotation.", dQuote(method) )) } } # set row.weights row.weights <- tolower(row.weights) if (row.weights == "default") { # the default is "none", except for varimax if (method %in% c("varimax", "promax")) { row.weights <- "kaiser" } else { row.weights <- "none" } } # check algorithm algorithm <- tolower(algorithm) if (algorithm %in% c("gpa", "pairwise", "none")) { # nothing to do } else { lav_msg_stop(gettext("algorithm must be gpa or pairwise")) } # 1. compute row weigths # 1.a cov -> cor? if (std.ov) { A <- A * 1 / sqrt(ov.var) } if (row.weights == "none") { weights <- rep(1.0, P) } else if (row.weights == "kaiser") { weights <- lav_matrix_rotate_kaiser_weights(A) } else if (row.weights == "cureton-mulaik") { weights <- lav_matrix_rotate_cm_weights(A) } else { lav_msg_stop(gettext("row.weights can be none, kaiser or cureton-mulaik")) } A <- A * weights # 2. rotate # multiple random starts? 
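  # (each replication below draws a random orthogonal start via
  #  lav_matrix_rotate_gen(), runs the selected algorithm, and returns
  #  c(criterion, vec(ROT)); the replication with the smallest criterion
  #  value is retained through which.min(REP[1, ]))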
if (rstarts > 0L) { REP <- sapply(seq_len(rstarts), function(rep) { # random start (always orthogonal) init.ROT <- lav_matrix_rotate_gen(M = M, orthogonal = TRUE) # init.ROT <- lav_matrix_rotate_gen(M = M, orthogonal = orthogonal) if (lav_verbose()) { cat("\n") cat("rstart = ", sprintf("%4d", rep), " start:\n") } # choose rotation algorithm if (algorithm == "gpa") { ROT <- lav_matrix_rotate_gpa( A = A, orthogonal = orthogonal, init.ROT = init.ROT, method.fname = method.fname, method.args = method.args, gpa.tol = gpa.tol, max.iter = max.iter ) info <- attr(ROT, "info") attr(ROT, "info") <- NULL res <- c(info$method.value, lav_matrix_vec(ROT)) } else if (algorithm == "pairwise") { ROT <- lav_matrix_rotate_pairwise( A = A, orthogonal = orthogonal, init.ROT = init.ROT, method.fname = method.fname, method.args = method.args, tol = tol, max.iter = max.iter ) info <- attr(ROT, "info") attr(ROT, "info") <- NULL res <- c(info$method.value, lav_matrix_vec(ROT)) } if (lav_verbose()) { cat( "rstart = ", sprintf("%4d", rep), " end; current crit = ", sprintf("%17.15f", res[1]), "\n" ) } res }) best.idx <- which.min(REP[1, ]) ROT <- matrix(REP[-1, best.idx], nrow = M, ncol = M) if (keep.rep) { info <- list(method.value = REP[1, best.idx], REP = REP) } else { info <- list(method.value = REP[1, best.idx]) } } else if (algorithm != "none") { # initial rotation matrix if (is.null(init.ROT)) { init.ROT <- diag(M) } # Gradient Projection Algorithm if (algorithm == "gpa") { ROT <- lav_matrix_rotate_gpa( A = A, orthogonal = orthogonal, init.ROT = init.ROT, method.fname = method.fname, method.args = method.args, gpa.tol = gpa.tol, max.iter = max.iter ) } else if (algorithm == "pairwise") { ROT <- lav_matrix_rotate_pairwise( A = A, orthogonal = orthogonal, init.ROT = init.ROT, method.fname = method.fname, method.args = method.args, tol = tol, max.iter = max.iter ) } info <- attr(ROT, "info") attr(ROT, "info") <- NULL } # final rotation if (orthogonal) { # LAMBDA <- A %*% solve(t(ROT)) # note: when ROT is orthogonal, solve(t(ROT)) == ROT LAMBDA <- A %*% ROT PHI <- diag(ncol(LAMBDA)) # correlation matrix == I } else { # LAMBDA <- A %*% solve(t(ROT)) LAMBDA <- t(solve(ROT, t(A))) PHI <- crossprod(ROT) # correlation matrix } # 3. 
undo row weighting LAMBDA <- LAMBDA / weights # here, after re-weighted, we run promax if needed if (method == "promax") { LAMBDA.orig <- LAMBDA # first, run 'classic' varimax using varimax() from the stats package # we split varimax from promax, so we can control the normalize flag normalize.flag <- row.weights == "kaiser" xx <- stats::varimax(x = LAMBDA, normalize = normalize.flag) # promax kappa <- method.args$promax.kappa out <- lav_matrix_rotate_promax( x = xx$loadings, m = kappa, varimax.ROT = xx$rotmat ) LAMBDA <- out$loadings PHI <- solve(crossprod(out$rotmat)) # compute 'ROT' to be compatible with GPa ROTt.inv <- solve( crossprod(LAMBDA.orig), crossprod(LAMBDA.orig, LAMBDA) ) ROT <- solve(t(ROTt.inv)) info <- list( algorithm = "promax", iter = 0L, converged = TRUE, method.value = as.numeric(NA) ) } # 3.b undo cov -> cor if (std.ov) { LAMBDA <- LAMBDA * sqrt(ov.var) } # 4.a reflect so that column sum is always positive if (reflect) { SUM <- colSums(LAMBDA) neg.idx <- which(SUM < 0) if (length(neg.idx) > 0L) { LAMBDA[, neg.idx] <- -1 * LAMBDA[, neg.idx, drop = FALSE] ROT[, neg.idx] <- -1 * ROT[, neg.idx, drop = FALSE] if (!orthogonal) { # recompute PHI PHI <- crossprod(ROT) } } } # 4.b reorder the columns if (order.lv.by == "sumofsquares") { L2 <- LAMBDA * LAMBDA order.idx <- base::order(colSums(L2), decreasing = TRUE) } else if (order.lv.by == "index") { # reorder using Asparouhov & Muthen 2009 criterion (see Appendix D) max.loading <- apply(abs(LAMBDA), 2, max) # 1: per factor, number of the loadings that are at least 0.8 of the # highest loading of the factor # 2: mean of the index numbers average.index <- sapply(seq_len(ncol(LAMBDA)), function(i) { mean(which(abs(LAMBDA[, i]) >= 0.8 * max.loading[i])) }) # order of the factors order.idx <- base::order(average.index) } else if (order.lv.by == "none") { order.idx <- seq_len(ncol(LAMBDA)) } else { lav_msg_stop(gettext("order must be index, sumofsquares or none")) } # do the same in PHI LAMBDA <- LAMBDA[, order.idx, drop = FALSE] PHI <- PHI[order.idx, order.idx, drop = FALSE] # new in 0.6-6, also do this in ROT, so we won't have to do this # again upstream ROT <- ROT[, order.idx, drop = FALSE] # 6. 
return results as a list res <- list( LAMBDA = LAMBDA, PHI = PHI, ROT = ROT, order.idx = order.idx, orthogonal = orthogonal, method = method, method.args = method.args, row.weights = row.weights ) # add method info res <- c(res, info) res } # Gradient Projection Algorithm (Jennrich 2001, 2002) # # - this is a translation of the SAS PROC IML code presented in the Appendix # of Bernaards & Jennrich (2005) # - as the orthogonal and oblique algorithm are so similar, they are # combined in a single function # - the default is oblique rotation # lav_matrix_rotate_gpa <- function(A = NULL, # original matrix orthogonal = FALSE, # default is oblique init.ROT = NULL, # initial rotation method.fname = NULL, # criterion function method.args = list(), # optional method args gpa.tol = 0.00001, max.iter = 10000L) { # number of columns M <- ncol(A) # transpose of A (not needed for orthogonal) At <- t(A) # check init.ROT if (is.null(init.ROT)) { ROT <- diag(M) } else { ROT <- init.ROT } # set initial value of alpha to 1 alpha <- 1 # initial rotation if (orthogonal) { LAMBDA <- A %*% ROT } else { LAMBDA <- t(solve(ROT, At)) } # using the current LAMBDA, evaluate the user-specified # rotation criteron; return Q (the criterion) and its gradient Gq Q <- do.call( method.fname, c(list(LAMBDA = LAMBDA), method.args, list(grad = TRUE)) ) Gq <- attr(Q, "grad") attr(Q, "grad") <- NULL Q.current <- Q # compute gradient GRAD of f() at ROT from the gradient Gq of Q at LAMBDA # in a manner appropiate for orthogonal or oblique rotation if (orthogonal) { GRAD <- crossprod(A, Gq) } else { GRAD <- -1 * solve(t(init.ROT), crossprod(Gq, LAMBDA)) } # start iterations converged <- FALSE for (iter in seq_len(max.iter + 1L)) { # compute projection Gp of GRAD onto the linear manifold tangent at # ROT to the manifold of orthogonal or normal (for oblique) matrices # # this projection is zero if and only if ROT is a stationary point of # f() restricted to the orthogonal/normal matrices if (orthogonal) { MM <- crossprod(ROT, GRAD) SYMM <- (MM + t(MM)) / 2 Gp <- GRAD - (ROT %*% SYMM) } else { Gp <- GRAD - t(t(ROT) * colSums(ROT * GRAD)) } # check Frobenius norm of Gp frob <- sqrt(sum(Gp * Gp)) # if verbose, print if (lav_verbose()) { cat( "iter = ", sprintf("%4d", iter - 1), " Q = ", sprintf("%9.7f", Q.current), " frob.log10 = ", sprintf("%10.7f", log10(frob)), " alpha = ", sprintf("%9.7f", alpha), "\n" ) } if (frob < gpa.tol) { converged <- TRUE break } # update alpha <- 2 * alpha for (i in seq_len(1000)) { # make option? 
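  # (inner loop: backtracking step-halving on alpha; the candidate rotation
  #  is accepted once the criterion drops by at least 0.5 * frob^2 * alpha,
  #  otherwise alpha is halved and the step is retried)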
# step in the negative projected gradient direction # (note, the original algorithm in Jennrich 2001 used G, not Gp) X <- ROT - alpha * Gp if (orthogonal) { # use SVD to compute the projection ROTt of X onto the manifold # of orthogonal matrices svd.out <- svd(X) U <- svd.out$u V <- svd.out$v ROTt <- U %*% t(V) } else { # compute the projection ROTt of X onto the manifold # of normal matrices v <- 1 / sqrt(apply(X^2, 2, sum)) ROTt <- X %*% diag(v) } # rotate again if (orthogonal) { LAMBDA <- A %*% ROTt } else { LAMBDA <- t(solve(ROTt, At)) } # evaluate criterion Q.new <- do.call(method.fname, c( list(LAMBDA = LAMBDA), method.args, list(grad = TRUE) )) Gq <- attr(Q.new, "grad") attr(Q.new, "grad") <- NULL # check stopping criterion if (Q.new < Q.current - 0.5 * frob * frob * alpha) { break } else { alpha <- alpha / 2 } if (i == 1000) { lav_msg_warn(gettext("half-stepping failed in GPA")) } } # update ROT <- ROTt Q.current <- Q.new if (orthogonal) { GRAD <- crossprod(A, Gq) } else { GRAD <- -1 * solve(t(ROT), crossprod(Gq, LAMBDA)) } } # iter # warn if no convergence if (!converged) { lav_msg_warn(gettextf( "GP rotation algorithm did not converge after %s iterations", max.iter )) } # algorithm information info <- list( algorithm = "gpa", iter = iter - 1L, converged = converged, method.value = Q.current ) attr(ROT, "info") <- info ROT } # pairwise rotation algorithm with direct line search # # based on Kaiser's (1959) algorithm and Jennrich and Sampson (1966) algorithm # but to make it generic, a line search is used; inspired by Browne 2001 # # - orthogonal: rotate one pair of columns (=plane) at a time # - oblique: rotate 1 factor in one pair of columns (=plane) at a time # note: in the oblique case, (1,2) is not the same as (2,1) # - BUT use optimize() to find the optimal angle (for each plane) # (see Browne, 2001, page 130) # - repeat until the changes in the f() criterion are below tol # lav_matrix_rotate_pairwise <- function(A = NULL, # original matrix orthogonal = FALSE, init.ROT = NULL, method.fname = NULL, # crit function method.args = list(), # method args tol = 1e-8, max.iter = 1000L) { # number of columns M <- ncol(A) # initial LAMBDA + PHI if (is.null(init.ROT)) { LAMBDA <- A if (!orthogonal) { PHI <- diag(M) } } else { if (orthogonal) { LAMBDA <- A %*% init.ROT } else { LAMBDA <- t(solve(init.ROT, t(A))) PHI <- crossprod(init.ROT) } } # using the current LAMBDA, evaluate the user-specified # rotation criteron; return Q (the criterion) only Q.current <- do.call(method.fname, c( list(LAMBDA = LAMBDA), method.args, list(grad = FALSE) )) # if verbose, print if (lav_verbose()) { cat( "iter = ", sprintf("%4d", 0), " Q = ", sprintf("%13.11f", Q.current), "\n" ) } # plane combinations if (orthogonal) { PLANE <- utils::combn(M, 2) } else { tmp <- utils::combn(M, 2) PLANE <- cbind(tmp, tmp[c(2, 1), , drop = FALSE]) } # define objective function -- orthogonal objf_orth <- function(theta = 0, A = NULL, col1 = 0L, col2 = 0L) { # construct ROT ROT <- diag(M) ROT[col1, col1] <- base::cos(theta) ROT[col1, col2] <- base::sin(theta) ROT[col2, col1] <- -1 * base::sin(theta) ROT[col2, col2] <- base::cos(theta) # rotate LAMBDA <- A %*% ROT # evaluate criterion Q <- do.call(method.fname, c( list(LAMBDA = LAMBDA), method.args, list(grad = FALSE) )) Q } # define objective function -- oblique objf_obliq <- function(delta = 0, A = NULL, col1 = 0L, col2 = 0L, phi12 = 0) { # construct ROT ROT <- diag(M) # gamma gamma2 <- 1 + (2 * delta * phi12) + (delta * delta) ROT[col1, col1] <- sqrt(abs(gamma2)) 
ROT[col1, col2] <- -1 * delta ROT[col2, col1] <- 0 ROT[col2, col2] <- 1 # rotate LAMBDA <- A %*% ROT # evaluate criterion Q <- do.call(method.fname, c( list(LAMBDA = LAMBDA), method.args, list(grad = FALSE) )) Q } # start iterations converged <- FALSE Q.old <- Q.current for (iter in seq_len(max.iter)) { # rotate - one cycle for (pl in seq_len(ncol(PLANE))) { # choose plane col1 <- PLANE[1, pl] col2 <- PLANE[2, pl] # optimize if (orthogonal) { out <- optimize( f = objf_orth, interval = c(-pi / 4, +pi / 4), A = LAMBDA, col1 = col1, col2 = col2, maximum = FALSE, tol = .Machine$double.eps^0.25 ) # best rotation - for this plane theta <- out$minimum # construct ROT ROT <- diag(M) ROT[col1, col1] <- base::cos(theta) ROT[col1, col2] <- base::sin(theta) ROT[col2, col1] <- -1 * base::sin(theta) ROT[col2, col2] <- base::cos(theta) } else { phi12 <- PHI[col1, col2] out <- optimize( f = objf_obliq, interval = c(-1, +1), A = LAMBDA, col1 = col1, col2 = col2, phi12 = phi12, maximum = FALSE, tol = .Machine$double.eps^0.25 ) # best rotation - for this plane delta <- out$minimum # construct ROT ROT <- diag(M) # gamma gamma2 <- 1 + (2 * delta * phi12) + (delta * delta) gamma <- sqrt(abs(gamma2)) ROT[col1, col1] <- gamma ROT[col1, col2] <- -1 * delta ROT[col2, col1] <- 0 ROT[col2, col2] <- 1 } # rotate LAMBDA <- LAMBDA %*% ROT if (!orthogonal) { # rotate PHI PHI[col1, ] <- (1 / gamma) * PHI[col1, ] + (delta / gamma) * PHI[col2, ] PHI[, col1] <- PHI[col1, ] PHI[col1, col1] <- 1 } } # all planes # check for convergence Q.current <- do.call(method.fname, c( list(LAMBDA = LAMBDA), method.args, list(grad = FALSE) )) # absolute change in Q diff <- abs(Q.old - Q.current) # if verbose, print if (lav_verbose()) { cat( "iter = ", sprintf("%4d", iter), " Q = ", sprintf("%13.11f", Q.current), " change = ", sprintf("%13.11f", diff), "\n" ) } if (diff < tol) { converged <- TRUE break } else { Q.old <- Q.current } } # iter # warn if no convergence if (!converged) { lav_msg_warn(gettextf( "pairwise rotation algorithm did not converge after %s iterations", max.iter )) } # compute final rotation matrix if (orthogonal) { ROT <- solve(crossprod(A), crossprod(A, LAMBDA)) } else { # to be compatible with GPa ROTt.inv <- solve(crossprod(A), crossprod(A, LAMBDA)) ROT <- solve(t(ROTt.inv)) } # algorithm information info <- list( algorithm = "pairwise", iter = iter, converged = converged, method.value = Q.current ) attr(ROT, "info") <- info ROT } lavaan/R/lav_model_lik.R0000644000176200001440000001334314627656441014664 0ustar liggesusers# casewise likelihoods # closed-form marginal likelihood # - classic SEM models, continous observed variables only lav_model_lik_ml <- function(lavmodel = NULL, GLIST = NULL, lavdata = NULL, lavsamplestats = NULL) { } # marginal ML lav_model_lik_mml <- function(lavmodel = NULL, THETA = NULL, TH = NULL, GLIST = NULL, group = 1L, lavdata = NULL, sample.mean = NULL, sample.mean.x = NULL, lavcache = NULL) { conditional.x <- lavmodel@conditional.x # data for this group X <- lavdata@X[[group]] nobs <- nrow(X) nvar <- ncol(X) eXo <- lavdata@eXo[[group]] # MLIST (for veta and yhat) mm.in.group <- 1:lavmodel@nmat[group] + cumsum(c(0, lavmodel@nmat))[group] MLIST <- GLIST[mm.in.group] # quadrature points GH <- lavcache[[group]]$GH nGH <- nrow(GH$x) nfac <- ncol(GH$x) # compute VETAx (latent lv only) lv.dummy.idx <- c( lavmodel@ov.y.dummy.lv.idx[[group]], lavmodel@ov.x.dummy.lv.idx[[group]] ) VETAx <- computeVETAx.LISREL( MLIST = MLIST, lv.dummy.idx = lv.dummy.idx ) # VETAx <- computeVETAx.LISREL(MLIST = MLIST) 
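  # (marginal ML: per case i, lik_i = int f(y_i | eta, x_i) dF(eta), with the
  #  integral over the latent variables approximated below by Gauss-Hermite
  #  quadrature, lik_i ~= sum_q w_q * prod_j f(y_ij | eta_q, x_i); this is
  #  the final t(GH$w) %*% exp(SUM.LOG.FY) step)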
# check for negative values? if (any(diag(VETAx) < 0)) { lav_msg_warn(gettext("--- VETAx contains negative values")) print(VETAx) return(0) } # cholesky? # if(is.null(lavmodel@control$cholesky)) { CHOLESKY <- TRUE # } else { # CHOLESKY <- as.logical(lavmodel@control$cholesky) # if(nfac > 1L && !CHOLESKY) { # warning("lavaan WARNING: CHOLESKY is OFF but nfac > 1L") # } # } if (!CHOLESKY) { # we should still 'scale' the factors, if std.lv=FALSE ETA.sd <- sqrt(diag(VETAx)) } else { # cholesky takes care of scaling tchol.VETA <- try(chol(VETAx), silent = TRUE) if (inherits(tchol.VETA, "try-error")) { lav_msg_warn(gettext("--- VETAx not positive definite")) print(VETAx) return(0) } if (!is.null(MLIST$alpha) || !is.null(MLIST$gamma)) { if (conditional.x) { EETAx <- computeEETAx.LISREL( MLIST = MLIST, eXo = eXo, N = nobs, sample.mean = sample.mean, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[group]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[group]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[group]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[group]] ) } else { EETA <- computeEETA.LISREL( MLIST = MLIST, mean.x = sample.mean.x, sample.mean = sample.mean, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[group]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[group]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[group]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[group]] ) } # if(length(lv.dummy.idx) > 0L) { # EETAx <- EETAx[,-lv.dummy.idx,drop=FALSE] # } } } # compute (log)lik for each node, for each observation SUM.LOG.FY <- matrix(0, nrow = nGH, ncol = nobs) for (q in 1:nGH) { # current value(s) for ETA # eta <- matrix(0, nrow = 1, ncol = ncol(MLIST$lambda)) # non-dummy elements -> quadrature points # eta[1L, -lv.dummy.idx] <- GH$x[q,,drop=FALSE] XQ <- GH$x[q, , drop = FALSE] # rescale/unwhiten if (CHOLESKY) { # un-orthogonalize XQ <- XQ %*% tchol.VETA } else { # no unit scale? (un-standardize) XQ <- sweep(XQ, MARGIN = 2, STATS = ETA.sd, FUN = "*") } eta <- matrix(0, nrow = 1, ncol = ncol(MLIST$lambda)) if (length(lv.dummy.idx) > 0L) { eta[, -lv.dummy.idx] <- XQ } else { eta <- XQ } # eta_i = alpha + BETA eta_i + GAMMA eta_i + error # # - direct effect of BETA is already in VETAx, and hence tchol.VETA # - need to add alpha, and GAMMA eta_i if (!is.null(MLIST$alpha) || !is.null(MLIST$gamma)) { if (conditional.x) { eta <- sweep(EETAx, MARGIN = 2, STATS = eta, FUN = "+") } else { eta <- eta + EETA } } # compute yhat for this node (eta) if (lavmodel@conditional.x) { yhat <- computeEYetax.LISREL( MLIST = MLIST, eXo = eXo, ETA = eta, sample.mean = sample.mean, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[group]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[group]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[group]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[group]] ) } else { yhat <- computeEYetax3.LISREL( MLIST = MLIST, ETA = eta, sample.mean = sample.mean, mean.x = sample.mean.x, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[group]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[group]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[group]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[group]] ) } # compute fy.var, for this node (eta): P(Y_i = y_i | eta_i, x_i) log.fy.var <- lav_predict_fy_internal( X = X, yhat = yhat, TH = TH, THETA = THETA, num.idx = lavmodel@num.idx[[group]], th.idx = lavmodel@th.idx[[group]], link = lavmodel@link, log. 
    # if log, fy is just the sum of log.fy.var
    log.fy <- apply(log.fy.var, 1L, sum)

    # store log likelihoods for this node
    SUM.LOG.FY[q, ] <- log.fy
  }

  # integration
  lik <- as.numeric(t(GH$w) %*% exp(SUM.LOG.FY))

  # avoid underflow
  idx <- which(lik < exp(-600))
  if (length(idx) > 0L) {
    lik[idx] <- exp(-600)
  }

  lik
}
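## Editorial sketch (not part of lavaan): the integration step above is a
## plain weighted sum over quadrature nodes, lik = t(w) %*% exp(SUM.LOG.FY).
## A toy example with two nodes and three cases (made-up log-densities):
if (FALSE) {
  GH.w <- c(0.5, 0.5)
  SUM.LOG.FY.toy <- rbind(
    dnorm(c(-1, 0, 1), log = TRUE),
    dnorm(c(-1, 0, 1), mean = 0.5, log = TRUE)
  )
  lik.toy <- as.numeric(t(GH.w) %*% exp(SUM.LOG.FY.toy))
  stopifnot(length(lik.toy) == 3L, all(lik.toy > 0))
}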
# ---- file: lavaan/R/lav_model_gradient_mml.R ----

lav_model_gradient_mml <- function(lavmodel = NULL,
                                   THETA = NULL,
                                   TH = NULL,
                                   GLIST = NULL,
                                   group = 1L,
                                   lavdata = NULL,
                                   sample.mean = NULL,
                                   sample.mean.x = NULL,
                                   lavcache = NULL) {
  if (lavmodel@link == "logit") {
    lav_msg_stop(gettext("logit link not implemented yet; use probit"))
  }

  # shortcut
  ov.y.dummy.ov.idx <- lavmodel@ov.y.dummy.ov.idx[[group]]
  ov.x.dummy.ov.idx <- lavmodel@ov.x.dummy.ov.idx[[group]]
  ov.y.dummy.lv.idx <- lavmodel@ov.y.dummy.lv.idx[[group]]
  ov.x.dummy.lv.idx <- lavmodel@ov.x.dummy.lv.idx[[group]]
  ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)
  lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx)
  th.idx <- lavmodel@th.idx[[group]]
  num.idx <- lavmodel@num.idx[[group]]
  ord.idx <- unique(th.idx[th.idx > 0L])

  # data for this group
  X <- lavdata@X[[group]]
  nobs <- nrow(X)
  nvar <- ncol(X)
  eXo <- lavdata@eXo[[group]]

  # MLIST (for veta and yhat)
  mm.in.group <- 1:lavmodel@nmat[group] + cumsum(c(0, lavmodel@nmat))[group]
  MLIST <- GLIST[mm.in.group]

  # quadrature points
  GH <- lavcache[[group]]$GH
  nGH <- nrow(GH$x)
  nfac <- ncol(GH$x)

  # compute VETAx (latent lv only)
  # VETAx <- computeVETAx.LISREL(MLIST = MLIST, lv.dummy.idx = lv.dummy.idx)
  VETAx <- computeVETAx.LISREL(MLIST = MLIST)
  # check for negative values?
  if (any(diag(VETAx) < 0)) {
    lav_msg_warn(gettext("--- VETAx contains negative values"))
    print(VETAx)
    return(0)
  }

  # cholesky?
  # if(is.null(lavmodel@control$cholesky)) {
  CHOLESKY <- TRUE
  # } else {
  #    CHOLESKY <- as.logical(lavmodel@control$cholesky)
  #    if(nfac > 1L && !CHOLESKY) {
  #        warning("lavaan WARNING: CHOLESKY is OFF but nfac > 1L")
  #    }
  # }

  if (!CHOLESKY) {
    # we should still 'scale' the factors, if std.lv=FALSE
    ETA.sd <- sqrt(diag(VETAx))
  } else {
    # cholesky takes care of scaling
    ETA.sd <- rep(1, nfac)
    tchol.VETA <- try(chol(VETAx), silent = TRUE)
    if (inherits(tchol.VETA, "try-error")) {
      lav_msg_warn(gettext("--- VETAx not positive definite"))
      print(VETAx)
      return(0)
    }
    if (!is.null(MLIST$alpha) || !is.null(MLIST$gamma)) {
      EETAx <- computeEETAx.LISREL(
        MLIST = MLIST, eXo = eXo, N = nobs,
        sample.mean = sample.mean,
        ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
        ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
        ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
        ov.x.dummy.lv.idx = ov.x.dummy.lv.idx
      )
      # if(length(lv.dummy.idx) > 0L) {
      #     EETAx <- EETAx[,-lv.dummy.idx,drop=FALSE]
      # }
    }
  }

  # prepare common stuff

  # fix Lambda?
  LAMBDA <- computeLAMBDA.LISREL(
    MLIST = MLIST,
    ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
    ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
    ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
    ov.x.dummy.lv.idx = ov.x.dummy.lv.idx
  )

  # fix ALPHA
  ALPHA <- MLIST$alpha
  if (is.null(ALPHA)) {
    ALPHA <- numeric(nfac)
  } else if (length(lv.dummy.idx)) {
    ALPHA <- ALPHA[-lv.dummy.idx, , drop = FALSE]
  }

  # Beta?
  BETA <- MLIST$beta
  if (is.null(BETA)) {
    LAMBDA..IB.inv <- LAMBDA
  } else {
    tmp <- -BETA
    nr <- nrow(BETA)
    i <- seq_len(nr)
    tmp[cbind(i, i)] <- 1
    IB.inv <- solve(tmp)
    LAMBDA..IB.inv <- MLIST$lambda %*% IB.inv ## no need to FIX???
    if (length(lv.dummy.idx) > 0L) {
      LAMBDA..IB.inv <- LAMBDA..IB.inv[, -lv.dummy.idx, drop = FALSE]
    }
    # fix BETA
    if (length(lv.dummy.idx)) {
      BETA <- MLIST$beta[-lv.dummy.idx, -lv.dummy.idx, drop = FALSE]
    }
    tmp <- -BETA
    nr <- nrow(BETA)
    i <- seq_len(nr)
    tmp[cbind(i, i)] <- 1
    IB.inv <- solve(tmp)
  }

  # fix GAMMA
  GAMMA <- MLIST$gamma
  if (is.null(GAMMA)) {
    ALPHA.GAMMA.eXo <- matrix(as.numeric(ALPHA), nobs, nfac, byrow = TRUE)
  } else if (length(lv.dummy.idx)) {
    GAMMA <- GAMMA[-lv.dummy.idx, , drop = FALSE]
    ALPHA.GAMMA.eXo <- sweep(eXo %*% t(GAMMA),
      MARGIN = 2,
      STATS = as.numeric(ALPHA), FUN = "+"
    )
  }

  # Delta
  ## DD <- lavcache[[group]]$DD
  DD <- lav_model_gradient_DD(lavmodel, GLIST = GLIST, group = group)

  ## FIXME!!! do this analytically...
  x <- lav_model_get_parameters(lavmodel = lavmodel, GLIST = MLIST)
  dVetadx <- function(x, lavmodel = lavmodel, g = 1L) {
    GLIST <- lav_model_x2GLIST(lavmodel, x = x, type = "free")
    VETAx <- computeVETAx(lavmodel, GLIST = GLIST)[[g]]
    if (CHOLESKY) {
      S <- chol(VETAx) ### FIXME or t(chol())????
    } else {
      S <- diag(sqrt(diag(VETAx)))
    }
    S
  }
  Delta.S <- lav_func_jacobian_simple(
    func = dVetadx, x = x,
    lavmodel = lavmodel, g = group
  )
  DD$S <- Delta.S

  # compute dL/dx for each node
  # dLdx <- matrix(0, nGH, lavmodel@nx.free)
  dFYp <- matrix(0, nobs, lavmodel@nx.free)
  SUM.LOG.FY <- matrix(0, nrow = nGH, ncol = nobs)
  for (q in 1:nGH) {
    # contribution to dFYp for this q
    dFYp.q <- matrix(0, nobs, lavmodel@nx.free)

    # current value(s) for ETA
    eta <- ksi <- GH$x[q, , drop = FALSE]

    # rescale/unwhiten
    if (CHOLESKY) {
      eta <- eta %*% tchol.VETA
    } else {
      # no unit scale? (un-standardize)
      eta <- sweep(eta, MARGIN = 2, STATS = ETA.sd, FUN = "*")
    }

    # eta_i = alpha + BETA eta_i + GAMMA eta_i + error
    #
    # - direct effect of BETA is already in VETAx, and hence tchol.VETA
    # - need to add alpha, and GAMMA eta_i
    if (!is.null(MLIST$alpha) || !is.null(MLIST$gamma)) {
      eta <- sweep(EETAx, MARGIN = 2, STATS = eta, FUN = "+")
    }

    # again, compute yhat for this node (eta)
    if (lavmodel@conditional.x) {
      yhat <- computeEYetax.LISREL(
        MLIST = MLIST, eXo = eXo,
        ETA = eta, sample.mean = sample.mean,
        ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
        ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
        ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
        ov.x.dummy.lv.idx = ov.x.dummy.lv.idx
      )
    } else {
      yhat <- computeEYetax3.LISREL(
        MLIST = MLIST,
        ETA = eta, sample.mean = sample.mean, mean.x = sample.mean.x,
        ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[group]],
        ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[group]],
        ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[group]],
        ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[group]]
      )
    }

    # compute fy.var, for this node (eta): P(Y_i = y_i | eta_i, x_i)
    log.fy.var <- lav_predict_fy_internal(
      X = X, yhat = yhat,
      TH = TH, THETA = THETA,
      num.idx = num.idx, th.idx = th.idx,
      link = lavmodel@link, log. = TRUE
    )

    # if log, fy is just the sum of log.fy.var
    log.fy <- apply(log.fy.var, 1L, sum)

    # store log likelihoods for this node
    SUM.LOG.FY[q, ] <- log.fy

    # FY
    FY <- exp(log.fy.var) ### FIXME log/exp/log/...
    LIK.eta <- apply(FY, 1, prod)
    # fyp <- LIK.eta * GH$w[q]

    ######### dFY_p ###########################################
    # note, dFYp is actually 1/FY[,p] * dFYp
    PRE <- matrix(0, nobs, nvar)
    if (length(num.idx) > 0L) {
      tmp <- X[, num.idx, drop = FALSE] - yhat[, num.idx, drop = FALSE]
      theta.var <- diag(THETA)[num.idx]
      PRE[, num.idx] <- sweep(tmp, MARGIN = 2, STATS = 1 / theta.var, FUN = "*")
    }
    if (length(ord.idx) > 0L) {
      for (p in ord.idx) {
        # just in case we need theta[v,v] after all...
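        ## Note (editorial): the block below is the probit piece of the
        ## gradient; z1/z2 are the (clipped) standardized distances to the
        ## upper/lower threshold, and dth collects the derivatives of the
        ## casewise log-probability with respect to the thresholds of
        ## variable p (mapped to free parameters via DD$tau).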
        sd.v.inv <- 1 / sqrt(THETA[p, p])

        # lav_probit
        y <- X[, p]
        th.y <- TH[th.idx == p]
        TH.Y <- c(-Inf, th.y, Inf)
        ncat <- length(th.y) + 1L
        nth <- ncat - 1L
        Y1 <- matrix(1:nth, nobs, nth, byrow = TRUE) == y
        Y2 <- matrix(1:nth, nobs, nth, byrow = TRUE) == (y - 1L)
        z1 <- pmin(100, TH.Y[y + 1L] - yhat[, p])
        z2 <- pmax(-100, TH.Y[y + 1L - 1L] - yhat[, p])
        p1 <- dnorm(z1)
        p2 <- dnorm(z2)
        # probits = p1 - p2
        PRE[, p] <- -1 * (p1 - p2) * sd.v.inv * (1 / FY[, p])

        # [nobs * n.th]
        # dth <- -1 * (Y2*p2 - Y1*p1) * sd.v.inv
        dth <- -1 * (Y2 * p2 - Y1 * p1) * sd.v.inv * (1 / FY[, p])
        dFYp.q <- dFYp.q + (dth %*% DD$tau[which(th.idx == p), , drop = FALSE])
      }
    }

    if (length(num.idx) > 0L) {
      # THETA (num only)
      dsigma2 <- sweep(0.5 * PRE[, num.idx] * PRE[, num.idx],
        MARGIN = 2,
        STATS = 1 / (2 * theta.var), FUN = "-"
      )
      dFYp.q <- dFYp.q + (dsigma2 %*% DD$theta)

      # NU (num only)
      dnu <- PRE[, num.idx]
      dFYp.q <- dFYp.q + (dnu %*% DD$nu)
    }

    # LAMBDA
    if (nrow(eta) == 1L) {
      dlambda <- PRE %*% eta ### FIXME!!!!!
    } else {
      dlambda <- matrix(apply(PRE, 2, function(x) x * eta), nobs, )
      # dlambda <- sweep(PRE, MARGIN=1, STATS=eta, FUN="*")
    }
    dFYp.q <- dFYp.q + (dlambda %*% DD$lambda)

    # PSI
    # if(nrow(ksi) == 1L) {
    dpsi <- PRE %*% kronecker(LAMBDA[, , drop = FALSE], ksi)
    # } else {
    #     dpsi <- PRE * kronecker(LAMBDA[,,drop=FALSE], ksi)
    # }
    dFYp.q <- dFYp.q + (dpsi %*% DD$S)

    # KAPPA
    if (length(ov.y.dummy.ov.idx) > 0L) {
      dkappa <- matrix(apply(
        PRE[, ov.y.dummy.ov.idx, drop = FALSE], 2,
        function(x) x * eXo
      ), nobs, )
      dFYp.q <- dFYp.q + (dkappa %*% DD$kappa)
    }

    # GAMMA
    if (!is.null(eXo)) {
      dgamma <- matrix(apply(
        PRE %*% LAMBDA..IB.inv, 2,
        function(x) x * eXo
      ), nobs, )
      dFYp.q <- dFYp.q + (dgamma %*% DD$gamma)
    }

    # BETA
    if (!is.null(BETA)) {
      # tmp <- kronecker(LAMBDA, ALPHA.GAMMA.eXo) %*%
      #            t( kronecker(t(IB.inv), IB.inv) )
      # dbeta <- apply(matrix(as.numeric(PRE) * tmp, nobs, ), 1, sum)
      dbeta <- matrix(apply(
        PRE %*% LAMBDA..IB.inv, 2,
        function(x) x * ALPHA.GAMMA.eXo
      ), nobs, )
      dFYp.q <- dFYp.q + (dbeta %*% DD$beta)
    }

    dFYp <- dFYp + ((LIK.eta * GH$w[q]) * dFYp.q)
  }

  lik <- as.numeric(t(GH$w) %*% exp(SUM.LOG.FY))
  # avoid underflow
  idx <- which(lik < exp(-600))
  if (length(idx) > 0L) {
    lik[idx] <- exp(-600)
  }

  dFYp <- 1 / lik * dFYp
  dx <- apply(dFYp, 2, sum)

  # integration
  # dx <- apply(as.numeric(GH$w) * dLdx, 2, sum)

  # minimize
  dx <- -1 * dx

  dx
}
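## Editorial sketch (not part of lavaan): analytic gradients like the one
## above are typically spot-checked against numerical derivatives. A generic
## pattern (assumes the 'numDeriv' package; f is a stand-in objective):
if (FALSE) {
  f <- function(x) sum((x - 1:3)^2)
  g.num <- numDeriv::grad(f, c(0, 0, 0))
  stopifnot(all.equal(g.num, -2 * (1:3), tolerance = 1e-6))
}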
# ---- file: lavaan/R/lav_samplestats.R ----

# constructor for the 'lavSampleStats' class
#
# initial version: YR 25/03/2009
# major revision: YR 5/11/2011: separate data.obs and sample statistics
#                 YR 5/01/2016: add rescov, resvar, ... if conditional.x = TRUE
#                 YR 18 Jan 2021: use lavoptions

lav_samplestats_from_data <- function(lavdata = NULL,
                                      lavoptions = NULL,
                                      WLS.V = NULL,
                                      NACOV = NULL) {
  # extra info from lavoptions
  stopifnot(!is.null(lavoptions))
  missing <- lavoptions$missing
  rescale <- lavoptions$sample.cov.rescale
  estimator <- lavoptions$estimator
  mimic <- lavoptions$mimic
  meanstructure <- lavoptions$meanstructure
  correlation <- lavoptions$correlation
  conditional.x <- lavoptions$conditional.x
  fixed.x <- lavoptions$fixed.x
  group.w.free <- lavoptions$group.w.free
  se <- lavoptions$se
  test <- lavoptions$test
  ridge <- lavoptions$ridge
  zero.add <- lavoptions$zero.add
  zero.keep.margins <- lavoptions$zero.keep.margins
  zero.cell.warn <- lavoptions$zero.cell.warn
  dls.a <- lavoptions$estimator.args$dls.a
  dls.GammaNT <- lavoptions$estimator.args$dls.GammaNT

  # sample.icov (new in 0.6-9; ensure it exists, for older objects)
  sample.icov <- TRUE
  if (!is.null(lavoptions$sample.icov)) {
    sample.icov <- lavoptions$sample.icov
  }

  # ridge default
  if (ridge) {
    if (is.numeric(lavoptions$ridge.constant)) {
      ridge.eps <- lavoptions$ridge.constant
    } else {
      ridge.eps <- 1e-5
    }
  } else {
    ridge.eps <- 0.0
  }

  # check lavdata
  stopifnot(!is.null(lavdata))

  # lavdata slots (FIXME: keep lavdata@ names)
  X <- lavdata@X
  Mp <- lavdata@Mp
  ngroups <- lavdata@ngroups
  nlevels <- lavdata@nlevels
  nobs <- lavdata@nobs
  ov.names <- lavdata@ov.names
  ov.names.x <- lavdata@ov.names.x
  DataOv <- lavdata@ov
  eXo <- lavdata@eXo
  WT <- lavdata@weights

  # new in 0.6-6
  # if sampling weights have been used, redefine nobs:
  # per group, we define nobs == sum(wt)
  for (g in seq_len(ngroups)) {
    if (!is.null(WT[[g]])) {
      nobs[[g]] <- sum(WT[[g]])
    }
  }

  # sample.cov.robust cannot be used if sampling weights are used
  if (lavoptions$sample.cov.robust) {
    if (!is.null(WT[[1]])) {
      lav_msg_stop(gettext(
        "sample.cov.robust = TRUE does not work (yet) if sampling weights are provided."
      ))
    }
  }

  # sample statistics per group

  # joint (y,x)
  cov <- vector("list", length = ngroups)
  var <- vector("list", length = ngroups)
  mean <- vector("list", length = ngroups)
  th <- vector("list", length = ngroups)
  th.idx <- vector("list", length = ngroups)
  th.names <- vector("list", length = ngroups)

  # residual (y | x)
  res.cov <- vector("list", length = ngroups)
  res.var <- vector("list", length = ngroups)
  res.th <- vector("list", length = ngroups)
  res.th.nox <- vector("list", length = ngroups)
  res.slopes <- vector("list", length = ngroups)
  res.int <- vector("list", length = ngroups)

  # fixed.x
  mean.x <- vector("list", length = ngroups)
  cov.x <- vector("list", length = ngroups)

  # binary/ordinal
  bifreq <- vector("list", length = ngroups)

  # extra sample statistics per group
  icov <- vector("list", length = ngroups)
  cov.log.det <- vector("list", length = ngroups)
  res.icov <- vector("list", length = ngroups)
  res.cov.log.det <- vector("list", length = ngroups)
  WLS.obs <- vector("list", length = ngroups)
  missing. <- vector("list", length = ngroups)
  missing.h1. <- vector("list", length = ngroups)
  missing.flag. <- FALSE
  zero.cell.tables <- vector("list", length = ngroups)
  YLp <- vector("list", length = ngroups)

  # group weights
  group.w <- vector("list", length = ngroups)

  # convenience? # FIXME!
  x.idx <- vector("list", length = ngroups)

  WLS.VD <- vector("list", length = ngroups)
  if (is.null(WLS.V)) {
    WLS.V <- vector("list", length = ngroups)
    WLS.V.user <- FALSE
  } else {
    if (!is.list(WLS.V)) {
      if (ngroups == 1L) {
        WLS.V <- list(WLS.V)
      } else {
        lav_msg_stop(gettextf(
          "WLS.V argument should be a list of length %s", ngroups
        ))
      }
    } else {
      if (length(WLS.V) != ngroups) {
        lav_msg_stop(gettextf(
          "WLS.V assumes %1$s groups; data contains %2$s groups",
          length(WLS.V), ngroups
        ))
      }
    }

    # is WLS.V full? check first
    if (is.null(dim(WLS.V[[1]]))) {
      # we will assume it is the diagonal only
      WLS.VD <- WLS.V
      WLS.V <- lapply(WLS.VD, diag)
    } else {
      # create WLS.VD
      WLS.VD <- lapply(WLS.V, diag)
    }

    WLS.V.user <- TRUE
    # FIXME: check dimension of WLS.V!!
  }

  NACOV.compute <- FALSE # since 0.6-6
  if (is.null(NACOV)) {
    NACOV <- vector("list", length = ngroups)
    NACOV.user <- FALSE
    if (se == "robust.sem" && missing == "listwise") {
      NACOV.compute <- TRUE
    }
    # note: test can be a vector...
    if (missing == "listwise" && any(test %in% c(
      "satorra.bentler",
      "mean.var.adjusted", "scaled.shifted"
    ))) {
      NACOV.compute <- TRUE
    }
  } else if (is.logical(NACOV)) {
    if (!NACOV) {
      NACOV.compute <- FALSE
    } else {
      NACOV.compute <- TRUE
    }
    NACOV.user <- FALSE
    NACOV <- vector("list", length = ngroups)
  } else {
    if (!is.list(NACOV)) {
      if (ngroups == 1L) {
        NACOV <- list(NACOV)
      } else {
        lav_msg_stop(gettextf(
          "NACOV argument should be a list of length %s", ngroups
        ))
      }
    } else {
      if (length(NACOV) != ngroups) {
        lav_msg_stop(gettextf(
          "NACOV assumes %1$s groups; data contains %2$s groups",
          length(NACOV), ngroups
        ))
      }
    }

    NACOV.user <- TRUE
    # FIXME: check dimension of NACOV!!
  }

  # compute some sample statistics per group
  for (g in 1:ngroups) {
    # switch off computing all sample statistics? (housekeeping only)
    if (!is.null(lavoptions$samplestats) && !lavoptions$samplestats) {
      next
    }

    # check nobs
    if (is.null(WT[[g]])) {
      if (nobs[[g]] < 2L) {
        if (nobs[[g]] == 0L) {
          lav_msg_stop(
            gettext("data contains no observations"),
            if (ngroups > 1L) gettextf("in group %s", g) else ""
          )
        } else {
          lav_msg_stop(
            gettext("data contains only a single observation"),
            if (ngroups > 1L) gettextf("in group %s", g) else ""
          )
        }
      }
    }

    # exogenous x?
    nexo <- length(ov.names.x[[g]])
    if (nexo) {
      stopifnot(nexo == NCOL(eXo[[g]]))

      # two cases: ov.names contains 'x' variables, or not
      if (conditional.x) {
        # ov.names.x are NOT in ov.names
        x.idx[[g]] <- length(ov.names[[g]]) + seq_len(nexo)
      } else {
        if (fixed.x) {
          # ov.names.x are a subset of ov.names
          x.idx[[g]] <- match(ov.names.x[[g]], ov.names[[g]])
          stopifnot(!anyNA(x.idx[[g]]))
        } else {
          x.idx[[g]] <- integer(0L)
        }
      }
    } else {
      x.idx[[g]] <- integer(0L)
      conditional.x <- FALSE
      fixed.x <- FALSE
    }

    # group weight
    group.w[[g]] <- nobs[[g]] / sum(unlist(nobs))

    # check if we have categorical data in this group
    categorical <- FALSE
    ov.types <- DataOv$type[match(ov.names[[g]], DataOv$name)]
    ov.levels <- DataOv$nlev[match(ov.names[[g]], DataOv$name)]
    CAT <- list()
    if ("ordered" %in% ov.types) {
      categorical <- TRUE
      if (nlevels > 1L) {
        lav_msg_warn(gettext("multilevel + categorical not supported yet."))
      }
    }

    if (categorical) {
      # compute CAT
      if (estimator %in% c("ML", "REML", "PML", "FML", "MML", "none", "ULS")) {
        WLS.W <- FALSE
        if (estimator == "ULS" && se == "robust.sem") { # ||
          # any(test %in% c("satorra.bentler", "scaled.shifted",
          #                 "mean.var.adjusted")))) {
          WLS.W <- TRUE
        }
      } else {
        WLS.W <- TRUE
      }
      # check cat.wls.w option (new in 0.6-18)
      if (!is.null(lavoptions$cat.wls.w) && !lavoptions$cat.wls.w) {
        WLS.W <- FALSE # perhaps do.fit = FALSE? (eg sam())
      }
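      ## Note (editorial): muthen1984() below implements Muthen's (1984)
      ## staged estimator: univariate thresholds/means first, then (if any)
      ## slopes, then pairwise polychoric/polyserial correlations; WLS.W asks
      ## it to also return the asymptotic weight matrix of these statistics.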
      if (lav_verbose()) {
        cat("Estimating sample thresholds and correlations ... ")
      }
      current.verbose <- lav_verbose()
      if (lav_verbose(lav_debug())) on.exit(lav_verbose(current.verbose), TRUE)
      if (conditional.x) {
        CAT <- muthen1984(
          Data = X[[g]],
          wt = WT[[g]],
          ov.names = ov.names[[g]],
          ov.types = ov.types,
          ov.levels = ov.levels,
          ov.names.x = ov.names.x[[g]],
          eXo = eXo[[g]],
          group = g, # for error messages only
          WLS.W = WLS.W,
          zero.add = zero.add,
          zero.keep.margins = zero.keep.margins,
          zero.cell.warn = FALSE,
          zero.cell.tables = TRUE
        )
      } else {
        CAT <- muthen1984(
          Data = X[[g]],
          wt = WT[[g]],
          ov.names = ov.names[[g]],
          ov.types = ov.types,
          ov.levels = ov.levels,
          ov.names.x = NULL,
          eXo = NULL,
          group = g, # for error messages only
          WLS.W = WLS.W,
          zero.add = zero.add,
          zero.keep.margins = zero.keep.margins,
          zero.cell.warn = FALSE,
          zero.cell.tables = TRUE
        )
      }
      lav_verbose(current.verbose)
      # empty cell tables
      zero.cell.tables[[g]] <- CAT$zero.cell.tables
      if (lav_verbose()) cat("done\n")
    }

    if (categorical) {
      # convenience
      th.idx[[g]] <- unlist(CAT$TH.IDX)
      th.names[[g]] <- unlist(CAT$TH.NAMES)

      if (conditional.x) {
        # residual var/cov
        res.var[[g]] <- unlist(CAT$VAR)
        res.cov[[g]] <- unname(CAT$COV)
        if (ridge) {
          diag(res.cov[[g]]) <- diag(res.cov[[g]]) + ridge.eps
          res.var[[g]] <- diag(res.cov[[g]])
        }

        # th also contains the means of numeric variables
        res.th[[g]] <- unlist(CAT$TH)
        res.th.nox[[g]] <- unlist(CAT$TH.NOX)

        # for convenience, we store the intercept of numeric
        # variables in res.int
        NVAR <- NCOL(res.cov[[g]])
        mean[[g]] <- res.int[[g]] <- numeric(NVAR)
        num.idx <- which(!seq_len(NVAR) %in% th.idx[[g]])
        if (length(num.idx) > 0L) {
          NUM.idx <- which(th.idx[[g]] == 0L)
          mean[[g]][num.idx] <- res.th.nox[[g]][NUM.idx]
          res.int[[g]][num.idx] <- res.th[[g]][NUM.idx]
        }

        # slopes
        res.slopes[[g]] <- CAT$SLOPES
      } else {
        # var/cov
        var[[g]] <- unlist(CAT$VAR)
        cov[[g]] <- unname(CAT$COV)
        if (ridge) {
          diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          var[[g]] <- diag(cov[[g]])
        }

        # th also contains the means of numeric variables
        th[[g]] <- unlist(CAT$TH)

        # mean (numeric only)
        NVAR <- NCOL(cov[[g]])
        mean[[g]] <- numeric(NVAR)
        num.idx <- which(!seq_len(NVAR) %in% th.idx[[g]])
        if (length(num.idx) > 0L) {
          NUM.idx <- which(th.idx[[g]] == 0L)
          mean[[g]][num.idx] <- th[[g]][NUM.idx]
        }
      }

      # only for catML
      if (estimator == "catML") {
        COV <- cov2cor(lav_matrix_symmetric_force_pd(cov[[g]],
          tol = 1e-04
        ))
        # overwrite
        cov[[g]] <- COV
        out <- lav_samplestats_icov(
          COV = COV,
          x.idx = x.idx[[g]],
          ngroups = ngroups, g = g
        )
        icov[[g]] <- out$icov
        cov.log.det[[g]] <- out$cov.log.det
        # the same for res.cov if conditional.x = TRUE
        if (conditional.x) {
          RES.COV <- cov2cor(lav_matrix_symmetric_force_pd(res.cov[[g]],
            tol = 1e-04
          ))
          # overwrite
          res.cov[[g]] <- RES.COV
          out <- lav_samplestats_icov(
            COV = RES.COV, ridge = 1e-05,
            x.idx = x.idx[[g]],
            ngroups = ngroups, g = g
          )
          res.icov[[g]] <- out$icov
          res.cov.log.det[[g]] <- out$cov.log.det
        }
      }
    } # categorical

    # continuous -- multilevel
    else if (nlevels > 1L) {
      # level-based sample statistics
      YLp[[g]] <- lav_samplestats_cluster_patterns(
        Y = X[[g]],
        Lp = lavdata@Lp[[g]],
        conditional.x = lavoptions$conditional.x
      )

      if (conditional.x) {
        # for starting values only
        # no handling of missing data yet....
        if (missing %in% c(
          "ml", "ml.x",
          "two.stage", "robust.two.stage"
        )) {
          lav_msg_stop(gettextf(
            "missing = %s + conditional.x + two.level not supported yet",
            missing
          ))
        }

        # residual covariances!
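        ## Note (editorial): the block below computes the residual (y | x)
        ## moments via the Schur complement,
        ##   res.cov = S_yy - S_yx %*% solve(S_xx) %*% S_xy,
        ## and recovers the regression intercepts/slopes from the augmented
        ## moment equations assembled in C3/B3.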
        Y <- X[[g]] # contains eXo
        COV <- unname(stats::cov(Y, use = "pairwise.complete.obs"))
        # if we have missing values (missing by design?), replace them by 0
        COV[is.na(COV)] <- 0
        MEAN <- unname(colMeans(Y, na.rm = TRUE))
        var[[g]] <- diag(COV)
        # rescale cov by (N-1)/N? (only COV!)
        if (rescale) {
          # we 'transform' the sample cov (divided by n-1)
          # to a sample cov divided by 'n'
          COV <- ((nobs[[g]] - 1) / nobs[[g]]) * COV
        }
        cov[[g]] <- COV
        if (ridge) {
          diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          var[[g]] <- diag(cov[[g]])
        }
        mean[[g]] <- MEAN

        A <- COV[-x.idx[[g]], -x.idx[[g]], drop = FALSE]
        B <- COV[-x.idx[[g]], x.idx[[g]], drop = FALSE]
        C <- COV[x.idx[[g]], x.idx[[g]], drop = FALSE]
        # FIXME: make robust against singular C!!!
        res.cov[[g]] <- A - B %*% solve(C) %*% t(B)
        res.var[[g]] <- diag(cov[[g]])

        MY <- MEAN[-x.idx[[g]]]
        MX <- MEAN[x.idx[[g]]]
        C3 <- rbind(
          c(1, MX),
          cbind(MX, C + tcrossprod(MX))
        )
        B3 <- cbind(MY, B + tcrossprod(MY, MX))
        COEF <- unname(solve(C3, t(B3)))
        res.int[[g]] <- COEF[1, ] # intercepts
        res.slopes[[g]] <- t(COEF[-1, , drop = FALSE]) # slopes
      } else {
        # FIXME: needed?
        COV <- unname(stats::cov(X[[g]], use = "pairwise.complete.obs"))
        # if we have missing values (missing by design?), replace them by 0
        COV[is.na(COV)] <- 0
        cov[[g]] <- COV
        mean[[g]] <- unname(colMeans(X[[g]], na.rm = TRUE))
        var[[g]] <- diag(cov[[g]])

        # missing patterns
        if (missing %in% c("ml", "ml.x")) {
          missing.flag. <- TRUE
          missing.[[g]] <- lav_samplestats_missing_patterns(
            Y = X[[g]],
            Mp = Mp[[g]],
            wt = WT[[g]],
            Lp = lavdata@Lp[[g]]
          )
        }
      }
    } # multilevel

    # continuous -- single-level
    else {
      if (conditional.x) {
        # FIXME!
        # no correlation structures yet
        if (correlation) {
          lav_msg_stop(gettext(
            "conditional.x = TRUE is not supported (yet) for correlation structures."
          ))
        }

        # FIXME!
        # no handling of missing data yet....
        if (missing %in% c(
          "ml", "ml.x",
          "two.stage", "robust.two.stage"
        )) {
          lav_msg_stop(gettextf(
            "missing = %s + conditional.x not supported yet", missing
          ))
        }

        # residual covariances!
        Y <- cbind(X[[g]], eXo[[g]])
        COV <- unname(stats::cov(Y, use = "pairwise.complete.obs"))
        # if we have missing values (missing by design?), replace them by 0
        COV[is.na(COV)] <- 0
        MEAN <- unname(colMeans(Y, na.rm = TRUE))
        # rescale cov by (N-1)/N? (only COV!)
        if (rescale) {
          # we 'transform' the sample cov (divided by n-1)
          # to a sample cov divided by 'n'
          COV <- ((nobs[[g]] - 1) / nobs[[g]]) * COV
        }
        cov[[g]] <- COV
        var[[g]] <- diag(COV)
        if (ridge) {
          diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          var[[g]] <- diag(cov[[g]])
        }
        mean[[g]] <- MEAN

        A <- COV[-x.idx[[g]], -x.idx[[g]], drop = FALSE]
        B <- COV[-x.idx[[g]], x.idx[[g]], drop = FALSE]
        C <- COV[x.idx[[g]], x.idx[[g]], drop = FALSE]
        # FIXME: make robust against singular C!!!
        res.cov[[g]] <- A - B %*% solve(C) %*% t(B)
        res.var[[g]] <- diag(cov[[g]])

        MY <- MEAN[-x.idx[[g]]]
        MX <- MEAN[x.idx[[g]]]
        C3 <- rbind(
          c(1, MX),
          cbind(MX, C + tcrossprod(MX))
        )
        B3 <- cbind(MY, B + tcrossprod(MY, MX))
        COEF <- unname(solve(C3, t(B3)))
        res.int[[g]] <- COEF[1, ] # intercepts
        res.slopes[[g]] <- t(COEF[-1, , drop = FALSE]) # slopes
      } else if (missing == "two.stage" ||
        missing == "robust.two.stage") {
        missing.flag. <- FALSE # !!! just use sample statistics
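        ## Note (editorial): for (robust) two.stage, the saturated EM
        ## estimates computed below take the place of the raw sample moments,
        ## so downstream code can treat them as complete-data statistics
        ## (see also "here, sample statistics == EM estimates" below).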
        missing.[[g]] <- lav_samplestats_missing_patterns(
          Y = X[[g]],
          Mp = Mp[[g]],
          wt = WT[[g]]
        )
        current.warn <- lav_warn()
        if (lav_warn(lavoptions$em.h1.warn)) on.exit(lav_warn(current.warn), TRUE)
        out <- lav_mvnorm_missing_h1_estimate_moments(
          Y = X[[g]],
          wt = WT[[g]],
          Mp = Mp[[g]],
          Yp = missing.[[g]],
          max.iter = lavoptions$em.h1.iter.max,
          tol = lavoptions$em.h1.tol
        )
        lav_warn(current.warn)
        missing.h1.[[g]]$sigma <- out$Sigma
        missing.h1.[[g]]$mu <- out$Mu
        missing.h1.[[g]]$h1 <- out$fx

        # here, sample statistics == EM estimates
        cov[[g]] <- missing.h1.[[g]]$sigma
        if (ridge) {
          diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
        }
        var[[g]] <- diag(cov[[g]])
        mean[[g]] <- missing.h1.[[g]]$mu
      } else if (missing %in% c("ml", "ml.x")) {
        missing.flag. <- TRUE
        missing.[[g]] <- lav_samplestats_missing_patterns(
          Y = X[[g]],
          Mp = Mp[[g]],
          wt = WT[[g]]
        )
        if (nlevels == 1L) {
          # estimate moments unrestricted model
          current.warn <- lav_warn()
          if (lav_warn(lavoptions$em.h1.warn)) {
            on.exit(lav_warn(current.warn), TRUE)
          }
          out <- lav_mvnorm_missing_h1_estimate_moments(
            Y = X[[g]],
            wt = WT[[g]],
            Mp = Mp[[g]],
            Yp = missing.[[g]],
            max.iter = lavoptions$em.h1.iter.max,
            tol = lavoptions$em.h1.tol
          )
          lav_warn(current.warn)
          missing.h1.[[g]]$sigma <- out$Sigma
          missing.h1.[[g]]$mu <- out$Mu
          missing.h1.[[g]]$h1 <- out$fx
        }
        if (!is.null(WT[[g]])) {
          # here, sample statistics == EM estimates
          cov[[g]] <- missing.h1.[[g]]$sigma
          if (ridge) {
            diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          }
          var[[g]] <- diag(cov[[g]])
          mean[[g]] <- missing.h1.[[g]]$mu
        } else {
          # NEEDED? why not just EM-based?
          COV <- unname(stats::cov(X[[g]], use = "pairwise.complete.obs"))
          # if we have missing values (missing by design?), replace them by 0
          COV[is.na(COV)] <- 0
          cov[[g]] <- COV
          # rescale cov by (N-1)/N? (only COV!)
          if (rescale) {
            # we 'transform' the sample cov (divided by n-1)
            # to a sample cov divided by 'n'
            cov[[g]] <- ((nobs[[g]] - 1) / nobs[[g]]) * cov[[g]]
          }
          if (ridge) {
            diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          }
          var[[g]] <- diag(cov[[g]])
          mean[[g]] <- colMeans(X[[g]], na.rm = TRUE)
        }
      } else {
        # LISTWISE
        if (!is.null(WT[[g]])) {
          out <- stats::cov.wt(X[[g]],
            wt = WT[[g]],
            method = "ML"
          )
          COV <- out$cov
          # if we have missing values (missing by design?), replace them by 0
          COV[is.na(COV)] <- 0
          cov[[g]] <- COV
          if (ridge) {
            diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          }
          var[[g]] <- diag(cov[[g]])
          mean[[g]] <- out$center
        } else if (lavoptions$sample.cov.robust) {
          # fixme: allow prob/max.it to be options
          out <- lav_cov_huber(
            Y = X[[g]], prob = 0.95,
            max.it = 200L, tol = 1e-07
          )
          cov[[g]] <- out$Sigma
          var[[g]] <- diag(cov[[g]])
          mean[[g]] <- out$Mu
        } else {
          COV <- unname(stats::cov(X[[g]], use = "pairwise.complete.obs"))
          # if we have missing values (missing by design?), replace them by 0
          COV[is.na(COV)] <- 0
          cov[[g]] <- COV
          # rescale cov by (N-1)/N? (only COV!)
          if (rescale) {
            # we 'transform' the sample cov (divided by n-1)
            # to a sample cov divided by 'n'
            cov[[g]] <- ((nobs[[g]] - 1) / nobs[[g]]) * cov[[g]]
          }
          if (ridge) {
            diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
          }
          var[[g]] <- diag(cov[[g]])
          mean[[g]] <- colMeans(X[[g]], na.rm = TRUE)
        }
      }

      # correlation structure?
      if (correlation) {
        cov[[g]] <- cov2cor(cov[[g]])
        var[[g]] <- rep(1, length(var[[g]]))
        if (conditional.x) {
          res.cov[[g]] <- cov2cor(res.cov[[g]])
          res.var[[g]] <- rep(1, length(res.var[[g]]))
          cov.x[[g]] <- cov2cor(cov.x[[g]])
          # FIXME: slopes? more?
        }
      }

      # icov and cov.log.det (but not if missing)
      if (sample.icov && !missing %in% c("ml", "ml.x")) {
        out <- lav_samplestats_icov(
          COV = cov[[g]], ridge = 1e-05,
          x.idx = x.idx[[g]],
          ngroups = ngroups, g = g
        )
        icov[[g]] <- out$icov
        cov.log.det[[g]] <- out$cov.log.det

        # the same for res.cov if conditional.x = TRUE
        if (conditional.x) {
          out <- lav_samplestats_icov(
            COV = res.cov[[g]], ridge = 1e-05,
            x.idx = x.idx[[g]],
            ngroups = ngroups, g = g
          )
          res.icov[[g]] <- out$icov
          res.cov.log.det[[g]] <- out$cov.log.det
        }
      }
    } # continuous - single level

    # WLS.obs
    if (nlevels == 1L) {
      if (estimator == "catML") {
        # correlations only (for now)
        tmp.categorical <- FALSE
        tmp.meanstructure <- FALSE
      } else {
        tmp.categorical <- categorical
        tmp.meanstructure <- meanstructure
      }
      WLS.obs[[g]] <- lav_samplestats_wls_obs(
        mean.g = mean[[g]],
        cov.g = cov[[g]], var.g = var[[g]], th.g = th[[g]],
        th.idx.g = th.idx[[g]], res.int.g = res.int[[g]],
        res.cov.g = res.cov[[g]], res.var.g = res.var[[g]],
        res.th.g = res.th[[g]], res.slopes.g = res.slopes[[g]],
        group.w.g = log(nobs[[g]]),
        categorical = tmp.categorical,
        conditional.x = conditional.x,
        meanstructure = tmp.meanstructure,
        correlation = correlation,
        slopestructure = conditional.x,
        group.w.free = group.w.free
      )
    }

    # fill in the other slots
    if (!is.null(eXo[[g]])) {
      if (!is.null(WT[[g]])) {
        if (missing != "listwise") {
          cov.x[[g]] <- missing.h1.[[g]]$sigma[x.idx[[g]],
            x.idx[[g]],
            drop = FALSE
          ]
          mean.x[[g]] <- missing.h1.[[g]]$mu[x.idx[[g]]]
        } else {
          out <- stats::cov.wt(eXo[[g]],
            wt = WT[[g]],
            method = "ML"
          )
          cov.x[[g]] <- out$cov
          mean.x[[g]] <- out$center
        }
      } else {
        cov.x[[g]] <- cov(eXo[[g]], use = "pairwise")
        if (rescale) {
          # we 'transform' the sample cov (divided by n-1)
          # to a sample cov divided by 'n'
          cov.x[[g]] <- ((nobs[[g]] - 1) / nobs[[g]]) * cov.x[[g]]
        }
        mean.x[[g]] <- colMeans(eXo[[g]])
      }
    }

    # NACOV (=GAMMA)
    if (!NACOV.user && nlevels == 1L) {
      if (estimator == "ML" && !missing.flag. && NACOV.compute) {
        if (conditional.x) {
          Y <- Y
        } else {
          Y <- X[[g]]
        }
        if (length(lavdata@cluster) > 0L) {
          cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]]
        } else {
          cluster.idx <- NULL
        }
        if (correlation) {
          NACOV[[g]] <- lav_samplestats_cor_Gamma(
            Y = Y,
            meanstructure = meanstructure
          )
        } else {
          NACOV[[g]] <- lav_samplestats_Gamma(
            Y = Y,
            x.idx = x.idx[[g]],
            cluster.idx = cluster.idx,
            fixed.x = fixed.x,
            conditional.x = conditional.x,
            meanstructure = meanstructure,
            slopestructure = conditional.x,
            gamma.n.minus.one = lavoptions$gamma.n.minus.one,
            unbiased = lavoptions$gamma.unbiased,
            Mplus.WLS = FALSE
          )
        }
      } else if (estimator %in% c("WLS", "DWLS", "ULS", "DLS", "catML")) {
        if (!categorical) {
          # sample size large enough?
          nvar <- ncol(X[[g]])
          # if(conditional.x && nexo > 0L) {
          #     nvar <- nvar - nexo
          # }
          pstar <- nvar * (nvar + 1) / 2
          if (meanstructure) pstar <- pstar + nvar
          if (conditional.x && nexo > 0L) {
            pstar <- pstar + (nvar * nexo)
          }
          if (nrow(X[[g]]) < pstar) {
            lav_msg_warn(
              gettextf(
                "number of observations (%s) too small to compute Gamma",
                nrow(X[[g]])
              ),
              if (ngroups > 1L) gettextf("in group %s", g) else ""
            )
          }
          if (conditional.x) {
            Y <- Y
          } else {
            Y <- X[[g]]
          }
          if (length(lavdata@cluster) > 0L) {
            cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]]
          } else {
            cluster.idx <- NULL
          }
          if (correlation) {
            NACOV[[g]] <- lav_samplestats_cor_Gamma(
              Y = Y,
              meanstructure = meanstructure
            )
          } else {
            NACOV[[g]] <- lav_samplestats_Gamma(
              Y = Y,
              x.idx = x.idx[[g]],
              cluster.idx = cluster.idx,
              fixed.x = fixed.x,
              conditional.x = conditional.x,
              meanstructure = meanstructure,
              slopestructure = conditional.x,
              gamma.n.minus.one = lavoptions$gamma.n.minus.one,
              unbiased = lavoptions$gamma.unbiased,
              Mplus.WLS = (mimic == "Mplus")
            )
          }
        } else { # categorical case
          NACOV[[g]] <- CAT$WLS.W * nobs[[g]]
          if (lavoptions$gamma.n.minus.one) {
            NACOV[[g]] <- NACOV[[g]] * (nobs[[g]] / (nobs[[g]] - 1L))
          }
          if (estimator == "catML") {
            # remove all but the correlation part
            ntotal <- nrow(NACOV[[g]])
            pstar <- nrow(CAT$A22)
            nocor <- ntotal - pstar
            if (length(nocor) > 0L) {
              NACOV[[g]] <- NACOV[[g]][
                -seq_len(nocor),
                -seq_len(nocor)
              ]
            }
          }
        }
      } else if (estimator == "PML") {
        # no NACOV ... for now
      }

      # group.w.free
      if (!is.null(NACOV[[g]]) && group.w.free) {
        # unweight!!
        a <- group.w[[g]] * sum(unlist(nobs)) / nobs[[g]] # always 1!!!
        NACOV[[g]] <- lav_matrix_bdiag(matrix(a, 1, 1), NACOV[[g]])
      }
    }

    # WLS.V
    if (!WLS.V.user && nlevels == 1L) {
      if (estimator == "DLS" && dls.GammaNT == "sample" && dls.a < 1.0) {
        # compute GammaNT here
        if (correlation) {
          GammaNT <- lav_samplestats_cor_Gamma_NT(
            COV = cov[[g]],
            MEAN = mean[[g]],
            rescale = FALSE,
            x.idx = x.idx[[g]], # not used yet
            fixed.x = fixed.x, # not used yet
            conditional.x = conditional.x, # not used yet
            meanstructure = meanstructure, # not used yet
            slopestructure = conditional.x # not used yet
          )
        } else {
          GammaNT <- lav_samplestats_Gamma_NT(
            COV = cov[[g]],
            MEAN = mean[[g]],
            rescale = FALSE,
            x.idx = x.idx[[g]],
            fixed.x = fixed.x,
            conditional.x = conditional.x,
            meanstructure = meanstructure,
            slopestructure = conditional.x
          )
        }
      }
      if (estimator == "GLS" ||
        (estimator == "DLS" && dls.GammaNT == "sample" && dls.a == 1.0)) {
        # Note: we need the 'original' COV/MEAN/ICOV
        # sample statistics; not the 'residual' version
        if (correlation) {
          GammaNT <- lav_samplestats_cor_Gamma_NT(
            COV = cov[[g]],
            MEAN = mean[[g]],
            # rescale = FALSE,
            x.idx = x.idx[[g]], # not used yet
            fixed.x = fixed.x, # not used yet
            conditional.x = conditional.x, # not used yet
            meanstructure = meanstructure, # not used yet
            slopestructure = conditional.x # not used yet
          )
          WLS.V[[g]] <- lav_matrix_symmetric_inverse(GammaNT)
        } else {
          WLS.V[[g]] <- lav_samplestats_Gamma_inverse_NT(
            ICOV = icov[[g]],
            COV = cov[[g]],
            MEAN = mean[[g]],
            rescale = FALSE,
            x.idx = x.idx[[g]],
            fixed.x = fixed.x,
            conditional.x = conditional.x,
            meanstructure = meanstructure,
            slopestructure = conditional.x
          )
        }
        if (mimic == "Mplus" && !conditional.x && meanstructure) {
          # bug in Mplus? V11 rescaled by nobs[[g]]/(nobs[[g]]-1)
          nvar <- NCOL(cov[[g]])
          WLS.V[[g]][1:nvar, 1:nvar] <- WLS.V[[g]][1:nvar, 1:nvar,
            drop = FALSE
          ] * (nobs[[g]] / (nobs[[g]] - 1))
        }
      } else if (estimator == "ML") {
        # no WLS.V here, since function of model-implied moments
      } else if (estimator %in% c("WLS", "DWLS", "ULS", "DLS")) {
        if (!categorical) {
          if (estimator == "WLS" || estimator == "DLS") {
            if (!fixed.x) {
              if (estimator != "DLS") {
                # Gamma should be positive definite before we invert
                ev <- eigen(NACOV[[g]], # symmetric=FALSE,
                  only.values = TRUE
                )$values
                if (is.complex(ev)) {
                  lav_msg_stop(gettext(
                    "Gamma (NACOV) matrix is not positive-definite"
                  ))
                }
                if (any(Re(ev) < 0)) {
                  lav_msg_stop(gettext(
                    "Gamma (NACOV) matrix is not positive-definite"
                  ))
                }
              }
              if (estimator == "DLS" && dls.GammaNT == "sample") {
                if (dls.a == 1.0) {
                  # nothing to do, use GLS version
                } else {
                  W.DLS <- (1 - dls.a) * NACOV[[g]] + dls.a * GammaNT
                  WLS.V[[g]] <- lav_matrix_symmetric_inverse(W.DLS)
                }
              } else { # WLS
                WLS.V[[g]] <- lav_matrix_symmetric_inverse(NACOV[[g]])
              }
            } else {
              # fixed.x: we have zero cols/rows
              # ginv does the trick, but perhaps this is overkill
              # just removing the zero rows/cols, invert, and
              # fill back in the zero rows/cols would do it
              # WLS.V[[g]] <- MASS::ginv(NACOV[[g]])
              if (estimator == "DLS" && dls.GammaNT == "sample") {
                W.DLS <- (1 - dls.a) * NACOV[[g]] + dls.a * GammaNT
                WLS.V[[g]] <- lav_matrix_symmetric_inverse(W.DLS)
              } else { # WLS
                WLS.V[[g]] <- lav_matrix_symmetric_inverse(NACOV[[g]])
              }
            }
          } else if (estimator == "DWLS") {
            dacov <- diag(NACOV[[g]])
            if (!all(is.finite(dacov))) {
              lav_msg_stop(gettext(
                "diagonal of Gamma (NACOV) contains non finite values"
              ))
            }
            if (fixed.x) {
              # structural zeroes!
              zero.idx <- which(dacov == 0.0)
              idacov <- 1 / dacov
              idacov[zero.idx] <- 0.0
            } else {
              idacov <- 1 / dacov
            }
            WLS.V[[g]] <- diag(idacov,
              nrow = NROW(NACOV[[g]]),
              ncol = NCOL(NACOV[[g]])
            )
            WLS.VD[[g]] <- idacov
          } else if (estimator == "ULS") {
            # WLS.V[[g]] <- diag(length(WLS.obs[[g]]))
            WLS.VD[[g]] <- rep(1, length(WLS.obs[[g]]))
          }
        } else {
          if (estimator == "WLS") {
            WLS.V[[g]] <- inv.chol(CAT$WLS.W * nobs[[g]])
          } else if (estimator == "DWLS") {
            dacov <- diag(CAT$WLS.W * nobs[[g]])
            # WLS.V[[g]] <- diag(1/dacov, nrow=NROW(CAT$WLS.W),
            #                    ncol=NCOL(CAT$WLS.W))
            WLS.VD[[g]] <- 1 / dacov
          } else if (estimator == "ULS") {
            # WLS.V[[g]] <- diag(length(WLS.obs[[g]]))
            WLS.VD[[g]] <- rep(1, length(WLS.obs[[g]]))
          }
        }
      } else if (estimator == "PML" || estimator == "FML") {
        # no WLS.V here
      }

      # group.w.free (only if categorical)
      if (group.w.free && categorical) {
        if (!is.null(WLS.V[[g]])) {
          # unweight!!
          a <- group.w[[g]] * sum(unlist(nobs)) / nobs[[g]] # always 1!!!
          # invert
          a <- 1 / a
          WLS.V[[g]] <- lav_matrix_bdiag(matrix(a, 1, 1), WLS.V[[g]])
        }
        if (!is.null(WLS.VD[[g]])) {
          # unweight!!
          a <- group.w[[g]] * sum(unlist(nobs)) / nobs[[g]] # always 1!!!
          # invert
          a <- 1 / a
          WLS.VD[[g]] <- c(a, WLS.VD[[g]])
        }
      }
    }
  } # ngroups

  # remove 'CAT', unless debug -- this is to save memory
  if (!lav_debug()) {
    CAT <- list()
  }

  # construct SampleStats object
  lavSampleStats <- new("lavSampleStats",
    # sample moments
    th = th,
    th.idx = th.idx,
    th.names = th.names,
    mean = mean,
    cov = cov,
    var = var,
    # residual (y | x)
    res.cov = res.cov,
    res.var = res.var,
    res.th = res.th,
    res.th.nox = res.th.nox,
    res.slopes = res.slopes,
    res.int = res.int,
    mean.x = mean.x,
    cov.x = cov.x,
    bifreq = bifreq,
    group.w = group.w,
    # convenience
    nobs = nobs,
    ntotal = sum(unlist(nobs)),
    ngroups = ngroups,
    x.idx = x.idx,
    # extra sample statistics
    icov = icov,
    cov.log.det = cov.log.det,
    res.icov = res.icov,
    res.cov.log.det = res.cov.log.det,
    ridge = ridge.eps,
    WLS.obs = WLS.obs,
    WLS.V = WLS.V,
    WLS.VD = WLS.VD,
    NACOV = NACOV,
    NACOV.user = NACOV.user,
    # cluster/levels
    YLp = YLp,
    # missingness
    missing.flag = missing.flag.,
    missing = missing.,
    missing.h1 = missing.h1.,
    zero.cell.tables = zero.cell.tables
  )

  # just a SINGLE warning if we have empty cells
  if ((!is.null(lavoptions$samplestats) && lavoptions$samplestats) &&
    categorical && zero.cell.warn &&
    any(sapply(zero.cell.tables, nrow) > 0L)) {
    nempty <- sum(sapply(zero.cell.tables, nrow))
    lav_msg_warn(gettextf(
      "%s bivariate tables have empty cells; to see them, use: lavInspect(fit, \"zero.cell.tables\")",
      nempty
    ))
  }

  lavSampleStats
}
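## Editorial sketch (not part of lavaan): the 'rescale' option used repeatedly
## above turns the unbiased (n - 1) covariance returned by stats::cov() into
## the ML (divide-by-n) version:
if (FALSE) {
  X.toy <- matrix(rnorm(20), 10, 2)
  n <- nrow(X.toy)
  S.ml <- (n - 1) / n * cov(X.toy)
  stopifnot(all.equal(S.ml, crossprod(scale(X.toy, scale = FALSE)) / n))
}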
lav_samplestats_from_moments <- function(sample.cov = NULL,
                                         sample.mean = NULL,
                                         sample.th = NULL,
                                         sample.nobs = NULL,
                                         ov.names = NULL, # including x
                                         ov.names.x = NULL,
                                         WLS.V = NULL,
                                         NACOV = NULL,
                                         lavoptions = NULL) {
  # extract options
  estimator <- lavoptions$estimator
  mimic <- lavoptions$mimic
  meanstructure <- lavoptions$meanstructure
  correlation <- lavoptions$correlation
  group.w.free <- lavoptions$group.w.free
  ridge <- lavoptions$ridge
  rescale <- lavoptions$sample.cov.rescale

  # no multilevel yet
  nlevels <- 1L

  # ridge default
  if (ridge) {
    if (is.numeric(lavoptions$ridge.constant)) {
      ridge.eps <- lavoptions$ridge.constant
    } else {
      ridge.eps <- 1e-5
    }
  } else {
    ridge.eps <- 0.0
  }

  # new in 0.6-3:
  # check if sample.cov has attributes if conditional.x = TRUE
  sample.res.slopes <- attr(sample.cov, "res.slopes")
  sample.cov.x <- attr(sample.cov, "cov.x")
  sample.mean.x <- attr(sample.cov, "mean.x")
  if (!is.null(sample.res.slopes)) {
    conditional.x <- TRUE
    # strip attributes
    attr(sample.cov, "res.slopes") <- NULL
    attr(sample.cov, "cov.x") <- NULL
    attr(sample.cov, "mean.x") <- NULL
    # make list
    if (!is.list(sample.res.slopes)) {
      sample.res.slopes <- list(sample.res.slopes)
    }
    if (!is.list(sample.cov.x)) {
      sample.cov.x <- list(sample.cov.x)
    }
    if (!is.list(sample.mean.x)) {
      sample.mean.x <- list(sample.mean.x)
    }
  } else if (!is.null(sample.cov.x)) {
    conditional.x <- FALSE
    fixed.x <- TRUE
    # strip attributes
    attr(sample.cov, "cov.x") <- NULL
    attr(sample.cov, "mean.x") <- NULL
    # make list
    if (!is.list(sample.cov.x)) {
      sample.cov.x <- list(sample.cov.x)
    }
    if (!is.list(sample.mean.x)) {
      sample.mean.x <- list(sample.mean.x)
    }
  } else if (is.null(sample.cov.x) && length(unlist(ov.names.x)) > 0L) {
    # fixed.x = TRUE, but only joint sample.cov is provided
    conditional.x <- FALSE
    fixed.x <- TRUE
    # create sample.cov.x and sample.mean.x later...
  } else {
    conditional.x <- FALSE
    fixed.x <- FALSE
  }

  # matrix -> list
  if (!is.list(sample.cov)) {
    sample.cov <- list(sample.cov)
  }

  # number of groups
  ngroups <- length(sample.cov)

  # ov.names
  if (!is.list(ov.names)) {
    ov.names <- rep(list(ov.names), ngroups)
  }
  if (!is.list(ov.names.x)) {
    ov.names.x <- rep(list(ov.names.x), ngroups)
  }

  if (!is.null(sample.mean)) {
    meanstructure <- TRUE
    if (!is.list(sample.mean)) {
      # check if sample.mean is string (between single quotes)
      if (is.character(sample.mean)) {
        sample.mean <- char2num(sample.mean)
      }
      sample.mean <- list(unname(sample.mean))
    } else {
      sample.mean <- lapply(lapply(sample.mean, unname), unclass)
    }
  }

  if (!is.null(sample.th)) {
    th.idx <- attr(sample.th, "th.idx")
    attr(sample.th, "th.idx") <- NULL
    if (is.null(th.idx)) {
      lav_msg_stop(gettext("sample.th should have a th.idx attribute"))
    } else {
      if (is.list(th.idx)) {
        th.names <- lapply(th.idx, names)
        th.idx <- lapply(lapply(th.idx, unname), unclass)
      } else {
        th.names <- list(names(th.idx))
        th.idx <- list(unclass(unname(th.idx)))
      }
    }
    if (is.list(sample.th)) {
      # strip names and lavaan.vector class
      sample.th <- lapply(lapply(sample.th, unname), unclass)
    } else {
      # strip names and lavaan.vector class, make list
      sample.th <- list(unclass(unname(sample.th)))
    }
  } else {
    th.idx <- vector("list", length = ngroups)
    th.names <- vector("list", length = ngroups)
  }

  # sample statistics per group
  cov <- vector("list", length = ngroups)
  var <- vector("list", length = ngroups)
  mean <- vector("list", length = ngroups)
  th <- vector("list", length = ngroups)
  # th.idx <- vector("list", length = ngroups)
  # th.names <- vector("list", length = ngroups)

  # residual (y | x)
  res.cov <- vector("list", length = ngroups)
  res.var <- vector("list", length = ngroups)
  res.slopes <- vector("list", length = ngroups)
  res.int <- vector("list", length = ngroups)
  res.th <- vector("list", length = ngroups)
  res.th.nox <- vector("list", length = ngroups)

  # fixed.x / conditional.x
  mean.x <- vector("list", length = ngroups)
  cov.x <- vector("list", length = ngroups)

  bifreq <- vector("list", length = ngroups)

  # extra sample statistics per group
  icov <- vector("list", length = ngroups)
  cov.log.det <- vector("list", length = ngroups)
  res.icov <- vector("list", length = ngroups)
  res.cov.log.det <- vector("list", length = ngroups)
  WLS.obs <- vector("list", length = ngroups)
  missing. <- vector("list", length = ngroups)
  missing.h1. <- vector("list", length = ngroups)
  missing.flag. <- FALSE
  zero.cell.tables <- vector("list", length = ngroups)
  YLp <- vector("list", length = ngroups)

  # group weights
  group.w <- vector("list", length = ngroups)
  x.idx <- vector("list", length = ngroups)

  categorical <- FALSE
  if (!is.null(sample.th)) {
    categorical <- TRUE
  }

  WLS.VD <- vector("list", length = ngroups)
  if (is.null(WLS.V)) {
    WLS.V <- vector("list", length = ngroups)
    WLS.V.user <- FALSE
  } else {
    if (!is.list(WLS.V)) {
      if (ngroups == 1L) {
        WLS.V <- list(unclass(WLS.V))
      } else {
        lav_msg_stop(gettextf(
          "WLS.V argument should be a list of length %s", ngroups
        ))
      }
    } else {
      if (length(WLS.V) != ngroups) {
        lav_msg_stop(gettextf(
          "WLS.V assumes %1$s groups; data contains %2$s groups",
          length(WLS.V), ngroups
        ))
      }
      WLS.V <- lapply(WLS.V, unclass)
    }

    # is WLS.V full? check first
    if (is.null(dim(WLS.V[[1]]))) {
      # we will assume it is the diagonal only
      WLS.VD <- WLS.V
      WLS.V <- lapply(WLS.VD, diag)
    } else {
      # create WLS.VD
      WLS.VD <- lapply(WLS.V, diag)
      # we could remove WLS.V to save space...
    }

    WLS.V.user <- TRUE
    # FIXME: check dimension of WLS.V!!
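    ## Note (editorial): a user-supplied WLS.V without a dim attribute is
    ## interpreted as the diagonal of the weight matrix; WLS.VD always holds
    ## that diagonal, whether WLS.V came in full or diagonal form.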
  }

  if (is.null(NACOV)) {
    NACOV <- vector("list", length = ngroups)
    NACOV.user <- FALSE
  } else {
    if (!is.list(NACOV)) {
      if (ngroups == 1L) {
        NACOV <- list(unclass(NACOV))
      } else {
        lav_msg_stop(gettextf(
          "NACOV argument should be a list of length %s", ngroups
        ))
      }
    } else {
      if (length(NACOV) != ngroups) {
        lav_msg_stop(gettextf(
          "NACOV assumes %1$s groups; data contains %2$s groups",
          length(NACOV), ngroups
        ))
      }
      NACOV <- lapply(NACOV, unclass)
    }

    NACOV.user <- TRUE
    # FIXME: check dimension of NACOV!!
  }

  nobs <- as.list(as.integer(sample.nobs))

  for (g in 1:ngroups) {
    # exogenous x?
    nexo <- length(ov.names.x[[g]])
    if (nexo) {
      # two cases: ov.names contains 'x' variables, or not
      if (conditional.x) {
        # ov.names.x are NOT in ov.names
        x.idx[[g]] <- which(ov.names[[g]] %in% ov.names.x[[g]])
      } else {
        if (fixed.x) {
          # ov.names.x are a subset of ov.names
          x.idx[[g]] <- match(ov.names.x[[g]], ov.names[[g]])
          stopifnot(!anyNA(x.idx[[g]]))
        } else {
          x.idx[[g]] <- integer(0L)
        }
      }
    } else {
      x.idx[[g]] <- integer(0L)
      conditional.x <- FALSE
      fixed.x <- FALSE
    }

    # group weight
    group.w[[g]] <- nobs[[g]] / sum(unlist(nobs))

    tmp.cov <- sample.cov[[g]]

    # make sure that the matrix is fully symmetric (NEEDED?)
    T <- t(tmp.cov)
    tmp.cov[upper.tri(tmp.cov)] <- T[upper.tri(T)]

    # check dimnames
    if (!is.null(rownames(tmp.cov))) {
      cov.names <- rownames(tmp.cov)
    } else if (!is.null(colnames(tmp.cov))) {
      cov.names <- colnames(tmp.cov)
    } else {
      lav_msg_stop(gettext(
        "please provide row/col names for the covariance matrix!"
      ))
    }

    # extract only the part we need (using ov.names)
    if (conditional.x) {
      idx <- match(ov.names[[g]][-x.idx[[g]]], cov.names)
    } else {
      idx <- match(ov.names[[g]], cov.names)
    }
    if (any(is.na(idx))) {
      cat("found: ", cov.names, "\n")
      cat("expected: ", ov.names[[g]], "\n")
      lav_msg_stop(gettextf(
        "rownames of covariance matrix do not match the model! found: %1$s expected: %2$s",
        lav_msg_view(cov.names), lav_msg_view(ov.names[[g]])
      ))
    } else {
      tmp.cov <- tmp.cov[idx, idx, drop = FALSE]
    }

    # strip dimnames
    dimnames(tmp.cov) <- NULL

    if (is.null(sample.mean)) {
      # assume zero mean vector
      tmp.mean <- numeric(ncol(tmp.cov))
    } else {
      # extract only the part we need
      tmp.mean <- unclass(sample.mean[[g]][idx])
    }

    if (categorical) {
      # categorical + conditional.x = TRUE
      if (conditional.x) {
        th.g <- numeric(length(th.idx[[g]]))
        ord.idx <- which(th.idx[[g]] > 0)
        num.idx <- which(th.idx[[g]] == 0)
        if (length(ord.idx) > 0L) {
          th.g[ord.idx] <- sample.th[[g]]
        }
        if (length(num.idx) > 0L) {
          ord.var.idx <- unique(th.idx[[g]][th.idx[[g]] > 0])
          th.g[num.idx] <- -1 * sample.mean[[g]][-ord.var.idx]
        }
        res.th[[g]] <- th.g
        res.th.nox[[g]] <- sample.th[[g]]

        res.cov[[g]] <- tmp.cov
        if (ridge) {
          diag(res.cov[[g]]) <- diag(res.cov[[g]]) + ridge.eps
        }
        res.var[[g]] <- diag(tmp.cov)
        res.int[[g]] <- tmp.mean
        res.slopes[[g]] <- unclass(unname(sample.res.slopes[[g]]))
        cov.x[[g]] <- unclass(unname(sample.cov.x[[g]]))
        mean.x[[g]] <- unclass(unname(sample.mean.x[[g]]))
        # th.idx and th.names are already ok

        # categorical + conditional.x = FALSE
      } else {
        th.g <- numeric(length(th.idx[[g]]))
        ord.idx <- which(th.idx[[g]] > 0)
        num.idx <- which(th.idx[[g]] == 0)
        if (length(ord.idx) > 0L) {
          th.g[ord.idx] <- sample.th[[g]]
        }
        if (length(num.idx) > 0L) {
          ord.var.idx <- unique(th.idx[[g]][th.idx[[g]] > 0])
          th.g[num.idx] <- -1 * sample.mean[[g]][-ord.var.idx]
        }
        th[[g]] <- th.g

        cov[[g]] <- tmp.cov
        if (ridge) {
          diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
        }
        var[[g]] <- diag(tmp.cov)
        mean[[g]] <- tmp.mean

        # fixed.x? (needed?)
        if (fixed.x) {
          cov.x[[g]] <- unclass(unname(sample.cov.x[[g]]))
          mean.x[[g]] <- unclass(unname(sample.mean.x[[g]]))
        }
        # th, th.idx and th.names are already ok
      }

      # multilevel
    } else if (nlevels > 1L) {
      lav_msg_stop(gettext("multilevel + sample stats not ready yet"))

      # single level
    } else {
      # single-level + continuous + conditional.x = TRUE
      if (conditional.x) {
        res.cov[[g]] <- tmp.cov
        if (ridge) {
          diag(res.cov[[g]]) <- diag(res.cov[[g]]) + ridge.eps
        }
        res.var[[g]] <- diag(tmp.cov)
        res.int[[g]] <- tmp.mean
        res.slopes[[g]] <- unclass(unname(sample.res.slopes[[g]]))
        cov.x[[g]] <- unclass(unname(sample.cov.x[[g]]))
        mean.x[[g]] <- unclass(unname(sample.mean.x[[g]]))

        # no rescale!

        # icov and cov.log.det
        # if(lavoptions$sample.icov) {
        out <- lav_samplestats_icov(
          COV = res.cov[[g]], ridge = 1e-05,
          x.idx = x.idx[[g]],
          ngroups = ngroups, g = g
        )
        res.icov[[g]] <- out$icov
        res.cov.log.det[[g]] <- out$cov.log.det
        # }

        # continuous + conditional.x = FALSE
      } else {
        cov[[g]] <- tmp.cov
        mean[[g]] <- tmp.mean

        # rescale cov by (N-1)/N?
        if (rescale) {
          # we 'transform' the sample cov (divided by n-1)
          # to a sample cov divided by 'n'
          cov[[g]] <- ((nobs[[g]] - 1) / nobs[[g]]) * cov[[g]]
        }
        if (ridge) {
          diag(cov[[g]]) <- diag(cov[[g]]) + ridge.eps
        }
        var[[g]] <- diag(cov[[g]])

        # icov and cov.log.det
        # if(lavoptions$sample.icov) {
        out <- lav_samplestats_icov(
          COV = cov[[g]], ridge = 1e-05,
          x.idx = x.idx[[g]],
          ngroups = ngroups, g = g
        )
        icov[[g]] <- out$icov
        cov.log.det[[g]] <- out$cov.log.det
        # }

        # fixed.x?
        if (fixed.x) {
          if (is.null(sample.cov.x)) {
            cov.x[[g]] <- cov[[g]][x.idx[[g]], x.idx[[g]],
              drop = FALSE
            ]
          } else {
            cov.x[[g]] <- unclass(unname(sample.cov.x[[g]]))
          }
          if (is.null(sample.mean.x)) {
            mean.x[[g]] <- mean[[g]][x.idx[[g]]]
          } else {
            mean.x[[g]] <- unclass(unname(sample.mean.x[[g]]))
          }
        }
      }

      # correlation structure?
      if (correlation) {
        cov[[g]] <- cov2cor(cov[[g]])
        var[[g]] <- rep(1, length(var[[g]]))
        if (conditional.x) {
          res.cov[[g]] <- cov2cor(res.cov[[g]])
          res.var[[g]] <- rep(1, length(res.var[[g]]))
          cov.x[[g]] <- cov2cor(cov.x[[g]])
          # FIXME: slopes? more?
        }
      }
    }

    # WLS.obs
    WLS.obs[[g]] <- lav_samplestats_wls_obs(
      mean.g = mean[[g]],
      cov.g = cov[[g]], var.g = var[[g]], th.g = th[[g]],
      th.idx.g = th.idx[[g]], res.int.g = res.int[[g]],
      res.cov.g = res.cov[[g]], res.var.g = res.var[[g]],
      res.th.g = res.th[[g]], res.slopes.g = res.slopes[[g]],
      group.w.g = log(nobs[[g]]),
      categorical = categorical,
      conditional.x = conditional.x,
      meanstructure = meanstructure,
      correlation = correlation,
      slopestructure = conditional.x,
      group.w.free = group.w.free
    )

    # WLS.V
    if (!WLS.V.user) {
      if (estimator == "GLS") {
        # FIXME: in <0.5-21, we had
        # V11 <- icov[[g]]
        # if(mimic == "Mplus") { # is this a bug in Mplus?
        #     V11 <- V11 * nobs[[g]]/(nobs[[g]]-1)
        # }
        if (correlation) {
          GammaNT <- lav_samplestats_cor_Gamma_NT(
            COV = cov[[g]],
            MEAN = mean[[g]],
            # rescale = FALSE,
            x.idx = x.idx[[g]], # not used yet
            fixed.x = fixed.x, # not used yet
            conditional.x = conditional.x, # not used yet
            meanstructure = meanstructure, # not used yet
            slopestructure = conditional.x # not used yet
          )
          WLS.V[[g]] <- lav_matrix_symmetric_inverse(GammaNT)
        } else {
          WLS.V[[g]] <- lav_samplestats_Gamma_inverse_NT(
            ICOV = icov[[g]],
            COV = cov[[g]],
            MEAN = mean[[g]],
            rescale = FALSE,
            x.idx = x.idx[[g]],
            fixed.x = fixed.x,
            conditional.x = conditional.x,
            meanstructure = meanstructure,
            slopestructure = conditional.x
          )
        }
      } else if (estimator == "ULS") {
        WLS.V[[g]] <- diag(length(WLS.obs[[g]]))
        WLS.VD[[g]] <- rep(1, length(WLS.obs[[g]]))
      } else if (estimator == "WLS" || estimator == "DWLS") {
        if (is.null(WLS.V[[g]])) {
          lav_msg_stop(gettext(
            "the (D)WLS estimator is only available with full data or with a user-provided WLS.V"
          ))
        }
      }

      # group.w.free
      if (!is.null(WLS.V[[g]]) && group.w.free) {
        # FIXME!!!
        WLS.V[[g]] <- lav_matrix_bdiag(matrix(1, 1, 1), WLS.V[[g]])
      }
    }
  } # ngroups

  # construct SampleStats object
  lavSampleStats <- new("lavSampleStats",
    # sample moments
    th = th,
    th.idx = th.idx,
    th.names = th.names,
    mean = mean,
    cov = cov,
    var = var,
    # residual (y | x)
    res.cov = res.cov,
    res.var = res.var,
    res.th = res.th,
    res.th.nox = res.th.nox,
    res.slopes = res.slopes,
    res.int = res.int,
    # fixed.x
    mean.x = mean.x,
    cov.x = cov.x,
    # other
    bifreq = bifreq,
    group.w = group.w,
    # convenience
    nobs = nobs,
    ntotal = sum(unlist(nobs)),
    ngroups = ngroups,
    x.idx = x.idx,
    # extra sample statistics
    icov = icov,
    cov.log.det = cov.log.det,
    res.icov = res.icov,
    res.cov.log.det = res.cov.log.det,
    ridge = ridge.eps,
    WLS.obs = WLS.obs,
    WLS.V = WLS.V,
    WLS.VD = WLS.VD,
    NACOV = NACOV,
    NACOV.user = NACOV.user,
    # cluster/level
    YLp = YLp,
    # missingness
    missing.flag = missing.flag.,
    missing = missing.,
    missing.h1 = missing.h1.,
    zero.cell.tables = zero.cell.tables
  )

  lavSampleStats
}
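## Editorial sketch (not part of lavaan): lav_samplestats_from_moments() reads
## optional attributes on 'sample.cov' to infer conditional.x/fixed.x, e.g.:
if (FALSE) {
  S.toy <- diag(2)
  rownames(S.toy) <- colnames(S.toy) <- c("y1", "x1")
  attr(S.toy, "cov.x") <- matrix(1, 1, 1) # exogenous covariance
  attr(S.toy, "mean.x") <- 0.5
  # with "cov.x" present but "res.slopes" absent, fixed.x = TRUE is inferred
}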
# compute sample statistics, per missing pattern
lav_samplestats_missing_patterns <- function(Y = NULL, Mp = NULL, wt = NULL,
                                             Lp = NULL) {
  # coerce Y to matrix
  Y <- as.matrix(Y)

  # handle two-level data
  if (!is.null(Lp)) {
    Y.orig <- Y
    Z <- NULL
    if (length(Lp$between.idx[[2]]) > 0L) {
      Y <- Y[, -Lp$between.idx[[2]], drop = FALSE]
      z.idx <- which(!duplicated(Lp$cluster.idx[[2]]))
      Z <- Y.orig[z.idx, Lp$between.idx[[2]], drop = FALSE]
    }
  }

  if (is.null(Mp)) {
    Mp <- lav_data_missing_patterns(Y,
      sort.freq = FALSE, coverage = FALSE,
      Lp = Lp
    )
  }

  Yp <- vector("list", length = Mp$npatterns)

  # fill in pattern statistics
  for (p in seq_len(Mp$npatterns)) {
    # extract raw data for these cases
    RAW <- Y[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE]

    # more than one case
    if (Mp$freq[p] > 1L) {
      if (!is.null(wt)) {
        out <- stats::cov.wt(RAW,
          wt = wt[Mp$case.idx[[p]]],
          method = "ML"
        )
        SY <- out$cov
        MY <- out$center
      } else {
        MY <- base::.colMeans(RAW, m = NROW(RAW), n = NCOL(RAW))
        # SY <- crossprod(RAW)/Mp$freq[p] - tcrossprod(MY)
        # bad practice, better like this:
        SY <- lav_matrix_cov(RAW)
      }
    }
    # only a single observation (no need to weight!)
    else {
      SY <- 0
      MY <- as.numeric(RAW)
    }

    if (!is.null(wt)) {
      FREQ <- sum(wt[Mp$case.idx[[p]]])
    } else {
      FREQ <- Mp$freq[p]
    }

    # store sample statistics, var.idx and freq
    Yp[[p]] <- list(
      SY = SY, MY = MY, var.idx = Mp$pat[p, ],
      freq = FREQ
    )

    # if clustered data, add rowsum over all cases per cluster
    if (!is.null(Lp)) {
      tmp <- rowsum.default(RAW, group = Mp$j.idx[[p]], reorder = FALSE)
      Yp[[p]]$ROWSUM <- tmp
    }
  }

  # add Zp as an attribute
  # if(!is.null(Lp)) {
  #     Zp <- lav_samplestats_missing_patterns(Y = Z, Mp = Mp$Zp)
  #     for(p in Mp$Zp$npatterns) {
  #         this.z <- Z[Mp$Zp$case.idx[[p]], drop = FALSE]
  #         Zp[[p]]$ROWSUM <- t(this.z)
  #     }
  #     attr(Yp, "Zp") <- Zp
  # }

  Yp
}
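## Editorial sketch (not part of lavaan): grouping cases by their
## missing-data pattern, as done above, in its most basic form:
if (FALSE) {
  Y.toy <- rbind(c(1, 2), c(2, NA), c(3, 4), c(4, NA))
  pat <- !is.na(Y.toy)
  # cases 1 and 3 share the complete pattern; cases 2 and 4 observe only y1
  split(seq_len(nrow(Y.toy)), apply(pat, 1, paste, collapse = ""))
}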
# compute sample statistics, per cluster
lav_samplestats_cluster_patterns <- function(Y = NULL, Lp = NULL,
                                             conditional.x = FALSE) {
  # coerce Y to matrix
  Y1 <- as.matrix(Y)
  N <- NROW(Y1)
  P <- NCOL(Y1)

  if (is.null(Lp)) {
    lav_msg_stop(gettext("Lp is NULL"))
  }

  # how many levels?
  nlevels <- length(Lp$cluster) + 1L

  # compute some sample statistics per level
  YLp <- vector("list", length = nlevels)
  for (l in 2:nlevels) {
    ncluster.sizes <- Lp$ncluster.sizes[[l]]
    cluster.size <- Lp$cluster.size[[l]]
    cluster.sizes <- Lp$cluster.sizes[[l]]
    nclusters <- Lp$nclusters[[l]]
    both.idx <- Lp$both.idx[[l]]
    within.idx <- Lp$within.idx[[l]]
    between.idx <- Lp$between.idx[[l]]
    cluster.idx <- Lp$cluster.idx[[l]]
    cluster.size.ns <- Lp$cluster.size.ns[[l]]

    # s <- (N^2 - sum(cluster.size^2)) / (N*(nclusters - 1L))
    # same as
    s <- (N - sum(cluster.size^2) / N) / (nclusters - 1)
    # NOTE: must be (nclusters - 1), otherwise, s is not the average cluster
    # size even in the balanced case

    Y1.means <- colMeans(Y1, na.rm = TRUE)
    Y1Y1 <- lav_matrix_crossprod(Y1)

    both.idx <- all.idx <- seq_len(P)
    if (length(within.idx) > 0L ||
      length(between.idx) > 0L) {
      both.idx <- all.idx[-c(within.idx, between.idx)]
      # hm, this assumes the 'order' is the
      # same at both levels...
    }

    # cluster-means
    Y2 <- rowsum.default(Y1,
      group = cluster.idx, reorder = FALSE,
      na.rm = FALSE # must be FALSE!
    ) / cluster.size
    Y2c <- t(t(Y2) - Y1.means)

    # compute S.w
    # center within variables by grand mean instead of group mean?
    # (YR: apparently not for S.PW)
    Y2a <- Y2
    # if(length(within.idx) > 0L) {
    #     for(i in 1:length(within.idx)) {
    #         Y2a[, within.idx[i]] <- Y1.means[within.idx[i]]
    #     }
    # }
    Y1a <- Y1 - Y2a[cluster.idx, , drop = FALSE]
    S.w <- lav_matrix_crossprod(Y1a) / (N - nclusters)

    # S.b
    # three parts: within/within, between/between, between/within

    # standard definition of the between variance matrix
    # divides by (nclusters - 1)
    S.b <- lav_matrix_crossprod(Y2c * cluster.size, Y2c) / (nclusters - 1)

    # check for zero variances
    if (length(both.idx) > 0L) {
      zero.idx <- which(diag(S.b)[both.idx] < 0.0001)
      if (length(zero.idx) > 0L && !anyNA(Y2)) {
        lav_msg_warn(
          gettext(
            "(near) zero variance at between level for split variable:"
          ),
          paste(Lp$both.names[[l]][zero.idx], collapse = " ")
        )
      }
    }

    S <- cov(Y1, use = "pairwise.complete.obs") * (N - 1L) / N
    # missing by design?
    S[is.na(S)] <- as.numeric(NA)

    # loglik.x
    # extract 'fixed' level-1 loglik from here
    wx.idx <- Lp$ov.x.idx[[1]]
    if (length(wx.idx) > 0L) {
      loglik.x.w <- lav_mvnorm_h1_loglik_samplestats(
        sample.nobs = Lp$nclusters[[1]],
        sample.cov = S[wx.idx, wx.idx, drop = FALSE]
      )
    } else {
      loglik.x.w <- 0
    }
    # extract 'fixed' level-2 loglik
    bx.idx <- Lp$ov.x.idx[[2]]
    if (length(bx.idx) > 0L) {
      COVB <- cov(Y2[, bx.idx, drop = FALSE]) * (nclusters - 1) / nclusters
      loglik.x.b <- lav_mvnorm_h1_loglik_samplestats(
        sample.nobs = Lp$nclusters[[2]],
        sample.cov = COVB
      )
    } else {
      loglik.x.b <- 0
    }
    loglik.x <- loglik.x.w + loglik.x.b

    S.PW.start <- S.w
    if (length(within.idx) > 0L) {
      S.PW.start[within.idx, within.idx] <-
        S[within.idx, within.idx, drop = FALSE]
    }

    if (length(between.idx) > 0L) {
      S.w[between.idx, ] <- 0
      S.w[, between.idx] <- 0
      S.PW.start[between.idx, ] <- 0
      S.PW.start[, between.idx] <- 0
    }

    if (length(between.idx) > 0L) {
      # this is what is needed for MUML:
      S.b[, between.idx] <-
        (s * nclusters / N) * S.b[, between.idx, drop = FALSE]
      S.b[between.idx, ] <-
        (s * nclusters / N) * S.b[between.idx, , drop = FALSE]
      S.b[between.idx, between.idx] <-
        (s * lav_matrix_crossprod(
          Y2c[, between.idx, drop = FALSE],
          Y2c[, between.idx, drop = FALSE]
        ) / nclusters)
    }

    Sigma.B <- (S.b - S.w) / s
    Sigma.B[within.idx, ] <- 0
    Sigma.B[, within.idx] <- 0

    # what if we have negative variances in Sigma.B?
    # this may happen if we 'split' a variable that has no between variance
    zero.idx <- which(diag(Sigma.B) < 1e-10)
    if (length(zero.idx) > 0L) {
      Sigma.B[zero.idx, ] <- 0
      Sigma.B[, zero.idx] <- 0
    }

    Mu.W <- numeric(P)
    Mu.W[within.idx] <- Y1.means[within.idx]
    Mu.B <- Y1.means
    Mu.B[within.idx] <- 0
    if (length(between.idx) > 0L) {
      # replace between.idx by cov(Y2)[,] elements...
      Mu.B[between.idx] <- colMeans(Y2[, between.idx, drop = FALSE],
        na.rm = TRUE
      )

      S2 <- (cov(Y2, use = "pairwise.complete.obs") *
        (nclusters - 1L) / nclusters)

      Sigma.B[between.idx, between.idx] <-
        S2[between.idx, between.idx, drop = FALSE]
    }

    # FIXME: Mu.B not quite ok for (fixed.x) x variables if they
    # occur both at level 1 AND level 2
    Mu.B.start <- Mu.B
    # Mu.B.start[both.idx] <- Mu.B.start[both.idx] - colMeans(Y2c[,both.idx])

    # sample statistics PER CLUSTER-SIZE

    # summary statistics for complete data, conditional.x = FALSE
    # also needed for h1 (even if conditional.x = TRUE)
    cov.d <- vector("list", length = ncluster.sizes)
    mean.d <- vector("list", length = ncluster.sizes)
    for (clz in seq_len(ncluster.sizes)) {
      nj <- cluster.sizes[clz]
      # select clusters with this size
      d.idx <- which(cluster.size == nj)
      ns <- length(d.idx)
      # NOTE:!!!!
      # reorder columns
      # to match A.inv and m.k later on in objective!!!
      tmp2 <- Y2[d.idx, c(between.idx, sort.int(c(both.idx, within.idx))),
        drop = FALSE
      ]
      mean.d[[clz]] <- colMeans(tmp2, na.rm = TRUE)
      bad.idx <- which(!is.finite(mean.d[[clz]])) # if nrow = 1 + NA
      if (length(bad.idx) > 0L) {
        mean.d[[clz]][bad.idx] <- 0 # ugly, only for starting values
      }
      if (length(d.idx) > 1L) {
        if (any(is.na(tmp2))) {
          # if full column has NA, this will fail...
# not needed anyway # out <- lav_mvnorm_missing_h1_estimate_moments(Y = tmp2, # max.iter = 10L) # cov.d[[clz]] <- out$Sigma cov.d[[clz]] <- 0 } else { cov.d[[clz]] <- (cov(tmp2, use = "complete.obs") * (ns - 1) / ns) } } else { cov.d[[clz]] <- 0 } } # clz # new in 0.6-12: # summary statistics for complete data, conditional.x = TRUE # ONLY for twolevel if (conditional.x) { within.x.idx <- Lp$within.x.idx[[1]] between.y.idx <- Lp$between.y.idx[[2]] between.x.idx <- Lp$between.x.idx[[2]] y1.idx <- Lp$ov.y.idx[[1]] x1.idx <- c(within.x.idx, between.x.idx) # in that order # data Y1.wb <- Y1[, y1.idx, drop = FALSE] Y2.wb <- Y2[, y1.idx, drop = FALSE] if (length(between.y.idx) > 0L) { Y2.z <- Y2[, between.y.idx, drop = FALSE] } if (length(x1.idx) > 0L) { EXO.wb1 <- cbind(1, Y1[, x1.idx, drop = FALSE]) EXO.wb2 <- cbind(1, Y2[, x1.idx, drop = FALSE]) } else { EXO.wb1 <- matrix(1, nrow(Y1), 1L) EXO.wb2 <- matrix(1, nrow(Y2), 1L) } # sample beta.wb (level 1) sample.wb <- solve(crossprod(EXO.wb1), crossprod(EXO.wb1, Y1.wb)) sample.yhat.wb1 <- EXO.wb1 %*% sample.wb sample.yres.wb1 <- Y1.wb - sample.yhat.wb1 sample.YYres.wb1 <- crossprod(sample.yres.wb1) sample.XX.wb1 <- crossprod(EXO.wb1) # sample beta.wb (level 2) XX.wb2 <- crossprod(EXO.wb2) sample.wb2 <- try(solve(XX.wb2, crossprod(EXO.wb2, Y2.wb)), silent = TRUE ) if (inherits(sample.wb2, "try-error")) { # this may happen if the covariate is cluster-centered # using the observed cluster means; then the 'means' will # be all (near) zero, and there is no variance sample.wb2 <- MASS::ginv(XX.wb2) %*% crossprod(EXO.wb2, Y2.wb) } sample.yhat.wb2 <- EXO.wb2 %*% sample.wb2 sample.yres.wb2 <- Y2.wb - sample.yhat.wb2 # weighted by cluster.size sample.YYres.wb2 <- crossprod( sample.yres.wb2, sample.yres.wb2 * cluster.size ) sample.YresX.wb2 <- crossprod( sample.yres.wb2, EXO.wb2 * cluster.size ) sample.XX.wb2 <- crossprod( EXO.wb2, EXO.wb2 * cluster.size ) sample.clz.Y2.res <- vector("list", ncluster.sizes) sample.clz.Y2.XX <- vector("list", ncluster.sizes) sample.clz.Y2.B <- vector("list", ncluster.sizes) if (length(between.y.idx) > 0L) { sample.clz.ZZ.res <- vector("list", ncluster.sizes) sample.clz.ZZ.XX <- vector("list", ncluster.sizes) sample.clz.ZZ.B <- vector("list", ncluster.sizes) sample.clz.YZ.res <- vector("list", ncluster.sizes) sample.clz.YZ.XX <- vector("list", ncluster.sizes) sample.clz.YresXZ <- vector("list", ncluster.sizes) sample.clz.XWZres <- vector("list", ncluster.sizes) } for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] nj.idx <- which(cluster.size == nj) # Y2 Y2.clz <- Y2[nj.idx, y1.idx, drop = FALSE] if (length(x1.idx) > 0L) { EXO2.clz <- cbind(1, Y2[nj.idx, x1.idx, drop = FALSE]) } else { EXO2.clz <- matrix(1, nrow(Y2.clz), 1L) } XX.clz <- crossprod(EXO2.clz) clz.Y2.B <- try(solve(XX.clz, crossprod(EXO2.clz, Y2.clz)), silent = TRUE ) if (inherits(clz.Y2.B, "try-error")) { clz.Y2.B <- MASS::ginv(XX.clz) %*% crossprod(EXO2.clz, Y2.clz) } clz.Y2.hat <- EXO2.clz %*% clz.Y2.B clz.Y2.res <- Y2.clz - clz.Y2.hat sample.clz.Y2.B[[clz]] <- clz.Y2.B sample.clz.Y2.res[[clz]] <- crossprod(clz.Y2.res) sample.clz.Y2.XX[[clz]] <- crossprod(EXO2.clz) # Z if (length(between.y.idx) > 0L) { Z.clz.z <- Y2[nj.idx, between.y.idx, drop = FALSE] if (length(between.x.idx) > 0L) { EXO.clz.z <- cbind( 1, Y2[nj.idx, between.x.idx, drop = FALSE] ) } else { EXO.clz.z <- matrix(1, nrow(Z.clz.z), 1L) } ZZ.clz <- crossprod(EXO.clz.z) clz.ZZ.B <- try( solve( ZZ.clz, crossprod(EXO.clz.z, Z.clz.z) ), silent = TRUE ) if (inherits(clz.ZZ.B, 
"try-error")) { clz.ZZ.B <- MASS::ginv(ZZ.clz) %*% crossprod(EXO.clz.z, Z.clz.z) } clz.Z.hat <- EXO.clz.z %*% clz.ZZ.B clz.Z.res <- Z.clz.z - clz.Z.hat sample.clz.ZZ.B[[clz]] <- clz.ZZ.B sample.clz.ZZ.res[[clz]] <- crossprod(clz.Z.res) sample.clz.ZZ.XX[[clz]] <- crossprod(EXO.clz.z) sample.clz.YZ.res[[clz]] <- crossprod(clz.Y2.res, clz.Z.res) sample.clz.YZ.XX[[clz]] <- crossprod(EXO2.clz, EXO.clz.z) sample.clz.YresXZ[[clz]] <- crossprod(clz.Y2.res, EXO.clz.z) sample.clz.XWZres[[clz]] <- crossprod(EXO2.clz, clz.Z.res) } } # clz } # conditional.x YLp[[l]] <- list( Y1Y1 = Y1Y1, Y2 = Y2, s = s, S.b = S.b, S.PW.start = S.PW.start, Sigma.W = S.w, Mu.W = Mu.W, Sigma.B = Sigma.B, Mu.B = Mu.B, Mu.B.start = Mu.B.start, loglik.x = loglik.x, mean.d = mean.d, cov.d = cov.d ) # if conditional, add more stuff if (conditional.x) { if (length(between.y.idx) > 0L) { extra <- list( sample.wb = sample.wb, sample.YYres.wb1 = sample.YYres.wb1, sample.XX.wb1 = sample.XX.wb1, sample.wb2 = sample.wb2, sample.YYres.wb2 = sample.YYres.wb2, sample.YresX.wb2 = sample.YresX.wb2, sample.XX.wb2 = sample.XX.wb2, sample.clz.Y2.res = sample.clz.Y2.res, sample.clz.Y2.XX = sample.clz.Y2.XX, sample.clz.Y2.B = sample.clz.Y2.B, sample.clz.ZZ.res = sample.clz.ZZ.res, sample.clz.ZZ.XX = sample.clz.ZZ.XX, sample.clz.ZZ.B = sample.clz.ZZ.B, sample.clz.YZ.res = sample.clz.YZ.res, sample.clz.YZ.XX = sample.clz.YZ.XX, sample.clz.YresXZ = sample.clz.YresXZ, # zero? sample.clz.XWZres = sample.clz.XWZres ) } else { extra <- list( sample.wb = sample.wb, sample.YYres.wb1 = sample.YYres.wb1, sample.XX.wb1 = sample.XX.wb1, sample.wb2 = sample.wb2, sample.YYres.wb2 = sample.YYres.wb2, sample.YresX.wb2 = sample.YresX.wb2, sample.XX.wb2 = sample.XX.wb2, sample.clz.Y2.res = sample.clz.Y2.res, sample.clz.Y2.XX = sample.clz.Y2.XX, sample.clz.Y2.B = sample.clz.Y2.B ) } YLp[[l]] <- c(YLp[[l]], extra) } } # l YLp } lavaan/R/ctr_pml_doubly_robust_utils.R0000644000176200001440000004202414627656441017715 0ustar liggesusers# This code was contributed by Myrsini Katsikatsou (LSE) -- September 2016 # # compute_uniCondProb_based_on_bivProb() # pairwiseExpProbVec_GivenObs() # LongVecTH.Rho.Generalised() # pairwiseExpProbVec_GivenObs_UncMod() compute_uniCondProb_based_on_bivProb <- function(bivProb, nvar, idx.pairs, idx.Y1, idx.Gy2, idx.cat.y1.split, idx.cat.y2.split) { bivProb.split <- split(bivProb, idx.pairs) lngth <- 2 * length(bivProb) idx.vec.el <- 1:lngth ProbY1Gy2 <- rep(NA, lngth) no.pairs <- nvar * (nvar - 1) / 2 idx2.pairs <- combn(nvar, 2) for (k in 1:no.pairs) { y2Sums <- tapply(bivProb.split[[k]], idx.cat.y2.split[[k]], sum) y2Sums.mult <- y2Sums[idx.cat.y2.split[[k]]] Y1Gy2 <- bivProb.split[[k]] / y2Sums.mult tmp.idx.vec.el <- idx.vec.el[(idx.Y1 == idx2.pairs[1, k]) & (idx.Gy2 == idx2.pairs[2, k])] ProbY1Gy2[tmp.idx.vec.el] <- Y1Gy2 } for (k in 1:no.pairs) { y1Sums <- tapply(bivProb.split[[k]], idx.cat.y1.split[[k]], sum) y1Sums.mult <- y1Sums[idx.cat.y1.split[[k]]] Y2Gy1 <- bivProb.split[[k]] / y1Sums.mult reordered_Y2Gy1 <- Y2Gy1[order(idx.cat.y1.split[[k]])] tmp.idx.vec.el <- idx.vec.el[(idx.Y1 == idx2.pairs[2, k]) & (idx.Gy2 == idx2.pairs[1, k])] ProbY1Gy2[tmp.idx.vec.el] <- reordered_Y2Gy1 } ProbY1Gy2 } # The input of the function is a lavobject, which, in turn, is the output of the # sem function having specified estimator="PML", missing="available.cases" # The output of the function is a list of two lists: the pairwiseProbGivObs list and # the univariateProbGivObs list. 
# Each of the two lists consists of G matrices where G
# is the number of groups in a multigroup analysis. If G=1 each of the lists
# contains only one matrix that can be called as pairwiseProbGivObs[[1]], and
# univariateProbGivObs[[1]].
# Each of the matrices in the pairwiseProbGivObs list is of dimension:
# nrow=sample size, ncol=sum of the number of response categories for all
# pairs of variables (i.e. the length of the vector pxixj.ab where i<j).
# Each of the matrices in the univariateProbGivObs list is of dimension:
# nrow=sample size, ncol=sum of the number of response categories of all
# variables.
pairwiseExpProbVec_GivenObs <- function(lavobject) {
  # yhat: if lavobject@Data@ngroups > 1, it is a list of G matrices
  # where G is the number of groups and the matrices are of dimension
  # nrow=sample size and ncol=number of items.
  # If lavobject@Data@ngroups=1 then yhat is a matrix.
  yhat <- lavPredict(object = lavobject, type = "yhat")
  # compute bivariate probabilities
  ngroups <- lavobject@Data@ngroups
  univariateProb <- vector("list", length = ngroups)
  pairwiseProb <- vector("list", length = ngroups)
  # save the indices of the Theta matrices for the groups stored in GLIST
  idx.ThetaMat <- which(names(lavobject@Model@GLIST) == "theta")
  for (g in seq_len(ngroups)) { # g<-1
    if (ngroups > 1L) {
      yhat_group <- yhat[[g]]
    } else {
      yhat_group <- yhat
    }
    nsize <- lavobject@Data@nobs[[g]]
    nvar <- lavobject@Model@nvar[[g]]
    Data <- lavobject@Data@X[[g]]
    TH <- lavobject@Fit@TH[[g]]
    th.idx <- lavobject@Model@th.idx[[g]]
    Theta <- lavobject@Model@GLIST[idx.ThetaMat[g]]$theta
    error.stddev <- diag(Theta)^0.5
    # for the computation of the univariate probabilities
    nlev <- lavobject@Data@ov$nlev
    idx.uniy <- rep(1:nvar, times = nlev)
    # indices vectors for the computation of bivariate probabilities
    idx.pairs.yiyj <- combn(1:nvar, 2)
    no_biv_resp_cat_yiyj <- sapply(1:ncol(idx.pairs.yiyj), function(x) {
      prod(nlev[idx.pairs.yiyj[, x]])
    })
    idx.y1 <- unlist(
      mapply(rep, idx.pairs.yiyj[1, ], each = no_biv_resp_cat_yiyj)
    )
    idx.y2 <- unlist(
      mapply(rep, idx.pairs.yiyj[2, ], each = no_biv_resp_cat_yiyj)
    )
    univariateProb[[g]] <- matrix(0, nrow = nsize, ncol = sum(nlev))
    pairwiseProb[[g]] <- matrix(0,
      nrow = nsize,
      ncol = length(lavobject@Cache[[g]]$bifreq)
    )
    idx.MissVar.casewise <- apply(Data, 1, function(x) {
      which(is.na(x))
    })
    for (i in 1:nsize) {
      idx.MissVar <- idx.MissVar.casewise[[i]]
      noMissVar <- length(idx.MissVar)
      if (noMissVar > 0L) {
        # compute the univariate probabilities
        TH.list <- split(TH, th.idx)
        tmp.TH <- TH.list[idx.MissVar]
        tmp.lowerTH <- unlist(lapply(tmp.TH, function(x) {
          c(-Inf, x)
        }))
        tmp.upperTH <- unlist(lapply(tmp.TH, function(x) {
          c(x, Inf)
        }))
        idx.items <- rep(c(1:noMissVar), times = nlev[idx.MissVar])
        tmp.mean <- yhat_group[i, idx.MissVar]
        tmp.mean.extended <- tmp.mean[idx.items]
        tmp.stddev <- error.stddev[idx.MissVar]
        tmp.stddev.extended <- tmp.stddev[idx.items]
        tmp.uniProb <- pnorm((tmp.upperTH - tmp.mean.extended) /
          tmp.stddev.extended) -
          pnorm((tmp.lowerTH - tmp.mean.extended) / tmp.stddev.extended)
        idx.columnsUni <- which(idx.uniy %in% idx.MissVar)
        univariateProb[[g]][i, idx.columnsUni] <- tmp.uniProb
        # compute the bivariate probabilities
        if (noMissVar > 1L) {
          idx.pairsMiss <- combn(idx.MissVar, 2)
          no.pairs <- ncol(idx.pairsMiss)
          idx.pairsV2 <- combn(noMissVar, 2)
          idx.columns <- unlist(lapply(1:no.pairs, function(x) {
            which((idx.y1 == idx.pairsMiss[1, x]) &
              (idx.y2 == idx.pairsMiss[2, x]))
          }))
          if (all(Theta[t(idx.pairsMiss)] == 0)) {
            # items independence given eta
            tmp.uniProb.list <- split(tmp.uniProb, idx.items)
            pairwiseProb[[g]][i, idx.columns] <-
              unlist(lapply(1:no.pairs, function(x) {
                c(outer(
                  tmp.uniProb.list[[idx.pairsV2[1, x]]],
                  tmp.uniProb.list[[idx.pairsV2[2, x]]]
                ))
              }))
          } else {
            # when correlation between measurement errors
            tmp.th.idx
<- th.idx[th.idx %in% idx.MissVar] # recode so that it is always 1,1,..,1, 2,...,2, etc. tmp.th.idx.recoded <- rep(c(1:noMissVar), times = table(tmp.th.idx)) tmp.TH <- TH[th.idx %in% idx.MissVar] tmp.ind.vec <- LongVecInd( no.x = noMissVar, all.thres = tmp.TH, index.var.of.thres = tmp.th.idx.recoded ) tmp.th.rho.vec <- LongVecTH.Rho.Generalised( no.x = noMissVar, TH = tmp.TH, th.idx = tmp.th.idx.recoded, cov.xixj = Theta[t(idx.pairsMiss)], mean.x = yhat_group[i, idx.MissVar], stddev.x = error.stddev[idx.MissVar] ) tmp.bivProb <- pairwiseExpProbVec( ind.vec = tmp.ind.vec, th.rho.vec = tmp.th.rho.vec ) pairwiseProb[[g]][i, idx.columns] <- tmp.bivProb } # end of else of if( all( Theta[t(idx.pairsMiss)]==0 ) ) # which checks item local independence } # end of if( noMissVar>1L ) # cat(i, "\n") } # end of if(noMissVar>0L) } # end of for(i in 1:nsize) } # end of for(g in seq_len(lavobject@Data@ngroups)) list( univariateProbGivObs = univariateProb, pairwiseProbGivObs = pairwiseProb ) } # end of the function pairwiseExpProbVec_GivenObs ################################################################## # LongVecTH.Rho.Generalised function is defined as follows LongVecTH.Rho.Generalised <- function(no.x, TH, th.idx, cov.xixj, mean.x, stddev.x) { all.std.thres <- (TH - mean.x[th.idx]) / stddev.x[th.idx] id.pairs <- utils::combn(no.x, 2) cor.xixj <- cov.xixj / (stddev.x[id.pairs[1, ]] * stddev.x[id.pairs[2, ]]) LongVecTH.Rho( no.x = no.x, all.thres = all.std.thres, index.var.of.thres = th.idx, rho.xixj = cor.xixj ) } # LongVecTH.Rho.Generalised is a generalisation of the function # lavaan:::LongVecTH.Rho . The latter assumes that all y* follow standard # normal so the thresholds are automatically the standardised ones. # LongVecTH.Rho.Generalised does not assume that, each of y*'s can follow # a normal distribution with mean mu and standard deviation sigma. # LongVecTH.Rho.Generalised has the following input arguments: # no.x (same as in lavaan:::LongVecTH.Rho), # TH (similar to the TH in lavaan:::LongVecTH.Rho but here they are the unstandardised thresholds, i.e. of the normal distribution with mean mu and standard deviation sigma) # th.idx (same as index.var.of.thres in lavaan:::LongVecTH.Rho) # cov.xixj which are the polychoric covariances of the pairs of underlying variables provided in a similar fashion as rho.xixj in lavaan:::LongVecTH.Rho) # mean.x is a vector including the means of y*'s provided in the order mean.x1, mean.x2, ...., mean.xp # stddev.x is a vector including the standard deviations of y*'s provided in the order stddev.x1, stddev.x2, ...., stddev.xp # The output of the new function is similar to that of lavaan:::LongVecTH.Rho############################################# # lavobject is the output of lavaan function where either the unconstrained # or a hypothesized model has been fitted pairwiseExpProbVec_GivenObs_UncMod <- function(lavobject) { ngroups <- lavobject@Data@ngroups TH <- lavobject@implied$th # these are the standardized thresholds # mean and variance of y* have been taken into account TH.IDX <- lavobject@SampleStats@th.idx Sigma.hat <- lavobject@implied$cov univariateProb <- vector("list", length = ngroups) pairwiseProb <- vector("list", length = ngroups) for (g in 1:ngroups) { Sigma.hat.g <- Sigma.hat[[g]] # is Sigma.hat always a correlation matrix? 
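# (not guaranteed: e.g. under the theta parameterization the y* variances
#  are not fixed to 1 -- hence the explicit cov2cor() below; this reading
#  is an assumption based on lavaan's delta/theta parameterizations for
#  categorical endogenous variables)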
Cor.hat.g <- cov2cor(Sigma.hat.g) cors <- Cor.hat.g[lower.tri(Cor.hat.g)] if (any(abs(cors) > 1)) { lav_msg_warn(gettext( "some model-implied correlations are larger than 1.0")) } nvar <- nrow(Sigma.hat.g) MEAN <- rep(0, nvar) TH.g <- TH[[g]] th.idx.g <- TH.IDX[[g]] nlev <- lavobject@Data@ov$nlev # create index vector to keep track which variable each column of # univariateProb matrix refers to idx.uniy <- rep(1:nvar, times = nlev) # create index vector to keep track which variables each column of # pairwiseProb matrix refers to idx.pairs.yiyj <- combn(1:nvar, 2) no_biv_resp_cat_yiyj <- sapply(1:ncol(idx.pairs.yiyj), function(x) { prod(nlev[idx.pairs.yiyj[, x]]) }) idx.y1 <- unlist( mapply(rep, idx.pairs.yiyj[1, ], each = no_biv_resp_cat_yiyj) ) idx.y2 <- unlist( mapply(rep, idx.pairs.yiyj[2, ], each = no_biv_resp_cat_yiyj) ) Data <- lavobject@Data@X[[g]] nsize <- nrow(Data) # create the lists of matrices univariateProb[[g]] <- matrix(0, nrow = nsize, ncol = sum(nlev)) pairwiseProb[[g]] <- matrix(0, nrow = nsize, ncol = length(lavobject@Cache[[g]]$bifreq) ) idx.MissVar.casewise <- apply(Data, 1, function(x) { which(is.na(x)) }) for (i in 1:nsize) { idx.MissVar <- idx.MissVar.casewise[[i]] noMissVar <- length(idx.MissVar) if (noMissVar > 0L) { # compute the denominator of the conditional probability TH.VAR <- lapply(1:nvar, function(x) c(-Inf, TH.g[th.idx.g == x], +Inf)) lower <- sapply(1:nvar, function(x) TH.VAR[[x]][Data[i, x]]) upper <- sapply(1:nvar, function(x) TH.VAR[[x]][Data[i, x] + 1L]) lower.denom <- lower[-idx.MissVar] upper.denom <- upper[-idx.MissVar] MEAN.i <- MEAN[-idx.MissVar] Corhat.i <- Cor.hat.g[-idx.MissVar, -idx.MissVar, drop = FALSE] denom <- sadmvn(lower.denom, upper.denom, mean = MEAN.i, varcov = Corhat.i)[1] } # end of if( noMissVar>0L ) if (noMissVar == 1L) { # only univariate probabilities for one item # compute the numerator TH.MissVar <- c(-Inf, TH.g[th.idx.g == idx.MissVar], +Inf) # for all response categories of the missing item no.cat <- nlev[idx.MissVar] numer <- sapply(1:no.cat, function(x) { lower[idx.MissVar] <- TH.MissVar[x] upper[idx.MissVar] <- TH.MissVar[x + 1L] sadmvn(lower, upper, mean = MEAN, varcov = Cor.hat.g)[1] }) idx.columnsUni <- which(idx.uniy %in% idx.MissVar) univariateProb[[g]][i, idx.columnsUni] <- numer / denom } # end of if( noMissVar==1L ) if (noMissVar > 1L) { # compute the bivariate probabilities and based on them # calculate the univariate ones # form all possible pairs of items with missing values idx.pairsMiss <- combn(idx.MissVar, 2) no.pairs <- ncol(idx.pairsMiss) for (j in 1:no.pairs) { idx.Missy1y2 <- idx.pairsMiss[, j] idx.Missy1 <- idx.Missy1y2[1] idx.Missy2 <- idx.Missy1y2[2] idx.MissRestItems <- idx.MissVar[!(idx.MissVar %in% idx.Missy1y2)] TH.Missy1 <- c(-Inf, TH.g[th.idx.g == idx.Missy1], +Inf) TH.Missy2 <- c(-Inf, TH.g[th.idx.g == idx.Missy2], +Inf) no.cat.Missy1 <- nlev[idx.Missy1] no.cat.Missy2 <- nlev[idx.Missy2] no.bivRespCat <- no.cat.Missy1 * no.cat.Missy2 mat_bivRespCat <- matrix(1:no.bivRespCat, nrow = no.cat.Missy1, ncol = no.cat.Missy2 ) numer <- sapply(1:no.bivRespCat, function(x) { idx_y1_cat <- which(mat_bivRespCat == x, arr.ind = TRUE)[1] idx_y2_cat <- which(mat_bivRespCat == x, arr.ind = TRUE)[2] lower[idx.Missy1y2] <- c(TH.Missy1[idx_y1_cat], TH.Missy2[idx_y2_cat]) upper[idx.Missy1y2] <- c(TH.Missy1[idx_y1_cat + 1L], TH.Missy2[idx_y2_cat + 1L]) lower.tmp <- lower upper.tmp <- upper MEAN.tmp <- MEAN Cor.hat.g.tmp <- Cor.hat.g if (length(idx.MissRestItems) > 0) { lower.tmp <- lower[-idx.MissRestItems] 
upper.tmp <- upper[-idx.MissRestItems] MEAN.tmp <- MEAN[-idx.MissRestItems] Cor.hat.g.tmp <- Cor.hat.g[-idx.MissRestItems, -idx.MissRestItems] } sadmvn(lower.tmp, upper.tmp, mean = MEAN.tmp, varcov = Cor.hat.g.tmp )[1] }) idx.columns <- which((idx.y1 == idx.Missy1) & (idx.y2 == idx.Missy2)) tmp_biv <- numer / denom pairwiseProb[[g]][i, idx.columns] <- tmp_biv # compute the univariateProb based on the above bivariate # probabilities if (j == 1L) { univariateProb[[g]][i, which(idx.uniy %in% idx.Missy1)] <- apply(mat_bivRespCat, 1, function(x) { sum(tmp_biv[x]) }) univariateProb[[g]][i, which(idx.uniy %in% idx.Missy2)] <- apply(mat_bivRespCat, 2, function(x) { sum(tmp_biv[x]) }) } if (j > 1L & j < noMissVar) { univariateProb[[g]][i, which(idx.uniy %in% idx.Missy2)] <- apply(mat_bivRespCat, 2, function(x) { sum(tmp_biv[x]) }) } } # end of for(j in 1:no.pairs ) #no.pairs is that of missing items } # end of if( noMissVar>1L ) } # end of for(i in 1:nsize) } # end of for(g in 1:ngroups) list( univariateProbGivObs = univariateProb, pairwiseProbGivObs = pairwiseProb ) } # end of function lavaan/R/lav_lavaanList_methods.R0000644000176200001440000001413114627656441016542 0ustar liggesusers# methods setMethod( "show", "lavaanList", function(object) { # show only basic information lav_lavaanList_short_summary(object, print = TRUE) } ) lav_lavaanList_short_summary <- function(object, print = TRUE) { txt <- sprintf( "lavaanList (%s) -- based on %d datasets (%d converged)\n", object@version, object@meta$ndat, sum(object@meta$ok) ) if (print) { cat(txt) } invisible(txt) } setMethod( "summary", "lavaanList", function(object, header = TRUE, estimates = TRUE, print = TRUE, nd = 3L) { lav_lavaanList_summary(object, header = header, estimates = estimates, print = print, nd = nd ) } ) lav_lavaanList_summary <- function(object, header = TRUE, estimates = TRUE, est.bias = TRUE, se.bias = TRUE, zstat = TRUE, pvalue = TRUE, print = TRUE, nd = 3L) { out <- list() if (header) { out$header <- lav_lavaanList_short_summary(object, print = print) # if(print) { # # show only basic information # lav_lavaanList_short_summary(object) # } } if (print) { output <- "text" } else { output <- "data.frame" } if (estimates && "partable" %in% object@meta$store.slots) { pe <- parameterEstimates(object, se = FALSE, remove.system.eq = FALSE, remove.eq = FALSE, remove.ineq = FALSE, remove.def = FALSE, remove.nonfree = FALSE, remove.unused = FALSE, # zstat = FALSE, pvalue = FALSE, ci = FALSE, standardized = FALSE, output = output ) # scenario 1: simulation if (!is.null(object@meta$lavSimulate)) { pe$est.true <- object@meta$est.true nel <- length(pe$est.true) # EST EST <- lav_lavaanList_partable(object, what = "est", type = "all") AVE <- rowMeans(EST, na.rm = TRUE) # remove things like equality constraints if (length(AVE) > nel) { AVE <- AVE[seq_len(nel)] } pe$est.ave <- AVE if (est.bias) { pe$est.bias <- pe$est.ave - pe$est.true } # SE? 
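# se.obs below is the empirical standard deviation of the estimates
# across the replications, while se.ave is the average of the estimated
# standard errors; in a well-calibrated simulation their difference
# se.bias = se.ave - se.obs should be close to zero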
if (se.bias) { SE.OBS <- apply(EST, 1L, sd, na.rm = TRUE) if (length(SE.OBS) > nel) { SE.OBS <- SE.OBS[seq_len(nel)] } pe$se.obs <- SE.OBS SE <- lav_lavaanList_partable(object, what = "se", type = "all") SE.AVE <- rowMeans(SE, na.rm = TRUE) if (length(SE.AVE) > nel) { SE.AVE <- SE.AVE[seq_len(nel)] } pe$se.ave <- SE.AVE pe$se.bias <- pe$se.ave - pe$se.obs } # scenario 2: bootstrap } else if (!is.null(object@meta$lavBootstrap)) { # print the average value for est EST <- lav_lavaanList_partable(object, what = "est", type = "all") pe$est.ave <- rowMeans(EST, na.rm = TRUE) # scenario 3: multiple imputation } else if (!is.null(object@meta$lavMultipleImputation)) { # pool est: take the mean EST <- lav_lavaanList_partable(object, what = "est", type = "all") m <- NCOL(EST) pe$est <- rowMeans(EST, na.rm = TRUE) # pool se # between-imputation variance # B.var <- apply(EST, 1L, var) est1 <- rowMeans(EST, na.rm = TRUE) est2 <- rowMeans(EST^2, na.rm = TRUE) B.var <- (est2 - est1 * est1) * m / (m - 1) # within-imputation variance SE <- lav_lavaanList_partable(object, what = "se", type = "all") W.var <- rowMeans(SE^2, na.rm = TRUE) # total variance: T.var = W.var + B.var + B.var/m pe$se <- sqrt(W.var + B.var + (B.var / m)) tmp.se <- ifelse(pe$se == 0.0, NA, pe$se) if (zstat) { pe$z <- pe$est / tmp.se if (pvalue) { pe$pvalue <- 2 * (1 - pnorm(abs(pe$z))) } } # scenario 4: multiple groups/sets } else if (!is.null(object@meta$lavMultipleGroups)) { # show individual estimates, for each group EST <- lav_lavaanList_partable(object, what = "est", type = "all") EST <- as.list(as.data.frame(EST)) ngroups <- length(EST) names(EST) <- object@meta$group.label ATTR <- attributes(pe) NAMES <- c(names(pe), names(EST)) pe <- c(pe, EST) attributes(pe) <- ATTR names(pe) <- NAMES } # scenario 5: just a bunch of fits, using different datasets else { # print the average value for est EST <- lav_lavaanList_partable(object, what = "est", type = "all") pe$est.ave <- rowMeans(EST, na.rm = TRUE) # more? } # remove ==,<,> rm.idx <- which(pe$op %in% c("==", "<", ">")) if (length(rm.idx) > 0L) { pe <- pe[-rm.idx, ] } out$pe <- pe if (print) { # print pe? 
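# (with output = "text", pe is a lavaan.parameterEstimates data frame,
#  so this dispatches to its print method; nd sets the number of
#  decimals shown)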
print(pe, nd = nd) } } else { cat("available slots (per dataset) are:\n") print(object@meta$store.slots) } invisible(out) } setMethod( "coef", "lavaanList", function(object, type = "free", labels = TRUE) { lav_lavaanList_partable( object = object, what = "est", type = type, labels = labels ) } ) lav_lavaanList_partable <- function(object, what = "est", type = "free", labels = TRUE) { if ("partable" %in% object@meta$store.slots) { if (what %in% names(object@ParTableList[[1]])) { OUT <- sapply(object@ParTableList, "[[", what) } else { lav_msg_stop(gettextf( "column `%s' not found in the first element of the ParTableList slot.", what)) } } else { lav_msg_stop(gettext("no ParTable slot stored in lavaanList object")) } if (type == "user" || type == "all") { type <- "user" idx <- 1:length(object@ParTable$lhs) } else if (type == "free") { idx <- which(object@ParTable$free > 0L & !duplicated(object@ParTable$free)) } else { lav_msg_stop(gettext("argument `type' must be one of free or user")) } OUT <- OUT[idx, , drop = FALSE] if (labels) { rownames(OUT) <- lav_partable_labels(object@ParTable, type = type) } OUT } lavaan/R/lav_partable_utils.R0000644000176200001440000002700714627656440015740 0ustar liggesusers# what are the block values (not necessarily integers) lav_partable_block_values <- function(partable) { if (is.null(partable$block)) { block.values <- 1L } else { # always integers tmp <- partable$block[partable$block > 0L & # non-zero only !partable$op %in% c("==", "<", ">", ":=")] block.values <- unique(na.omit(tmp)) # could be, eg, '2' only } block.values } # guess number of blocks from a partable lav_partable_nblocks <- function(partable) { length(lav_partable_block_values(partable)) } # what are the group values (not necessarily integers) lav_partable_group_values <- function(partable) { # FLAT? if (any(partable$op == ":")) { colon.idx <- which(partable$op == ":" & tolower(partable$lhs) == "group") if (length(colon.idx) > 0L) { group.values <- unique(partable$rhs[colon.idx]) } # regular partable } else if (is.null(partable$group)) { group.values <- 1L } else if (is.numeric(partable$group)) { tmp <- partable$group[partable$group > 0L & !partable$op %in% c("==", "<", ">", ":=")] group.values <- unique(na.omit(tmp)) } else { # character tmp <- partable$group[nchar(partable$group) > 0L & !partable$op %in% c("==", "<", ">", ":=")] group.values <- unique(na.omit(tmp)) } group.values } # guess number of groups from a partable lav_partable_ngroups <- function(partable) { length(lav_partable_group_values(partable)) } # what are the level values (not necessarily integers) lav_partable_level_values <- function(partable) { # FLAT? 
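# a FLAT (pre-lavaanify) parameter table has no $level column yet;
# levels appear as separator rows instead, e.g. (illustrative):
#   lhs      op  rhs
#   "level"  ":" "within"
# so the level values are read from the rhs of those rows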
if (any(partable$op == ":")) { colon.idx <- which(partable$op == ":" & tolower(partable$lhs) == "level") level.values <- integer(0L) if (length(colon.idx) > 0L) { level.values <- unique(partable$rhs[colon.idx]) } # regular partable } else if (is.null(partable$level)) { level.values <- 1L } else if (is.numeric(partable$level)) { tmp <- partable$level[partable$level > 0L & !partable$op %in% c("==", "<", ">", ":=")] level.values <- unique(na.omit(tmp)) } else { # character tmp <- partable$level[nchar(partable$level) > 0L & !partable$op %in% c("==", "<", ">", ":=")] level.values <- unique(na.omit(tmp)) } level.values } # guess number of levels from a partable lav_partable_nlevels <- function(partable) { length(lav_partable_level_values(partable)) } # efa sets values lav_partable_efa_values <- function(partable) { if (is.null(partable$efa)) { efa.values <- character(0L) } else { # should be character tmp.efa <- as.character(partable$efa) tmp <- tmp.efa[nchar(tmp.efa) > 0L & !partable$op %in% c("==", "<", ">", ":=")] efa.values <- unique(na.omit(tmp)) } efa.values } # number of efa sets from a partable lav_partable_nefa <- function(partable) { length(lav_partable_efa_values(partable)) } # number of sample statistics per block lav_partable_ndat <- function(partable) { # global meanstructure <- any(partable$op == "~1") fixed.x <- any(partable$exo > 0L & partable$free == 0L) conditional.x <- any(partable$exo > 0L & partable$op == "~") categorical <- any(partable$op == "|") correlation <- any(partable$op == "~*~") if (categorical) { meanstructure <- TRUE } # blocks nblocks <- lav_partable_nblocks(partable) nlevels <- lav_partable_nlevels(partable) ndat <- integer(nblocks) for (b in seq_len(nblocks)) { # how many observed variables in this block? if (conditional.x) { ov.names <- lav_partable_vnames(partable, "ov.nox", block = b) } else { ov.names <- lav_partable_vnames(partable, "ov", block = b) } nvar <- length(ov.names) # pstar pstar <- nvar * (nvar + 1) / 2 if (meanstructure) { pstar <- pstar + nvar # no meanstructure if within level, except ov.x which is not # decomposed if (nlevels > 1L && (b %% nlevels) == 1L) { # all zero pstar <- pstar - nvar # except within-only 'y' ov.names.y <- lav_partable_vnames(partable, "ov.nox", block = b) ov.names.y2 <- unlist(lav_partable_vnames(partable, "ov", block = seq_len(nblocks)[-b] )) ov.names.y <- ov.names.y[!ov.names.y %in% ov.names.y2] if (length(ov.names.y) > 0L) { pstar <- pstar + length(ov.names.y) } # except within-only 'x' (unless fixed.x) ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) ov.names.x2 <- unlist(lav_partable_vnames(partable, "ov", block = seq_len(nblocks)[-b] )) ov.names.x <- ov.names.x[!ov.names.x %in% ov.names.x2] if (!fixed.x && length(ov.names.x) > 0L) { pstar <- pstar + length(ov.names.x) } } } ndat[b] <- pstar # correction for fixed.x? if (!conditional.x && fixed.x) { ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) nvar.x <- length(ov.names.x) pstar.x <- nvar.x * (nvar.x + 1) / 2 if (meanstructure) { if (nlevels > 1L && (b %% nlevels) == 1L) { # do nothing, they are already removed } else { pstar.x <- pstar.x + nvar.x } } ndat[b] <- ndat[b] - pstar.x } # correction for ordinal data? 
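# an ordinal variable with c categories contributes (c - 1) thresholds
# instead of a mean and a variance; a worked count (illustrative):
# 4 ordinal items with 3 categories each and no exogenous covariates give
# pstar = 4*5/2 + 4 = 14; dropping 4 variances and 4 means and adding
# 4*(3 - 1) = 8 thresholds yields ndat[b] = 14 again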
if (categorical) { ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) nexo <- length(ov.names.x) ov.ord <- lav_partable_vnames(partable, "ov.ord", block = b) nvar.ord <- length(ov.ord) th <- lav_partable_vnames(partable, "th", block = b) nth <- length(th) # no variances ndat[b] <- ndat[b] - nvar.ord # no means ndat[b] <- ndat[b] - nvar.ord # but additional thresholds ndat[b] <- ndat[b] + nth # add slopes if (conditional.x) { ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) nexo <- length(ov.names.x) ndat[b] <- ndat[b] + (nvar * nexo) } } # correction for correlation not categorical if (correlation && !categorical) { ndat[b] <- ndat[b] - nvar } # correction for conditional.x not categorical if (conditional.x && !categorical) { ov.names.x <- lav_partable_vnames(partable, "ov.x", block = b) nexo <- length(ov.names.x) # add slopes ndat[b] <- ndat[b] + (nvar * nexo) } # correction for group proportions? group.idx <- which(partable$lhs == "group" & partable$op == "%" & partable$block == b) if (length(group.idx) > 0L) { # ndat <- ndat + (length(group.idx) - 1L) # G - 1 (sum to one) ndat[b] <- ndat[b] + 1L # poisson: each cell a parameter } } # blocks # sum over all blocks sum(ndat) } # total number of free parameters (ignoring equality constraints) lav_partable_npar <- function(partable) { # we only assume non-zero values npar <- length(which(partable$free > 0L)) npar } # global degrees of freedom: ndat - npar # ignoring constraints! (not very useful) # # we need to find the rank of con.jac to find the exact amount # of non-redundant equality constraints (this is done in lav_test.R) lav_partable_df <- function(partable) { npar <- lav_partable_npar(partable) ndat <- lav_partable_ndat(partable) # degrees of freedom df <- ndat - npar as.integer(df) } # check order of covariances: we only fill the upper.tri # therefore, we 'switch' lhs & rhs if they appear in the wrong order lav_partable_covariance_reorder <- function(partable, # nolint ov.names = NULL, lv.names = NULL) { # shortcut cov.idx <- which(partable$op == "~~" & partable$lhs != partable$rhs) if (length(cov.idx) == 0L) { # nothing to do return(partable) } # get names if (is.null(ov.names)) { ov.names <- lav_partable_vnames(partable, "ov") } else { ov.names <- unlist(ov.names) } if (is.null(lv.names)) { lv.names <- lav_partable_vnames(partable, "lv") # add random slopes (if any) if (!is.null(partable$rv) && any(nchar(partable$rv) > 0L)) { rv.names <- unique(partable$rv[nchar(partable$rv) > 0L]) lv.names <- c(lv.names, rv.names) } } else { lv.names <- unlist(lv.names) } lv.ov.names <- c(lv.names, ov.names) # identify wrong ordering lhs.idx <- match(partable$lhs[cov.idx], lv.ov.names) rhs.idx <- match(partable$rhs[cov.idx], lv.ov.names) swap.idx <- cov.idx[lhs.idx > rhs.idx] if (length(swap.idx) == 0L) { # nothing to do return(partable) } # swap! 
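# e.g. (illustrative) with lv.names = "f" and ov.names = c("x1", "x2"),
# lv.ov.names = c("f", "x1", "x2"); a user-specified "x1 ~~ f" has
# lhs.idx = 2 > rhs.idx = 1 and is therefore stored as "f ~~ x1", so
# only the upper triangle is ever filled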
tmp <- partable$lhs[swap.idx] partable$lhs[swap.idx] <- partable$rhs[swap.idx] partable$rhs[swap.idx] <- tmp partable } # add a single parameter to an existing parameter table lav_partable_add <- function(partable = NULL, add = list()) { # treat partable as list, not as a data.frame partable <- as.list(partable) # number of elements nel <- length(partable$lhs) # add copy of last row for (c in seq_len(length(partable))) { if (is.integer(partable[[c]][[1]])) { if (partable[[c]][nel] == 0L) { partable[[c]][nel + 1] <- 0L } else if (partable[[c]][nel] == 1L) { partable[[c]][nel + 1] <- 1L } else { partable[[c]][nel + 1] <- partable[[c]][nel] + 1L } } else if (is.character(partable[[c]][[1]])) { partable[[c]][nel + 1] <- "" } else if (is.numeric(partable[[c]][[1]])) { partable[[c]][nel + 1] <- 0 } else { partable[[c]][nel + 1] <- partable[[c]][nel] } # replace if (names(partable)[c] %in% names(add)) { partable[[c]][nel + 1] <- add[[names(partable)[c]]] } } partable } # look for p2-row-idx of p1 elements # p1 is usually a subset of p2 # return NA if not found lav_partable_map_id_p1_in_p2 <- function(p1, p2, stopifnotfound = TRUE, exclude.nonpar = TRUE) { # check if we have a 'block' column (in both p1 and p2) if (is.null(p1$block)) { if (is.null(p1$group)) { p1$block <- rep.int(1L, length(p1$lhs)) } else { p1$block <- p1$group } } if (is.null(p2$block)) { if (is.null(p2$group)) { p2$block <- rep.int(1L, length(p2$lhs)) } else { p2$block <- p2$group } } # ALL rows from p1, or only 'parameters'? if (exclude.nonpar) { # get all parameters that have a '.p*' plabel # (they exclude "==", "<", ">", ":=") p1.idx <- which(grepl("\\.p", p1$plabel)) } else { # all of it # note: block should be '0' in both p1 and p2 p1.idx <- seq_len(length(p1$lhs)) } np1 <- length(p1.idx) # return p2.id p2.id <- integer(np1) # check every parameter in p1 for (i in seq_len(np1)) { # identify parameter in p1 lhs <- p1$lhs[i] op <- p1$op[i] rhs <- p1$rhs[i] block <- p1$block[i] # search for corresponding parameter in p2 p2.idx <- which(p2$lhs == lhs & p2$op == op & p2$rhs == rhs & p2$block == block) # found? if (length(p2.idx) == 0L) { if (stopifnotfound) { lav_msg_stop(gettext("parameter in p1 not found in p2:"), paste(lhs, op, rhs, "(block = ", block, ")", sep = " ") ) } else { p2.id[i] <- as.integer(NA) } } else { p2.id[i] <- p2.idx } } p2.id } lav_partable_da2ovda <- function(partable) { # convert handling of ov.order = "data" with "da-operator elements" # to "ovda attribute" if (any(partable$op == "da")) { da.idx <- which(partable$op == "da") ov.names.data <- partable$lhs[da.idx] temp <- lapply(partable, function(x) x[-da.idx]) # names(temp) <- names(partable) attr(temp, "ovda") <- ov.names.data return(temp) } return(partable) } lavaan/R/lav_matrix_rotate_methods.R0000644000176200001440000004405614627656441017337 0ustar liggesusers# various rotation criteria and their gradients # YR 05 April 2019: initial version # YR 14 June 2019: add more rotation criteria # references: # # Bernaards, C. A., & Jennrich, R. I. (2005). Gradient projection algorithms # and software for arbitrary rotation criteria in factor analysis. Educational # and Psychological Measurement, 65(5), 676-696. # old website: http://web.archive.org/web/20180708170331/http://www.stat.ucla.edu/research/gpa/splusfunctions.net # # Browne, M. W. (2001). An overview of analytic rotation in exploratory factor # analysis. Multivariate behavioral research, 36(1), 111-150. # # Mulaik, S. A. (2010). Foundations of factor analysis (Second Edition). 
# Boca Raton: Chapman and Hall/CRC. # Note: this is YR's implementation, not a copy of the GPArotation # package # # Why did I write my own functions (and not use the GPArotation): # - to better understand what is going on # - to have direct access to the gradient functions # - to avoid yet another dependency # - to simplify further experiments # Orthomax family (Harman, 1960) # # gamma = 0 -> quartimax # gamma = 1/2 -> biquartimax # gamma = 1/P -> equamax # gamma = 1 -> varimax # lav_matrix_rotate_orthomax <- function(LAMBDA = NULL, orthomax.gamma = 1, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA # center L2 column-wise cL2 <- t(t(L2) - orthomax.gamma * colMeans(L2)) out <- -1 * sum(L2 * cL2) / 4 if (grad) { attr(out, "grad") <- -1 * LAMBDA * cL2 } out } # Crawford-Ferguson (1970) family # # combine penalization for 1) row complexity, and 2) column complexity # if combined with orthogonal rotation, this is equivalent to the # orthomax family: # # quartimax -> gamma = 0 (only row complexity) # varimax -> gamma = 1/nrow # equamax -> gamma = ncol/(2*nrow) # parsimax -> gamma = (ncol - 1)/(nrow + ncol - 2) # factor parsimony -> gamma = 1 (only column complexity) # # the Crawford-Ferguson family is also equivalent to the oblimin family # if the latter is restricted to orthogonal rotation # lav_matrix_rotate_cf <- function(LAMBDA = NULL, cf.gamma = 0, ..., grad = FALSE) { # check if gamma is between 0 and 1? nRow <- nrow(LAMBDA) nCol <- ncol(LAMBDA) ROW1 <- matrix(1.0, nCol, nCol) diag(ROW1) <- 0.0 COL1 <- matrix(1.0, nRow, nRow) diag(COL1) <- 0.0 L2 <- LAMBDA * LAMBDA LR <- L2 %*% ROW1 LC <- COL1 %*% L2 f1 <- sum(L2 * LR) / 4 f2 <- sum(L2 * LC) / 4 out <- (1 - cf.gamma) * f1 + cf.gamma * f2 if (grad) { attr(out, "grad") <- ((1 - cf.gamma) * LAMBDA * LR) + (cf.gamma * LAMBDA * LC) } out } # Oblimin family (Carroll, 1960; Harman, 1976) # # quartimin -> gamma = 0 # biquartimin -> gamma = 1/2 # covarimin -> gamma = 1 # # if combined with orthogonal rotation, this is equivalent to the # orthomax family (they have the same optimizers): # # gamma = 0 -> quartimax # gamma = 1/2 -> biquartimax # gamma = 1 -> varimax # gamma = P/2 -> equamax # lav_matrix_rotate_oblimin <- function(LAMBDA = NULL, oblimin.gamma = 0, ..., grad = FALSE) { nRow <- nrow(LAMBDA) nCol <- ncol(LAMBDA) ROW1 <- matrix(1.0, nCol, nCol) diag(ROW1) <- 0.0 L2 <- LAMBDA * LAMBDA LR <- L2 %*% ROW1 Jp <- matrix(1, nRow, nRow) / nRow # see Jennrich (2002, p. 11) tmp <- (diag(nRow) - oblimin.gamma * Jp) %*% LR # same as t( t(L2) - gamma * colMeans(L2) ) %*% ROW1 out <- sum(L2 * tmp) / 4 if (grad) { attr(out, "grad") <- LAMBDA * tmp } out } # quartimax criterion # Carroll (1953); Saunders (1953) Neuhaus & Wrigley (1954); Ferguson (1954) # we use here the equivalent 'Ferguson, 1954' variant # (See Mulaik 2010, p. 303) lav_matrix_rotate_quartimax <- function(LAMBDA = NULL, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA out <- -1 * sum(L2 * L2) / 4 if (grad) { attr(out, "grad") <- -1 * LAMBDA * L2 } out } # varimax criterion # Kaiser (1958, 1959) # # special case of the Orthomax family (Harman, 1960), where gamma = 1 # see Jennrich (2001, p. 296) lav_matrix_rotate_varimax <- function(LAMBDA = NULL, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA # center L2 column-wise cL2 <- t(t(L2) - colMeans(L2)) out <- -1 * abs(sum(L2 * cL2)) / 4 # abs needed? 
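  # (the leading -1 turns the classical varimax criterion, which is
  #  maximized, into the "minimize version" convention used by the other
  #  criteria in this file)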
if (grad) { attr(out, "grad") <- -1 * LAMBDA * cL2 } out } # quartimin criterion (part of Carroll's oblimin family lav_matrix_rotate_quartimin <- function(LAMBDA = NULL, ..., grad = FALSE) { nCol <- ncol(LAMBDA) ROW1 <- matrix(1.0, nCol, nCol) diag(ROW1) <- 0.0 L2 <- LAMBDA * LAMBDA LR <- L2 %*% ROW1 out <- sum(L2 * LR) / 4 if (grad) { attr(out, "grad") <- LAMBDA * LR } out } # Browne's (2001) version of Yates (1984) geomin criterion # # we use the exp/log trick as in Bernaard & Jennrich (2005, p. 687) lav_matrix_rotate_geomin <- function(LAMBDA = NULL, geomin.epsilon = 0.01, ..., grad = FALSE) { nCol <- ncol(LAMBDA) L2 <- LAMBDA * LAMBDA L2 <- L2 + geomin.epsilon if (geomin.epsilon < sqrt(.Machine$double.eps)) { # Yates's original formula tmp <- apply(L2, 1, prod)^(1 / nCol) } else { tmp <- exp(rowSums(log(L2)) / nCol) } out <- sum(tmp) if (grad) { attr(out, "grad") <- (2 / nCol) * LAMBDA / L2 * tmp } out } # simple entropy # seems to only work for orthogonal rotation lav_matrix_rotate_entropy <- function(LAMBDA = NULL, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA # handle zero elements -> replace by '1', so log(1) == 0 L2[L2 == 0] <- 1 out <- -1 * sum(L2 * log(L2)) / 2 if (grad) { attr(out, "grad") <- -LAMBDA * log(L2) - LAMBDA } out } # McCammon's (1966) Minimum Entropy Criterion # # for p-vector x, where x > 0 and sum(x) = 1, we have # - entropy(x) == 0, if there is only one 1, and all zeroes # - entropy(x) == max == log(p) if all elements are 1/p # - entropy(x) is similar as complexity(x), but also measure of equality # of elements of x # # works only ok with orthogonal rotation! lav_matrix_rotate_mccammon <- function(LAMBDA = NULL, ..., grad = FALSE) { nCol <- ncol(LAMBDA) nRow <- nrow(LAMBDA) L2 <- LAMBDA * LAMBDA # entropy function (Browne, 2001, eq 9) f_entropy <- function(x) { -1 * sum(ifelse(x > 0, x * log(x), 0)) } # sums of rows/columns/all sumi. <- rowSums(L2) sum.j <- colSums(L2) sum.. <- sum(L2) Q1 <- f_entropy(t(L2) / sum.j) # encouraging columns with few large, # and many small elements Q2 <- f_entropy(sum.j / sum..) # encouraging equal column sums # minimize out <- log(Q1) - log(Q2) if (grad) { # See Bernaards and Jennrich 2005 page 685+686 H <- -(log(t(t(L2) / sum.j)) + 1) G1 <- t(t(H) / sum.j - rowSums(t(L2 * H) / (sum.j * sum.j))) h <- -(log(sum.j / sum..) + 1) alpha <- as.numeric(h %*% sum.j) / (sum.. * sum..) # paper divides by # sum.., not sum..^2?? G2 <- matrix(h / sum.. - alpha, nRow, nCol, byrow = TRUE) attr(out, "grad") <- 2 * LAMBDA * (G1 / Q1 - G2 / Q2) } out } # Infomax # McKeon (1968, unpublished) and Browne (2001) # Treat LAMBDA^2 as a contingency table, and use simplicity function based # on tests for association; most effective was LRT for association # (see Agresti, 1990, eq 3.13) which is maximized for max simplicity # # McKeon: criterion may be regarded as a measure of information about row # categories conveyed by column categories (and vice versa); hence infomax # - favors perfect cluster # - discourages general factor # - both for orthogonal and oblique rotation # # Note: typo in Browne (2001), see last paragraph of Bernaards and # Jennrich (2005) page 684 lav_matrix_rotate_infomax <- function(LAMBDA = NULL, ..., grad = FALSE) { nCol <- ncol(LAMBDA) nRow <- nrow(LAMBDA) L2 <- LAMBDA * LAMBDA # entropy function (Browne, 2001, eq 9) f_entropy <- function(x) { -1 * sum(ifelse(x > 0, x * log(x), 0)) } # sums of rows/columns/all sumi. <- rowSums(L2) sum.j <- colSums(L2) sum.. <- sum(L2) Q1 <- f_entropy(L2 / sum..) # Bernaards & Jennrich version!! 
(Browne # divides by sum.j, like in McCammon) Q2 <- f_entropy(sum.j / sum..) Q3 <- f_entropy(sumi. / sum..) # minimize out <- log(nCol) + Q1 - Q2 - Q3 if (grad) { H <- -(log(L2 / sum..) + 1) alpha <- sum(L2 * H) / (sum.. * sum..) G1 <- H / sum.. - alpha hj <- -(log(sum.j / sum..) + 1) alphaj <- as.numeric(hj %*% sum.j) / (sum.. * sum..) G2 <- matrix(hj, nRow, nCol, byrow = TRUE) / sum.. - alphaj hi <- -(log(sumi. / sum..) + 1) alphai <- as.numeric(sumi. %*% hi) / (sum.. * sum..) G3 <- matrix(hi, nRow, nCol) / sum.. - alphai attr(out, "grad") <- 2 * LAMBDA * (G1 - G2 - G3) } out } # oblimax # Harman, 1976; Saunders, 1961 # # for orthogonal rotation, oblimax is equivalent to quartimax lav_matrix_rotate_oblimax <- function(LAMBDA = NULL, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA # minimize version out <- -log(sum(L2 * L2)) + 2 * log(sum(L2)) if (grad) { attr(out, "grad") <- (-4 * L2 * LAMBDA / (sum(L2 * L2)) + 4 * LAMBDA / (sum(L2))) } out } # Bentler's Invariant Pattern Simplicity # Bentler (1977) # # lav_matrix_rotate_bentler <- function(LAMBDA = NULL, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA L2tL2 <- crossprod(L2) L2tL2.inv <- lav_matrix_symmetric_inverse(S = L2tL2, logdet = TRUE) L2tL2.logdet <- attr(L2tL2.inv, "logdet") DIag <- diag(L2tL2) DIag.inv <- diag(1 / DIag) DIag.logdet <- sum(log(DIag)) # add small constant? # minimize version out <- -(L2tL2.logdet - DIag.logdet) / 4 if (grad) { attr(out, "grad") <- -LAMBDA * (L2 %*% (L2tL2.inv - DIag.inv)) } out } # The Tandem criteria # Comrey (1967) # # only for sequential use: # - tandem1 is used to determine the number of factors # (it removes the minor factors) # - tandomII is used for final rotation # lav_matrix_rotate_tandem1 <- function(LAMBDA, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA LL <- tcrossprod(LAMBDA) LL2 <- LL * LL # minimize version out <- -1 * sum(L2 * (LL2 %*% L2)) if (grad) { tmp1 <- 4 * LAMBDA * (LL2 %*% L2) tmp2 <- 4 * (LL * (L2 %*% t(L2))) %*% LAMBDA attr(out, "grad") <- -tmp1 - tmp2 } out } lav_matrix_rotate_tandem2 <- function(LAMBDA, ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA LL <- tcrossprod(LAMBDA) LL2 <- LL * LL # minimize version out <- sum(L2 * ((1 - LL2) %*% L2)) if (grad) { tmp1 <- 4 * LAMBDA * ((1 - LL2) %*% L2) tmp2 <- 4 * (LL * tcrossprod(L2, L2)) %*% LAMBDA attr(out, "grad") <- tmp1 - tmp2 } out } # simplimax # Kiers (1994) # # oblique rotation method # designed to rotate so that a given number 'k' of small loadings are # as close to zero as possible # # may be viewed as partially specified target rotation with # dynamically chosen weights # lav_matrix_rotate_simplimax <- function(LAMBDA = NULL, k = nrow(LAMBDA), ..., grad = FALSE) { L2 <- LAMBDA * LAMBDA # 'k' smallest element of L2 small.element <- sort(L2)[k] # which elements are smaller than (or equal than) 'small.element'? 
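  # (with the default k = nrow(LAMBDA), the k smallest squared loadings --
  #  one per row on average -- are the ones pulled toward zero; sign()
  #  maps the logical matrix to 0/1 weights)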
ID <- sign(L2 <= small.element) # minimize version out <- sum(L2 * ID) if (grad) { attr(out, "grad") <- 2 * ID * LAMBDA } out } # target rotation # Harman, 1976 # # LAMBDA is rotated toward a specified target matrix 'target' # # Note: 'target' must be fully specified; if there are any NAs # use lav_matrix_rotate_pst() instead # lav_matrix_rotate_target <- function(LAMBDA = NULL, target = NULL, ..., grad = FALSE) { # squared difference DIFF <- LAMBDA - target DIFF2 <- DIFF * DIFF out <- sum(DIFF2, na.rm = TRUE) if (grad) { tmp <- 2 * DIFF # change NAs to zero tmp[is.na(tmp)] <- 0 attr(out, "grad") <- tmp } out } # partially specified target rotation # # Browne 1972a, 1972b # # a pre-specified weight matrix W with ones/zeroes determines # which elements of (LAMBDA - target) are used by the rotation criterion # # if 'target' contains NAs, they should correspond to '0' values in the # target.mask matrix # lav_matrix_rotate_pst <- function(LAMBDA = NULL, target = NULL, target.mask = NULL, ..., grad = FALSE) { # mask target+LAMBDA target <- target.mask * target LAMBDA <- target.mask * LAMBDA # squared difference DIFF <- LAMBDA - target DIFF2 <- DIFF * DIFF # minimize out <- sum(DIFF2, na.rm = TRUE) if (grad) { tmp <- 2 * DIFF # change NAs to zero tmp[is.na(tmp)] <- 0 attr(out, "grad") <- tmp } out } # bi-quartimin # # Jennrich & Bentler 2011 # lav_matrix_rotate_biquartimin <- function(LAMBDA, ..., grad = FALSE) { # see Matlab code page 549 stopifnot(ncol(LAMBDA) > 1L) # remove first column LAMBDA.group <- LAMBDA[, -1, drop = FALSE] # apply quartimin on the 'group' part out <- lav_matrix_rotate_quartimin(LAMBDA.group, ..., grad = grad) if (grad) { tmp <- attr(out, "grad") attr(out, "grad") <- cbind(0, tmp) } out } # bi-geomin # # Jennrich & Bentler 2012 # lav_matrix_rotate_bigeomin <- function(LAMBDA, geomin.epsilon = 0.01, ..., grad = FALSE) { stopifnot(ncol(LAMBDA) > 1L) # remove first column LAMBDA.group <- LAMBDA[, -1, drop = FALSE] # apply geomin on the 'group' part out <- lav_matrix_rotate_geomin(LAMBDA.group, geomin.epsilon = geomin.epsilon, ..., grad = grad ) if (grad) { tmp <- attr(out, "grad") attr(out, "grad") <- cbind(0, tmp) } out } # gradient check ilav_matrix_rotate_grad_test <- function(crit = NULL, ..., LAMBDA = NULL, nRow = 20L, nCol = 5L) { # test matrix if (is.null(LAMBDA)) { LAMBDA <- matrix(rnorm(nRow * nCol), nRow, nCol) } ff <- function(x, ...) 
{ Lambda <- matrix(x, nRow, nCol) crit(Lambda, ..., grad = FALSE) } GQ1 <- matrix( numDeriv::grad(func = ff, x = as.vector(LAMBDA), ...), nRow, nCol ) GQ2 <- attr(crit(LAMBDA, ..., grad = TRUE), "grad") if (lav_verbose()) { print(list(LAMBDA = LAMBDA, GQ1 = GQ1, GQ2 = GQ2)) } all.equal(GQ1, GQ2, tolerance = 1e-07) } ilav_matrix_rotate_grad_test_all <- function() { # Orthomax family with various values for gamma for (gamma in seq(0, 1, 0.2)) { check <- ilav_matrix_rotate_grad_test( crit = lav_matrix_rotate_orthomax, gamma = gamma ) if (is.logical(check) && check) { cat( "orthomax + gamma = ", sprintf("%3.1f", gamma), ": OK\n" ) } else { cat( "orthomax + gamma = ", sprintf("%3.1f", gamma), ": FAILED\n" ) } } # Crawford-Ferguson with various values for gamma for (gamma in seq(0, 1, 0.2)) { check <- ilav_matrix_rotate_grad_test( crit = lav_matrix_rotate_cf, gamma = gamma ) if (is.logical(check) && check) { cat( "Crawford-Ferguson + gamma = ", sprintf("%3.1f", gamma), ": OK\n" ) } else { cat( "Crawford-Ferguson + gamma = ", sprintf("%3.1f", gamma), ": FAILED\n" ) } } # Oblimin family with various values for gamma for (gamma in seq(0, 1, 0.2)) { check <- ilav_matrix_rotate_grad_test( crit = lav_matrix_rotate_oblimin, gamma = gamma ) if (is.logical(check) && check) { cat( "Oblimin + gamma = ", sprintf("%3.1f", gamma), ": OK\n" ) } else { cat( "Oblimin + gamma = ", sprintf("%3.1f", gamma), ": FAILED\n" ) } } # quartimax check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_quartimax) if (is.logical(check) && check) { cat("quartimax: OK\n") } else { cat("quartimax: FAILED\n") } # varimax check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_varimax) if (is.logical(check) && check) { cat("varimax: OK\n") } else { cat("varimax: FAILED\n") } # quartimin check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_quartimin) if (is.logical(check) && check) { cat("quartimin: OK\n") } else { cat("quartimin: FAILED\n") } # geomin check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_geomin) if (is.logical(check) && check) { cat("geomin: OK\n") } else { cat("geomin: FAILED\n") } # simple entropy check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_entropy) if (is.logical(check) && check) { cat("entropy: OK\n") } else { cat("entropy: FAILED\n") } # McCammon entropy criterion check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_mccammon) if (is.logical(check) && check) { cat("McCammon: OK\n") } else { cat("McCammon: FAILED\n") } # infomax check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_infomax) if (is.logical(check) && check) { cat("infomax: OK\n") } else { cat("infomax: FAILED\n") } # oblimax check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_oblimax) if (is.logical(check) && check) { cat("oblimax: OK\n") } else { cat("oblimax: FAILED\n") } # bentler check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_bentler) if (is.logical(check) && check) { cat("bentler: OK\n") } else { cat("bentler: FAILED\n") } # simplimax check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_simplimax) if (is.logical(check) && check) { cat("simplimax: OK\n") } else { cat("simplimax: FAILED\n") } # tandem1 check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_tandem1) if (is.logical(check) && check) { cat("tandem1: OK\n") } else { cat("tandem1: FAILED\n") } # tandem2 check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_tandem2) if (is.logical(check) && check) { cat("tandem2: OK\n") } else { cat("tandem2: FAILED\n") } # 
bi-quartimin check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_biquartimin) if (is.logical(check) && check) { cat("biquartimin: OK\n") } else { cat("biquartimin: FAILED\n") } # bi-quartimin check <- ilav_matrix_rotate_grad_test(crit = lav_matrix_rotate_bigeomin) if (is.logical(check) && check) { cat("bigeomin: OK\n") } else { cat("bigeomin: FAILED\n") } } lavaan/R/lav_tables_mvb.R0000644000176200001440000000613114627656441015040 0ustar liggesusers# tools for the multivariate Bernoulli distribution # # see: # # Maydeu-Olivares & Joe (2005). Limited- and Full-Information Estimation and # Goodness-of-Fit Testing in 2^n Contingency Tables: A Unified Framework. # Journal of the American Statistical Association, 100, 1009--1020. # YR. 15 April 2014 -- first version # compute higher-order joint moments (Teugels 1991) # PROP must be an array, with dim = rep(2L, nitems) lav_tables_mvb_getPiDot <- function(PROP, order. = nitems) { # number of items/dimensions nitems <- length(dim(PROP)) # compute 'pi dot' up to order = order. pidot <- unlist( lapply(1:order., function(Order) { IDX <- utils::combn(1:nitems, Order) tmp <- apply(IDX, 2L, function(idx) { as.numeric(apply(PROP, idx, sum))[1L] }) tmp }) ) pidot } # compute 'T' matrix, so that pidot = T %*% prop lav_tables_mvb_getT <- function(nitems = 3L, order. = nitems, rbind. = FALSE) { # index matrix INDEX <- array(1:(2^nitems), dim = rep(2L, nitems)) T.r <- lapply(1:order., function(Order) { IDX <- utils::combn(1:nitems, Order) TT <- matrix(0L, ncol(IDX), 2^nitems) TT <- do.call( "rbind", lapply(1:ncol(IDX), function(i) { TRue <- as.list(rep(TRUE, nitems)) TRue[IDX[, i]] <- 1L ARGS <- c(list(INDEX), TRue) T1 <- integer(2^nitems) T1[as.vector(do.call("[", ARGS))] <- 1L T1 }) ) TT }) if (rbind.) { T.r <- do.call("rbind", T.r) } T.r } # simple test function to check that pidot = T %*% prop lav_tables_mvb_test <- function(nitems = 3L) { freq <- sample(5:50, 2^nitems, replace = TRUE) prop <- freq / sum(freq) TABLE <- array(freq, dim = rep(2, nitems)) PROP <- array(prop, dim = rep(2, nitems)) # note: freq is always as.numeric(TABLE) # prop is always as.numeric(PROP) pidot <- lav_tables_mvb_getPiDot(PROP) T.r <- lav_tables_mvb_getT(nitems = nitems, order. = nitems, rbind. = TRUE) if (lav_verbose()) { out <- cbind(as.numeric(T.r %*% prop), pidot) colnames(out) <- c("T * prop", "pidot") print(out) } all.equal(pidot, as.numeric(T.r %*% prop)) } # L_r test of Maydeu-Olivares & Joe (2005) eq (4) lav_tables_mvb_Lr <- function(nitems = 0L, obs.prop = NULL, est.prop = NULL, nobs = 0L, order. = 2L) { # recreate tables obs.PROP <- array(obs.prop, dim = rep(2L, nitems)) est.PROP <- array(est.prop, dim = rep(2L, nitems)) # compute {obs,est}.prop.dot obs.prop.dot <- lav_tables_mvb_getPiDot(obs.PROP, order. = order.) est.prop.dot <- lav_tables_mvb_getPiDot(est.PROP, order. = order.) # compute T.r T.r <- lav_tables_mvb_getT(nitems = nitems, order. = order., rbind. 
= TRUE) # compute GAMMA based on est.prop GAMMA <- diag(est.prop) - tcrossprod(est.prop) # compute XI XI <- T.r %*% GAMMA %*% t(T.r) # compute Lr diff.dot <- obs.prop.dot - est.prop.dot Lr <- as.numeric(nobs * t(diff.dot) %*% solve(XI) %*% diff.dot) df <- 2^nitems - 1L p.value <- 1 - pchisq(Lr, df = df) # return list list(Lr = Lr, df = df, p.value = p.value) } lavaan/R/ctr_pml_utils.R0000644000176200001440000003651014627656441014744 0ustar liggesusers# contributed by Myrsini Katsikatsou (March 2016) # the function pc_lik_PL_with_cov gives the value of the bivariate likelihood # for a specific pair of ordinal variables casewise when covariates are present and estimator=="PML" # (the bivariate likelihood is essentially the bivariate probability of the # observed response pattern of two ordinal variables) # Input arguments: # Y1 is a vector, includes the observed values for the first variable for all cases/units, # Y1 is ordinal # Y2 similar to Y1 # Rho is the polychoric correlation of Y1 and Y2 # th.y1 is the vector of the thresholds for Y1* excluding the first and # the last thresholds which are -Inf and Inf # th.y2 is similar to th.y1 # eXo is the data for the covariates in a matrix format where nrows= no of cases, # ncols= no of covariates # PI.y1 is a vector, includes the regression coefficients of the covariates # for the first variable, Y1, the length of the vector is the no of covariates; # to obtain this vector apply the function lavaan:::computePI()[row_correspondin_to_Y1, ] # PI.y2 is similar to PI.y2 # missing.ind is of "character" value, taking the values listwise, pairwise, available_cases; # to obtain a value use lavdata@missing # Output: # It is a vector, length= no of cases, giving the bivariate likelihood for each case. pc_lik_PL_with_cov <- function(Y1, Y2, Rho, th.y1, th.y2, eXo, PI.y1, PI.y2, missing.ind) { th.y1 <- c(-100, th.y1, 100) th.y2 <- c(-100, th.y2, 100) pred.y1 <- c(eXo %*% PI.y1) pred.y2 <- c(eXo %*% PI.y2) th.y1.upper <- th.y1[Y1 + 1L] - pred.y1 th.y1.lower <- th.y1[Y1] - pred.y1 th.y2.upper <- th.y2[Y2 + 1L] - pred.y2 th.y2.lower <- th.y2[Y2] - pred.y2 if (missing.ind == "listwise") { # I guess this is the default which # also handles the case of complete data biv_prob <- pbivnorm(th.y1.upper, th.y2.upper, rho = Rho) - pbivnorm(th.y1.lower, th.y2.upper, rho = Rho) - pbivnorm(th.y1.upper, th.y2.lower, rho = Rho) + pbivnorm(th.y1.lower, th.y2.lower, rho = Rho) lik <- biv_prob } else if (missing.ind %in% c( "pairwise", "available.cases", "available_cases" )) { # index of cases with complete pairs CP.idx <- which(complete.cases(cbind(Y1, Y2))) th.y1.upper <- th.y1.upper[CP.idx] th.y1.lower <- th.y1.lower[CP.idx] th.y2.upper <- th.y2.upper[CP.idx] th.y2.lower <- th.y2.lower[CP.idx] biv_prob <- pbivnorm(th.y1.upper, th.y2.upper, rho = Rho) - pbivnorm(th.y1.lower, th.y2.upper, rho = Rho) - pbivnorm(th.y1.upper, th.y2.lower, rho = Rho) + pbivnorm(th.y1.lower, th.y2.lower, rho = Rho) # lik <- numeric( length(Y1) ) lik <- rep(as.numeric(NA), length(Y1)) lik[CP.idx] <- biv_prob } lik } ################################################################# # The function uni_lik gives the value of the univariate likelihood for a # specific ordinal variable, casewise (which is essentially the probability for # the observed response category for each case). # The input arguments are explained before the function pc_lik_PL_with_cov above. # Output: # It is a vector, length= no of cases, giving the univariate likelihoods for each case. 
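# a small illustrative sketch (hypothetical values, not from the package):
#   Y1 <- c(1L, 3L, 2L)          # observed categories of one ordinal item
#   th <- c(-0.5, 0.8)           # two thresholds -> three categories
#   uni_lik(Y1 = Y1, th.y1 = th) # casewise P(Y1 = observed category)
# without eXo, this is just pnorm(upper) - pnorm(lower) per case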
#################################################################
# The function uni_lik gives the value of the univariate likelihood for a
# specific ordinal variable, casewise (which is essentially the probability
# of the observed response category for each case).
# The input arguments are explained before the function pc_lik_PL_with_cov
# above.
# Output:
# It is a vector, length = no of cases, giving the univariate likelihoods
# for each case.
uni_lik <- function(Y1, th.y1, eXo = NULL, PI.y1 = NULL) {
  th.y1 <- c(-100, th.y1, 100)
  if (!is.null(eXo)) {
    pred.y1 <- c(eXo %*% PI.y1)
  }
  if (is.null(eXo)) {
    th.y1.upper <- th.y1[Y1 + 1L]
    th.y1.lower <- th.y1[Y1]
  } else {
    th.y1.upper <- th.y1[Y1 + 1L] - pred.y1
    th.y1.lower <- th.y1[Y1] - pred.y1
  }
  uni_lik <- pnorm(th.y1.upper) - pnorm(th.y1.lower)
  uni_lik[is.na(uni_lik)] <- 0
  uni_lik
}

#################################################################
# The function lav_tables_univariate_freq_cell computes the univariate
# (one-way) frequency tables.
# The function closely follows the "logic" of the lavaan function
# lav_tables_pairwise_freq_cell.
# The output is either a list or a data.frame depending on the value of the
# logical input argument as.data.frame. Either way, the same information is
# contained, namely:
# a) the observed (univariate) frequencies f_ia, i=1,...,p (variables),
#    a=1,...,ci (response categories), with a index running faster than
#    i index.
# b) an index vector with the name varb which indicates which variable each
#    frequency refers to.
# c) an index vector with the name group which indicates which group each
#    frequency refers to in a multi-group analysis.
# d) an index vector with the name level which indicates which level within
#    each ordinal variable each frequency refers to.
# e) a vector nobs which gives how many cases were considered to compute the
#    corresponding frequency. Since we use the available data for each
#    variable when missing=="available_cases" we expect these numbers to
#    differ when missing values are present.
# f) an index vector with the name id indexing each univariate table;
#    1 goes to the first variable in the first group, 2 to the 2nd variable
#    in the first group, and so on. The last table has the index equal to
#    (no of groups)*(no of variables).
lav_tables_univariate_freq_cell <- function(lavdata = NULL,
                                            as.data.frame. = TRUE) {
  # shortcuts
  vartable <- as.data.frame(lavdata@ov, stringsAsFactors = FALSE)
  X <- lavdata@X
  ov.names <- lavdata@ov.names
  ngroups <- lavdata@ngroups

  # identify 'categorical' variables
  cat.idx <- which(vartable$type %in% c("ordered", "factor"))

  # do we have any categorical variables?
  if (length(cat.idx) == 0L) {
    lav_msg_stop(gettext("no categorical variables are found"))
  }

  # univariate tables
  univariate.tables <- vartable$name[cat.idx]
  univariate.tables <- rbind(univariate.tables,
    seq_len(length(univariate.tables)),
    deparse.level = 0
  )
  ntables <- ncol(univariate.tables)

  # for each group, for each univariate table, collect information
  UNI_TABLES <- vector("list", length = ngroups)
  for (g in 1:ngroups) {
    UNI_TABLES[[g]] <- apply(univariate.tables,
      MARGIN = 2,
      FUN = function(x) {
        idx1 <- which(vartable$name == x[1])
        id <- (g - 1) * ntables + as.numeric(x[2])
        ncell <- vartable$nlev[idx1]
        # compute one-way observed frequencies
        Y1 <- X[[g]][, idx1]
        UNI_FREQ <- tabulate(Y1, nbins = max(Y1, na.rm = TRUE))
        list(
          id = rep.int(id, ncell),
          varb = rep.int(x[1], ncell),
          group = rep.int(g, ncell),
          nobs = rep.int(sum(UNI_FREQ), ncell),
          level = seq_len(ncell),
          obs.freq = UNI_FREQ
        )
      }
    )
  }
  if (as.data.frame.) {
    for (g in 1:ngroups) {
      UNI_TABLE <- UNI_TABLES[[g]]
      UNI_TABLE <- lapply(UNI_TABLE, as.data.frame,
        stringsAsFactors = FALSE
      )
      if (g == 1) {
        out <- do.call(rbind, UNI_TABLE)
      } else {
        out <- rbind(out, do.call(rbind, UNI_TABLE))
      }
    }
    if (g == 1) {
      # remove group column
      out$group <- NULL
    }
  } else {
    if (ngroups == 1L) {
      out <- UNI_TABLES[[1]]
    } else {
      out <- UNI_TABLES
    }
  }
  out
}

#################################################################
# The function univariateExpProbVec gives the model-based univariate
# probabilities for all ordinal indicators and for all of their response
# categories, i.e. pi(xi=a), where a=1,...,ci and i=1,...,p with a index
# running faster than i index.
# Input arguments:
# TH is a vector giving the thresholds for all variables, tau_ia, with a
#    running faster than i (the first and the last thresholds which are
#    -Inf and Inf are not included). TH can be given by the lavaan function
#    computeTH .
# th.idx is a vector of the same length as TH which gives the value of the
#    i index, namely which variable each threshold refers to. This can be
#    obtained by lavmodel@th.idx .
# Output:
# It is a vector, length = Sum_i(ci), i.e. the sum of the response
# categories of all ordinal variables. The vector contains the model-based
# univariate probabilities pi(xi=a).
univariateExpProbVec <- function(TH = TH, th.idx = th.idx) {
  TH.split <- split(TH, th.idx)
  TH.lower <- unlist(lapply(TH.split, function(x) {
    c(-100, x)
  }), use.names = FALSE)
  TH.upper <- unlist(lapply(TH.split, function(x) {
    c(x, 100)
  }), use.names = FALSE)
  prob <- pnorm(TH.upper) - pnorm(TH.lower)
  # to avoid NaN/-Inf
  prob[prob < .Machine$double.eps] <- .Machine$double.eps
  prob
}
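# a small numeric sketch for univariateExpProbVec() (illustration only):
# two variables with 2 and 3 thresholds; within each variable the implied
# category probabilities must sum to (approximately) one
ilav_univariateExpProbVec_example <- function() {
  TH <- c(-0.5, 0.5, -1, 0, 1) # thresholds, i index running slowest
  th.idx <- c(1L, 1L, 2L, 2L, 2L) # variable index of each threshold
  prob <- univariateExpProbVec(TH = TH, th.idx = th.idx)
  # variable 1 has 3 response categories, variable 2 has 4
  stopifnot(
    length(prob) == 7L,
    abs(sum(prob[1:3]) - 1) < 1e-8,
    abs(sum(prob[4:7]) - 1) < 1e-8
  )
  prob
}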
#############################################################################
# The function pc_cor_scores_PL_with_cov computes the derivatives of a
# bivariate log-likelihood of two ordinal variables casewise with respect to
# thresholds, slopes (reduced-form regression coefficients for the
# covariates), and the polychoric correlation.
# The function dbinorm of lavaan is used.
# The function gives the right result for both listwise and pairwise
# deletion, and the case of complete data.
# Input arguments are explained before the function pc_lik_PL_with_cov
# defined above. The only difference is that PI.y1 and PI.y2 are
# (accidentally) renamed here as sl.y1 and sl.y2.
# Output:
# It is a list containing the following:
# a) the derivatives w.r.t. the thresholds of the first variable casewise.
#    This is a matrix, nrows = no of cases, ncols = no of thresholds of
#    variable 1.
# b) the derivatives w.r.t. the thresholds of the second variable casewise.
#    This is a matrix, nrows = no of cases, ncols = no of thresholds of
#    variable 2.
# c) the derivatives w.r.t. slopes for variable 1. This is a matrix, where
#    nrows = no of cases, ncols = no of covariates.
# d) the derivatives w.r.t. slopes for variable 2. This is a matrix, where
#    nrows = no of cases, ncols = no of covariates.
# e) the derivative w.r.t. the polychoric correlation of the two variables.
#    This is a vector of length = no of cases.
pc_cor_scores_PL_with_cov <- function(Y1, Y2, eXo, Rho, th.y1, th.y2,
                                      sl.y1, sl.y2, missing.ind) {
  nth.y1 <- length(th.y1)
  nth.y2 <- length(th.y2)
  start.th.y1 <- th.y1
  start.th.y2 <- th.y2
  Nobs <- length(Y1)
  R <- sqrt(1 - Rho * Rho)
  th.y1 <- c(-100, th.y1, 100)
  th.y2 <- c(-100, th.y2, 100)
  pred.y1 <- c(eXo %*% sl.y1)
  pred.y2 <- c(eXo %*% sl.y2)
  th.y1.z1 <- th.y1[Y1 + 1L] - pred.y1
  th.y1.z2 <- th.y1[Y1] - pred.y1
  th.y2.z1 <- th.y2[Y2 + 1L] - pred.y2
  th.y2.z2 <- th.y2[Y2] - pred.y2
  # lik, i.e. the bivariate probability case-wise
  lik <- pc_lik_PL_with_cov(
    Y1 = Y1, Y2 = Y2, Rho = Rho,
    th.y1 = start.th.y1, th.y2 = start.th.y2,
    eXo = eXo, PI.y1 = sl.y1, PI.y2 = sl.y2,
    missing.ind = missing.ind
  )
  # w.r.t. th.y1, mean tau tilde
  # derivative of bivariate prob w.r.t. tau^xi_ci, see formula in paper 2012
  y1.Z1 <- dnorm(th.y1.z1) * (pnorm((th.y2.z1 - Rho * th.y1.z1) / R) -
    pnorm((th.y2.z2 - Rho * th.y1.z1) / R))
  # derivative of bivariate prob w.r.t. tau^xi_(ci-1),
  y1.Z2 <- (-1) * (dnorm(th.y1.z2) * (pnorm((th.y2.z1 - Rho * th.y1.z2) / R) -
    pnorm((th.y2.z2 - Rho * th.y1.z2) / R)))
  # allocate the derivatives at the right column casewise
  idx.y1.z1 <- matrix(1:nth.y1, nrow = Nobs, ncol = nth.y1, byrow = TRUE) == Y1
  idx.y1.z2 <- matrix(1:nth.y1, nrow = Nobs, ncol = nth.y1, byrow = TRUE) ==
    (Y1 - 1L)
  der.table.y1 <- idx.y1.z1 * y1.Z1 + idx.y1.z2 * y1.Z2
  # der of pl w.r.t. th.y1
  dx.th.tilde.y1 <- der.table.y1 / lik
  dx.th.tilde.y1[is.na(dx.th.tilde.y1)] <- 0
  # w.r.t. th.y2, mean tau tilde
  # derivative of bivariate prob w.r.t. tau^xi_ci, see formula in paper 2012
  y2.Z1 <- dnorm(th.y2.z1) * (pnorm((th.y1.z1 - Rho * th.y2.z1) / R) -
    pnorm((th.y1.z2 - Rho * th.y2.z1) / R))
  # derivative of bivariate prob w.r.t. tau^xi_(ci-1),
  y2.Z2 <- (-1) * (dnorm(th.y2.z2) * (pnorm((th.y1.z1 - Rho * th.y2.z2) / R) -
    pnorm((th.y1.z2 - Rho * th.y2.z2) / R)))
  # allocate the derivatives at the right column casewise
  idx.y2.z1 <- matrix(1:nth.y2, nrow = Nobs, ncol = nth.y2, byrow = TRUE) == Y2
  idx.y2.z2 <- matrix(1:nth.y2, nrow = Nobs, ncol = nth.y2, byrow = TRUE) ==
    (Y2 - 1L)
  der.table.y2 <- idx.y2.z1 * y2.Z1 + idx.y2.z2 * y2.Z2
  # der of pl w.r.t. th.y2
  dx.th.tilde.y2 <- der.table.y2 / lik
  dx.th.tilde.y2[is.na(dx.th.tilde.y2)] <- 0
  # w.r.t. rho
  # derivative of bivariate prob w.r.t. rho, see formula in paper 2012
  dbivprob.wrt.rho <- (dbinorm(th.y1.z1, th.y2.z1, Rho) -
    dbinorm(th.y1.z2, th.y2.z1, Rho) -
    dbinorm(th.y1.z1, th.y2.z2, Rho) +
    dbinorm(th.y1.z2, th.y2.z2, Rho))
  # der of pl w.r.t. rho
  dx.rho <- dbivprob.wrt.rho / lik
  dx.rho[is.na(dx.rho)] <- 0
  # der of pl w.r.t. slopes (also referred to PI obtained by computePI function)
  row.sums.y1 <- rowSums(dx.th.tilde.y1)
  row.sums.y2 <- rowSums(dx.th.tilde.y2)
  dx.sl.y1 <- (-1) * eXo * row.sums.y1
  dx.sl.y2 <- (-1) * eXo * row.sums.y2
  list(
    dx.th.y1 = dx.th.tilde.y1, # note that dx.th.tilde=dx.th
    dx.th.y2 = dx.th.tilde.y2,
    dx.sl.y1 = dx.sl.y1,
    dx.sl.y2 = dx.sl.y2,
    dx.rho = dx.rho
  )
}
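# a minimal sketch for pc_cor_scores_PL_with_cov() (illustration only;
# made-up data): the threshold-derivative matrices have one row per case and
# one column per threshold, and dx.rho has one element per case
ilav_pc_cor_scores_example <- function(n = 100L) {
  eXo <- matrix(rnorm(n), n, 1L)
  Y1 <- sample(1:3, n, replace = TRUE)
  Y2 <- sample(1:3, n, replace = TRUE)
  out <- pc_cor_scores_PL_with_cov(
    Y1 = Y1, Y2 = Y2, eXo = eXo, Rho = 0.3,
    th.y1 = c(-0.5, 0.5), th.y2 = c(-1, 1),
    sl.y1 = 0.2, sl.y2 = -0.1,
    missing.ind = "listwise"
  )
  stopifnot(
    nrow(out$dx.th.y1) == n, ncol(out$dx.th.y1) == 2L,
    length(out$dx.rho) == n
  )
  out
}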
###############################################################
# The function uni_scores gives, casewise, the derivative of a univariate
# log-likelihood w.r.t. thresholds and slopes if present, weighted by the
# casewise uni-weights as those defined in AC-PL (essentially the number of
# missing values per case).
# The function closely follows the "logic" of the function
# pc_cor_scores_PL_with_cov defined above.
# Input arguments are as before plus: weights.casewise, given by
# lavcache$uniweights.casewise .
# Output:
# A list including the following:
# a) the derivatives w.r.t. the thresholds of the variable. This is a
#    matrix, nrows = no of cases, ncols = no of thresholds of the variable.
# b) the derivatives w.r.t. slopes for the variable. If covariates are
#    present, this is a matrix, nrows = no of cases, ncols = no of
#    covariates. Otherwise it takes the value NULL.
uni_scores <- function(Y1, th.y1, eXo = NULL, sl.y1 = NULL,
                       weights.casewise) {
  nth.y1 <- length(th.y1)
  start.th.y1 <- th.y1
  Nobs <- length(Y1)
  th.y1 <- c(-100, th.y1, 100)
  if (is.null(eXo)) {
    th.y1.z1 <- th.y1[Y1 + 1L]
    th.y1.z2 <- th.y1[Y1]
  } else {
    pred.y1 <- c(eXo %*% sl.y1)
    th.y1.z1 <- th.y1[Y1 + 1L] - pred.y1
    th.y1.z2 <- th.y1[Y1] - pred.y1
  }
  # lik, i.e. the univariate probability case-wise
  # (uni_lik() adds the -100/+100 end points itself, so we pass the
  # original, unpadded thresholds)
  lik <- uni_lik( # Y1 = X[,i],
    Y1 = Y1,
    # th.y1 = TH[th.idx==i],
    th.y1 = start.th.y1,
    eXo = eXo,
    # PI.y1 = PI[i,])
    PI.y1 = sl.y1
  )
  # w.r.t. th.y1
  # derivative of the univariate prob w.r.t. the upper limit threshold
  y1.Z1 <- dnorm(th.y1.z1)
  # derivative of the univariate prob w.r.t. the lower limit threshold
  y1.Z2 <- (-1) * dnorm(th.y1.z2)
  # allocate the derivatives at the right column casewise
  idx.y1.z1 <- matrix(1:nth.y1, nrow = Nobs, ncol = nth.y1, byrow = TRUE) == Y1
  idx.y1.z2 <- matrix(1:nth.y1, nrow = Nobs, ncol = nth.y1, byrow = TRUE) ==
    (Y1 - 1L)
  der.table.y1 <- idx.y1.z1 * y1.Z1 + idx.y1.z2 * y1.Z2
  # der of pl w.r.t. th.y1
  dx.th.tilde.y1 <- der.table.y1 * (weights.casewise / lik)
  dx.th.tilde.y1[is.na(dx.th.tilde.y1)] <- 0
  # der of pl w.r.t. slopes (also referred to PI obtained by computePI function)
  dx.sl.y1 <- NULL
  if (!is.null(eXo)) {
    row.sums.y1 <- rowSums(dx.th.tilde.y1)
    dx.sl.y1 <- (-1) * eXo * row.sums.y1
  }
  list(
    dx.th.y1 = dx.th.tilde.y1, # note that dx.th.tilde=dx.th
    dx.sl.y1 = dx.sl.y1
  )
}
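# a minimal sketch for uni_scores() (illustration only): one ordinal
# variable with 3 categories and no covariates; note that in the package
# itself weights.casewise comes from lavcache$uniweights.casewise (the
# number of missing values per case), here we simply use unit weights
ilav_uni_scores_example <- function(n = 50L) {
  Y1 <- sample(1:3, n, replace = TRUE)
  out <- uni_scores(
    Y1 = Y1, th.y1 = c(-0.5, 0.5),
    eXo = NULL, sl.y1 = NULL,
    weights.casewise = rep(1, n)
  )
  # one row per case, one column per threshold; no slope part without eXo
  stopifnot(
    nrow(out$dx.th.y1) == n, ncol(out$dx.th.y1) == 2L,
    is.null(out$dx.sl.y1)
  )
  out
}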
lavaan/R/lav_lavaan_step10_cache.R0000644000176200001440000003507614627656441016505 0ustar liggesusers
lav_lavaan_step10_cache <- function(slotCache = NULL, # nolint
                                    lavdata = NULL,
                                    lavmodel = NULL,
                                    lavpartable = NULL,
                                    lavoptions = NULL,
                                    sampling.weights = NULL) {
  # # # # # # # # # # # # #
  #  10. lavcache         #
  # # # # # # # # # # # # #
  #
  # if slotCache not NULL
  #   copy to lavcache
  # else
  #   lavcache = list of length lavdata@ngroups
  #   set tmp.ov.types = lavdata@ov$type
  #   if lavmodel@conditional.x and sum(lavmodel@nexo) > 0L remove elements
  #     lavpta$vidx$ov.x from tmp.ov.types
  #   if lavoptions$estimator == "PML" and all tmp.ov.types are "ordered"
  #     th = computeTH(lavmodel)
  #     bi = lav_tables_pairwise_freq_cells(lavdata)
  #     if lavoptions$missing is "available.cases" or "doubly.robust"
  #       uni = lav_tables_univariate_freq_cell(lavdata)
  #       if lavoptions$missing is "doubly.robust"
  #         if lavoptions$control$pairwiseProbGivObs NULL: *** error ***
  #         if lavoptions$control$univariateProbGivObs NULL: *** error ***
  #     for all groups (1:lavdata@ngroups)
  #       set tmp.idx = 1:length(bi$obs.freq)
  #       if bi$group not NULL and max(bi$group) > 1L set tmp.idx = indexes
  #         for this group in bi
  #       set bifreq = bi$obs.freq[tmp.idx]
  #       set binobs = bi$nobs[tmp.idx]
  #       set long = LongVecInd(no.x = ncol(lavdata@X[[g]]),
  #                             all.thres = th[[g]],
  #                             index.var.of.thres = lavmodel@th.idx[[g]])
  #       set lavcache[[g]] = list(bifreq = bifreq, nobs = binobs, long = long)
  #       if sampling.weights not NULL
  #         compute (for group g) lavcache[[g]]$sum_obs_weights_xixj_ab_vec (*)
  #       if lavoptions$missing is "available.cases" or "doubly.robust"
  #         set tmp.idx = 1:length(uni$obs.freq)
  #         if uni$group not NULL and max(uni$group) > 1L set tmp.idx = indexes
  #           for this group in uni
  #         set lavcache[[g]]$unifreq = unifreq = uni$obs.freq[tmp.idx]
  #         set lavcache[[g]]$uninobs = uninobs = uni$nobs[tmp.idx]
  #         set lavcache[[g]]$uniweights.casewise = uniweights.casewise =
  #           rowSums(is.na(lavdata@X[[g]]))
  #         compute lavcache[[g]]$uniweights (*)
  #       if lavoptions$missing is "doubly.robust"
  #         lavcache[[g]]$pairwiseProbGivObs =
  #           lavoptions$control$pairwiseProbGivObs[[g]]
  #         lavcache[[g]]$univariateProbGivObs =
  #           lavoptions$control$univariateProbGivObs[[g]]
  #         compute members idx.y1, idx.gy2, idx.cat.y1, idx.cat.gy2 and
  #           id.uniPrGivObs from lavcache[[g]] (*)
  #   if lavdata@data.type is "full" and lavdata@Rp[[1L]] not NULL
  #     copy lavdata@Rp[[g]]$pat to lavcache[[g]]$pat for all groups g
  #   if lavoptions$estimator is "MML"
  #     compute for all groups g lavcache[[g]]$GH via
  #       lav_integration_gauss_hermite
  #
  # (*) !!! computations too complicated to summarize here !!!

  if (!is.null(slotCache)) {
    lavcache <- slotCache
  } else {
    # prepare cache -- stuff needed for estimation, but also post-estimation
    lavcache <- vector("list", length = lavdata@ngroups)

    # ov.types? (for PML check)
    tmp.ov.types <- lavdata@ov$type
    if (lavmodel@conditional.x && sum(lavmodel@nexo) > 0L) {
      # remove ov.x
      tmp.ov.x.idx <- unlist(attr(lavpartable, "vidx")$ov.x)
      tmp.ov.types <- tmp.ov.types[-tmp.ov.x.idx]
    }

    if (lavoptions$estimator == "PML" && all(tmp.ov.types == "ordered")) {
      th <- computeTH(lavmodel)
      bi <- lav_tables_pairwise_freq_cell(lavdata)

      # handle option missing = "available.cases" or "doubly.robust"
      if (lavoptions$missing == "available.cases" ||
          lavoptions$missing == "doubly.robust") {
        uni <- lav_tables_univariate_freq_cell(lavdata)

        # checks for missing = "doubly.robust"
        if (lavoptions$missing == "doubly.robust") {
          # check whether the probabilities pairwiseProbGivObs and
          # univariateProbGivObs are given by the user
          if (is.null(lavoptions$control$pairwiseProbGivObs)) {
            lav_msg_stop(gettext(
              "could not find `pairwiseProbGivObs' in control() list"))
          }
          if (is.null(lavoptions$control$univariateProbGivObs)) {
            lav_msg_stop(gettext(
              "could not find `univariateProbGivObs' in control() list"))
          }
        }
      }

      for (g in 1:lavdata@ngroups) {
        if (is.null(bi$group) || max(bi$group) == 1L) {
          bifreq <- bi$obs.freq
          binobs <- bi$nobs
        } else {
          idx <- which(bi$group == g)
          bifreq <- bi$obs.freq[idx]
          binobs <- bi$nobs[idx]
        }
        long <- LongVecInd(
          no.x = ncol(lavdata@X[[g]]),
          all.thres = th[[g]],
          index.var.of.thres = lavmodel@th.idx[[g]]
        )
        lavcache[[g]] <- list(
          bifreq = bifreq,
          nobs = binobs,
          long = long
        )
        # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        # I need to add something that splits the weights into g groups,
        # so adjust what follows in the new code. Also compute the sum of
        # weights within a group; this will substitute n_g (group size)
        # of simple random sampling (SRS). Also compute the total sum of
        # weights over all observations over all groups; this substitutes
        # the total sample size of SRS.
        if (!is.null(sampling.weights)) {
          # Keep track of indices of the response categories (a,b) of a
          # pair of ordinal variables (xi,xj) appearing in the data as
          # well as the index of the pair.
          idx_ab_of_xixj_ab <- lapply(long[c(1:2, 5)], function(x) {
            x[(long$index.thres.var1.of.pair != 0) &
              (long$index.thres.var2.of.pair != 0)]
          })
          names(idx_ab_of_xixj_ab) <- c("idx_a", "idx_b", "idx_pairs")
          lavcache[[g]]$idx_ab_of_xixj_ab <- idx_ab_of_xixj_ab
          # Raw data for group g
          X.g <- lavdata@X[[g]] # nolint
          # I assume that X.g includes only the ordinal indicators; nvar
          # gives the number of ordinal indicators
          nvar <- ncol(X.g)
          # pstar gives the number of pairs formed by the nvar ordinal
          # indicators
          pstar <- nvar * (nvar - 1) / 2
          # Keep track of the indices of variables forming each pair
          idx_vars_in_pair <- combn(nvar, 2)
          # The output of sapply below provides the sum of weights for
          # all bivariate response patterns for all pairs of indicators.
          # If all indicators have the same number of response
          # categories, the output of the sapply function below is a
          # matrix. Each column refers to a different pair of indicators
          # (i,j) with j running faster than i, e.g. (1,2) (1,3) (2,3).
          # Within each column, each element (i.e. each row of the matrix)
          # refers to a different combination of response categories
          # (a,b) with a, the category index of indicator i, running
          # faster than b, the category index of indicator j, e.g.
          # (1,1), (2,1) (3,1) (1,2) (2,2) (3,2)
          # If the indicators have a different number of response
          # categories, the output of the sapply function below is a list.
# Each element of the list refers to a different pair of # indicators (i,j) with j running faster than i and it is a # matrix with number of rows the number of response categories # of indicator i and ncol = the number of response categories # of indicator j. sum_obs_weights_xixj_ab <- sapply(1:pstar, function(x) { tmp_idx_ab <- lapply(idx_ab_of_xixj_ab, function(y) { y[idx_ab_of_xixj_ab$idx_pairs == x] }) tmp_idx_cols <- idx_vars_in_pair[, x] tmp_var1 <- factor(X.g[, tmp_idx_cols[1]], levels = as.character(unique(tmp_idx_ab$idx_a)) ) tmp_var2 <- factor(X.g[, tmp_idx_cols[2]], levels = as.character(unique(tmp_idx_ab$idx_b)) ) tapply( X = lavdata@weights[[g]], INDEX = list(tmp_var1, tmp_var2), FUN = sum ) }) # We need to transform the output of sapply into a vector # where the sum of weights (for all bivariate response # patterns for all pairs of indicators) are listed in the same # order as in pairwisePI vector, i.e. a runs the fastest, # followed by b, then by j and lastly by i. if (is.matrix(sum_obs_weights_xixj_ab)) { sum_obs_weights_xixj_ab_vec <- c(sum_obs_weights_xixj_ab) } else if (is.list(sum_obs_weights_xixj_ab)) { sum_obs_weights_xixj_ab_vec <- do.call(c, sum_obs_weights_xixj_ab) } # Note that sapply gives NA for these bivariate response # patterns which are not observed at all. Substitute NA with # 0. idx_na_sowxav <- is.na(sum_obs_weights_xixj_ab_vec) if (any(idx_na_sowxav)) { sum_obs_weights_xixj_ab_vec[idx_na_sowxav] <- 0 } lavcache[[g]]$sum_obs_weights_xixj_ab_vec <- sum_obs_weights_xixj_ab_vec } # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # available cases if (lavoptions$missing == "available.cases" || lavoptions$missing == "doubly.robust") { if (is.null(uni$group) || max(uni$group) == 1L) { unifreq <- uni$obs.freq uninobs <- uni$nobs } else { idx <- which(uni$group == g) unifreq <- uni$obs.freq[idx] uninobs <- uni$nobs[idx] } lavcache[[g]]$unifreq <- unifreq lavcache[[g]]$uninobs <- uninobs uniweights.casewise <- rowSums(is.na(lavdata@X[[g]])) lavcache[[g]]$uniweights.casewise <- uniweights.casewise # weights per response category per variable in the same # order as unifreq; i.e. 
          # w_ia, i = 1,...,p, (p variables),
          # a = 1,...,Ci, (Ci response categories for variable i),
          # a running faster than i
          tmp.uniweights <- apply(
            lavdata@X[[g]], 2,
            function(x) {
              tapply(uniweights.casewise, as.factor(x), sum,
                na.rm = TRUE
              )
            }
          )
          if (is.matrix(tmp.uniweights)) {
            lavcache[[g]]$uniweights <- c(tmp.uniweights)
          }
          if (is.list(tmp.uniweights)) {
            lavcache[[g]]$uniweights <- unlist(tmp.uniweights)
          }
        } # "available.cases" or "doubly.robust"

        # doubly.robust only
        if (lavoptions$missing == "doubly.robust") {
          # add the user-provided probabilities pairwiseProbGivObs and
          # univariateProbGivObs to the cache
          lavcache[[g]]$pairwiseProbGivObs <-
            lavoptions$control$pairwiseProbGivObs[[g]]
          lavcache[[g]]$univariateProbGivObs <-
            lavoptions$control$univariateProbGivObs[[g]]
          # compute different indices vectors that will help to do
          # calculations
          ind.vec <- as.data.frame(long[1:5])
          ind.vec <- ind.vec[((ind.vec$index.thres.var1.of.pair != 0) &
            (ind.vec$index.thres.var2.of.pair != 0)), ]
          idx.cat.y1 <- ind.vec$index.thres.var1.of.pair
          idx.cat.y2 <- ind.vec$index.thres.var2.of.pair
          idx.pairs <- ind.vec$index.pairs.extended
          lavcache[[g]]$idx.pairs <- idx.pairs
          idx.cat.y1.split <- split(idx.cat.y1, idx.pairs)
          idx.cat.y2.split <- split(idx.cat.y2, idx.pairs)
          lavcache[[g]]$idx.cat.y1.split <- idx.cat.y1.split
          lavcache[[g]]$idx.cat.y2.split <- idx.cat.y2.split
          # generate the variables, categories indices vector which
          # keep track to which variables and categories the
          # elements of vector probY1Gy2 refer to
          nlev <- lavdata@ov$nlev
          nvar <- length(nlev)
          idx.var.matrix <- matrix(1:nvar, nrow = nvar, ncol = nvar)
          idx.diag <- diag(matrix(1:(nvar * nvar),
            nrow = nvar,
            ncol = nvar
          ))
          idx.y1gy2.matrix <- rbind(
            t(idx.var.matrix)[-idx.diag],
            idx.var.matrix[-idx.diag]
          )
          no.pairs.y1gy2 <- ncol(idx.y1gy2.matrix)
          idx.cat.y1 <- unlist(lapply(1:no.pairs.y1gy2, function(x) {
            rep(1:nlev[idx.y1gy2.matrix[1, x]],
              times = nlev[idx.y1gy2.matrix[2, x]]
            )
          }))
          idx.cat.gy2 <- unlist(lapply(1:no.pairs.y1gy2, function(x) {
            rep(1:nlev[idx.y1gy2.matrix[2, x]],
              each = nlev[idx.y1gy2.matrix[1, x]]
            )
          }))
          dim.pairs <- unlist(lapply(1:no.pairs.y1gy2, function(x) {
            nlev[idx.y1gy2.matrix[1, x]] * nlev[idx.y1gy2.matrix[2, x]]
          }))
          idx.y1 <- unlist(mapply(rep, idx.y1gy2.matrix[1, ],
            each = dim.pairs
          ))
          idx.gy2 <- unlist(mapply(rep, idx.y1gy2.matrix[2, ],
            each = dim.pairs
          ))
          lavcache[[g]]$idx.y1 <- idx.y1
          lavcache[[g]]$idx.gy2 <- idx.gy2
          lavcache[[g]]$idx.cat.y1 <- idx.cat.y1
          lavcache[[g]]$idx.cat.gy2 <- idx.cat.gy2
          # the vector below keeps track of the variable each column
          # of the matrix univariateProbGivObs refers to
          lavcache[[g]]$id.uniPrGivObs <- sort(c(
            unique(lavmodel@th.idx[[g]]),
            lavmodel@th.idx[[g]]
          ))
        } # doubly.robust
      } # g
    }
    # copy response patterns to cache -- FIXME!!
(data not included # in Model only functions) if (lavdata@data.type == "full" && !is.null(lavdata@Rp[[1L]])) { for (g in 1:lavdata@ngroups) { lavcache[[g]]$pat <- lavdata@Rp[[g]]$pat } } } # If estimator = MML, store Gauss-Hermite nodes/weights if (lavoptions$estimator == "MML") { for (g in 1:lavdata@ngroups) { # count only the ones with non-normal indicators # nfac <- lavpta$nfac.nonnormal[[g]] nfac <- attr(lavpartable, "nfac")[[g]] lavcache[[g]]$GH <- lav_integration_gauss_hermite( n = lavoptions$integration.ngh, dnorm = TRUE, mean = 0, sd = 1, ndim = nfac ) # lavcache[[g]]$DD <- lav_model_gradient_DD(lavmodel, group = g) } } lavcache } lavaan/R/lav_data_print.R0000644000176200001440000001653014627656441015053 0ustar liggesusers# print object from lavData class # setMethod( "show", "lavData", function(object) { # print 'lavData' object res <- lav_data_summary_short(object) lav_data_print_short(res, nd = 3L) } ) # create summary information for @lavdata slot lav_data_summary_short <- function(object) { # which object? if (inherits(object, "lavaan")) { lavdata <- object@Data } else if (inherits(object, "lavData")) { lavdata <- object } else { lav_msg_stop(gettext("object must be lavaan or lavData object")) } # two or three columns (depends on nobs/norig) threecolumn <- FALSE for (g in 1:lavdata@ngroups) { if (lavdata@nobs[[g]] != lavdata@norig[[g]]) { threecolumn <- TRUE break } } # clustered data? clustered <- FALSE if (.hasSlot(lavdata, "cluster") && # in case we have an old obj length(lavdata@cluster) > 0L) { clustered <- TRUE } # multilevel data? multilevel <- FALSE if (.hasSlot(lavdata, "nlevels") && # in case we have an old obj lavdata@nlevels > 1L) { multilevel <- TRUE } # extract summary information datasummary <- list( ngroups = lavdata@ngroups, nobs = unlist(lavdata@nobs) ) # norig? if (threecolumn) { datasummary$norig <- unlist(lavdata@norig) } # multiple groups? if (lavdata@ngroups > 1L) { datasummary$group.label <- lavdata@group.label } # sampling weights? if ((.hasSlot(lavdata, "weights")) && # in case we have an old object (!is.null(lavdata@weights[[1L]]))) { datasummary$sampling.weights <- lavdata@sampling.weights } # clustered/multilevel data? if (clustered) { if (multilevel) { datasummary$nlevels <- lavdata@nlevels } datasummary$cluster <- lavdata@cluster if (lavdata@ngroups == 1L) { datasummary$nclusters <- unlist(lavdata@Lp[[1]]$nclusters) } else { tmp <- vector("list", length = lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { tmp[[g]] <- unlist(lavdata@Lp[[g]]$nclusters) } datasummary$nclusters <- tmp } } # missing data? if (!is.null(lavdata@Mp[[1L]])) { datasummary$npatterns <- sapply(lavdata@Mp, "[[", "npatterns") if (multilevel && !is.null(lavdata@Mp[[1L]]$Zp)) { datasummary$npatterns2 <- sapply(lapply( lavdata@Mp, "[[", "Zp" ), "[[", "npatterns") } } datasummary } lav_data_print_short <- function(object, nd = 3L) { # object should data summary if (inherits(object, "lavaan")) { object <- lav_data_summary_short(object) } datasummary <- object num.format <- paste("%", max(8L, nd + 5L), ".", nd, "f", sep = "") # threecolumn threecolumn <- !is.null(datasummary$norig) # multilevel? multilevel <- !is.null(datasummary$nlevels) # clustered? clustered <- !is.null(datasummary$cluster) && is.null(datasummary$nlevels) # header? 
no, for historical reasons only # cat("Data information:\n\n") c1 <- c2 <- c3 <- character(0L) # number of observations if (datasummary$ngroups == 1L) { if (threecolumn) { c1 <- c(c1, "") c2 <- c(c2, "Used") c3 <- c(c3, "Total") } c1 <- c(c1, "Number of observations") c2 <- c(c2, datasummary$nobs) c3 <- c(c3, ifelse(threecolumn, datasummary$norig, "")) } else { c1 <- c(c1, "Number of observations per group:") if (threecolumn) { c2 <- c(c2, "Used") c3 <- c(c3, "Total") } else { c2 <- c(c2, "") c3 <- c(c3, "") } for (g in 1:datasummary$ngroups) { c1 <- c(c1, sprintf(" %-40s", datasummary$group.label[g])) c2 <- c(c2, datasummary$nobs[g]) c3 <- c(c3, ifelse(threecolumn, datasummary$norig[g], "")) } # g } # number of clusters if (datasummary$ngroups == 1L) { if (multilevel) { for (l in 2:datasummary$nlevels) { c1 <- c( c1, paste("Number of clusters [", datasummary$cluster[l - 1], "]", sep = "" ) ) c2 <- c(c2, datasummary$nclusters[l]) c3 <- c(c3, "") } } else if (clustered) { c1 <- c(c1, paste("Number of clusters [", datasummary$cluster, "]", sep = "" )) c2 <- c(c2, datasummary$nclusters[2]) c3 <- c(c3, "") } } else { if (multilevel) { for (l in 2:datasummary$nlevels) { c1 <- c( c1, paste("Number of clusters [", datasummary$cluster[l - 1], "]:", sep = "" ) ) c2 <- c(c2, "") c3 <- c(c3, "") for (g in 1:datasummary$ngroups) { c1 <- c(c1, sprintf(" %-40s", datasummary$group.label[g])) c2 <- c(c2, datasummary$nclusters[[g]][l]) c3 <- c(c3, "") } } } else if (clustered) { c1 <- c( c1, paste("Number of clusters [", datasummary$cluster, "]:", sep = "") ) c2 <- c(c2, "") c3 <- c(c3, "") for (g in 1:datasummary$ngroups) { c1 <- c(c1, sprintf(" %-40s", datasummary$group.label[g])) c2 <- c(c2, datasummary$nclusters[[g]][2]) c3 <- c(c3, "") } } } # missing patterns? if (!is.null(datasummary$npatterns)) { if (datasummary$ngroups == 1L) { if (multilevel) { c1 <- c(c1, "Number of missing patterns -- level 1") c2 <- c(c2, datasummary$npatterns) c3 <- c(c3, "") if (!is.null(datasummary$npatterns2)) { c1 <- c(c1, "Number of missing patterns -- level 2") c2 <- c(c2, datasummary$npatterns2) c3 <- c(c3, "") } } else { c1 <- c(c1, "Number of missing patterns") c2 <- c(c2, datasummary$npatterns) c3 <- c(c3, "") } } else { if (multilevel) { c1 <- c(c1, "Number of missing patterns per group:") c2 <- c(c2, "") c3 <- c(c3, "") for (g in 1:datasummary$ngroups) { c1 <- c( c1, paste(sprintf( " %-40s", datasummary$group.label[g] ), "-- level 1") ) c2 <- c(c2, datasummary$npatterns[g]) c3 <- c(c3, "") if (!is.null(datasummary$npatterns2)) { c1 <- c( c1, paste(sprintf( " %-40s", datasummary$group.label[g] ), "-- level 2") ) c2 <- c(c2, datasummary$npatterns2[g]) c3 <- c(c3, "") } } } else { c1 <- c(c1, "Number of missing patterns per group:") c2 <- c(c2, "") c3 <- c(c3, "") for (g in 1:datasummary$ngroups) { c1 <- c(c1, sprintf(" %-40s", datasummary$group.label[g])) c2 <- c(c2, datasummary$npatterns[g]) c3 <- c(c3, "") } } } } # sampling weights? 
if (!is.null(datasummary$sampling.weights)) { c1 <- c(c1, "Sampling weights variable") c2 <- c(c2, datasummary$sampling.weights) c3 <- c(c3, "") } # format c1/c2 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (threecolumn) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) invisible(M) } lavaan/R/lav_dataframe.R0000644000176200001440000000575614627656441014662 0ustar liggesusers# data.frame utilities # Y.R. 11 April 2013 # - 10 nov 2019: * removed lav_dataframe_check_vartype(), as we can simply use # sapply(lapply(frame, class), "[", 1L) (unused anyway) # * removed lav_dataframe_check_ordered() as we can simply use # any(sapply(frame[, ov.names], inherits, "ordered")) # construct vartable, but allow 'ordered/factor' argument to intervene # we do NOT change the data.frame lav_dataframe_vartable <- function(frame = NULL, ov.names = NULL, ov.names.x = NULL, ordered = NULL, factor = NULL, as.data.frame. = FALSE) { if (missing(ov.names)) { var.names <- names(frame) } else { ov.names <- unlist(ov.names, use.names = FALSE) ov.names.x <- unlist(ov.names.x, use.names = FALSE) var.names <- unique(c(ov.names, ov.names.x)) } nvar <- length(var.names) var.idx <- match(var.names, names(frame)) nobs <- integer(nvar) type <- character(nvar) user <- integer(nvar) exo <- ifelse(var.names %in% ov.names.x, 1L, 0L) mean <- numeric(nvar) var <- numeric(nvar) nlev <- integer(nvar) lnam <- character(nvar) for (i in seq_len(nvar)) { x <- frame[[var.idx[i]]] type.x <- class(x)[1L] # correct for matrix with 1 column if (inherits(x, "matrix") && (is.null(dim(x)) || (!is.null(dim) && ncol(x) == 1L))) { type.x <- "numeric" } # correct for integers if (inherits(x, "integer")) { type.x <- "numeric" } # handle the 'labelled' type from the haven package # - if the variable name is not in 'ordered', we assume # it is numeric (for now) 11 March 2018 if (inherits(x, "labelled") && !(var.names[i] %in% ordered)) { type.x <- "numeric" } # handle ordered/factor if (!is.null(ordered) && var.names[i] %in% ordered) { type.x <- "ordered" lev <- sort(unique(x)) # we assume integers! nlev[i] <- length(lev) lnam[i] <- paste(lev, collapse = "|") user[i] <- 1L } else if (!is.null(factor) && var.names[i] %in% factor) { type.x <- "factor" lev <- sort(unique(x)) # we assume integers! nlev[i] <- length(lev) lnam[i] <- paste(lev, collapse = "|") user[i] <- 1L } else { nlev[i] <- nlevels(x) lnam[i] <- paste(levels(x), collapse = "|") } type[i] <- type.x nobs[i] <- sum(!is.na(x)) mean[i] <- ifelse(type.x == "numeric", mean(x, na.rm = TRUE), as.numeric(NA) ) var[i] <- ifelse(type.x == "numeric", var(x, na.rm = TRUE), as.numeric(NA) ) } VAR <- list( name = var.names, idx = var.idx, nobs = nobs, type = type, exo = exo, user = user, mean = mean, var = var, nlev = nlev, lnam = lnam ) if (as.data.frame.) 
  {
    VAR <- as.data.frame(VAR,
      stringsAsFactors = FALSE,
      row.names = 1:length(VAR$name)
    )
    class(VAR) <- c("lavaan.data.frame", "data.frame")
  }
  VAR
}
lavaan/R/lav_test_score.R0000644000176200001440000002631014627656441015075 0ustar liggesusers
# classic score test (= Lagrange Multiplier test)
#
# this function can run in two modes:
#
# MODE 1: 'add'
#   add new parameters that are currently not included in the model
#   (aka fixed to zero), but should be released
#
# MODE 2: 'release' (the default)
#   release existing "==" constraints
#
lavTestScore <- function(object, add = NULL, release = NULL,
                         univariate = TRUE, cumulative = FALSE,
                         epc = FALSE, standardized = epc, cov.std = epc,
                         verbose = FALSE, warn = TRUE,
                         information = "expected") {
  if (!missing(warn)) {
    current.warn <- lav_warn()
    if (lav_warn(warn)) on.exit(lav_warn(current.warn))
  }
  if (!missing(verbose)) {
    current.verbose <- lav_verbose()
    if (lav_verbose(verbose)) on.exit(lav_verbose(current.verbose), TRUE)
  }
  # check object
  stopifnot(inherits(object, "lavaan"))
  lavoptions <- object@Options
  if (object@optim$npar > 0L && !object@optim$converged) {
    lav_msg_stop(gettext("model did not converge"))
  }

  # check for inequality constraints
  PT <- object@ParTable
  if (any(PT$op == ">" | PT$op == "<")) {
    lav_msg_stop(gettext(
      "lavTestScore() does not handle inequality constraints (yet)"))
  }

  # check arguments
  if (cumulative) {
    univariate <- TRUE
  }

  # Mode 1: ADDING new parameters
  if (!is.null(add) && all(nchar(add) > 0L)) {
    # check release argument
    if (!is.null(release)) {
      lav_msg_stop(gettext(
        "`add' and `release' arguments cannot be used together."))
    }

    # extend model with extra set of parameters
    FIT <- lav_object_extended(object, add = add)
    score <- lavTech(FIT, "gradient.logl")
    Information <- lavTech(FIT, paste("information", information, sep = "."))

    npar <- object@Model@nx.free
    nadd <- FIT@Model@nx.free - npar

    # R
    R.model <- object@Model@con.jac[, , drop = FALSE]
    if (nrow(R.model) > 0L) {
      R.model <- cbind(R.model, matrix(0, nrow(R.model), ncol = nadd))
      R.add <- cbind(matrix(0, nrow = nadd, ncol = npar), diag(nadd))
      R <- rbind(R.model, R.add)
      Z <- cbind(
        rbind(Information, R.model),
        rbind(t(R.model), matrix(0, nrow(R.model), nrow(R.model)))
      )
      Z.plus <- MASS::ginv(Z)
      J.inv <- Z.plus[1:nrow(Information), 1:nrow(Information)]
      r.idx <- seq_len(nadd) + nrow(R.model)
    } else {
      R <- cbind(matrix(0, nrow = nadd, ncol = npar), diag(nadd))
      J.inv <- MASS::ginv(Information)
      r.idx <- seq_len(nadd)
    }

    # lhs/rhs
    lhs <- lav_partable_labels(FIT@ParTable)[FIT@ParTable$user == 10L]
    op <- rep("==", nadd)
    rhs <- rep("0", nadd)
    Table <- data.frame(
      lhs = lhs, op = op, rhs = rhs,
      stringsAsFactors = FALSE
    )
    class(Table) <- c("lavaan.data.frame", "data.frame")
  } else {
    # MODE 2: releasing constraints
    R <- object@Model@con.jac[, , drop = FALSE]
    if (nrow(R) == 0L) {
      lav_msg_stop(gettext("no equality constraints found in model."))
    }
    score <- lavTech(object, "gradient.logl")
    Information <- lavTech(
      object,
      paste("information", information, sep = ".")
    )
    J.inv <- MASS::ginv(Information)
    # FIXME: move into if(is.null(release))?
# else written over with Z1.plus if(is.numeric(release)) # R <- object@Model@con.jac[,] if (is.null(release)) { # ALL constraints r.idx <- seq_len(nrow(R)) } else if (is.numeric(release)) { r.idx <- release if (max(r.idx) > nrow(R)) { lav_msg_stop(gettextf( "maximum constraint number (%1$s) is larger than number of constraints (%2$s)", max(r.idx), nrow(R))) } # neutralize the non-needed constraints R1 <- R[-r.idx, , drop = FALSE] Z1 <- cbind( rbind(Information, R1), rbind(t(R1), matrix(0, nrow(R1), nrow(R1))) ) Z1.plus <- MASS::ginv(Z1) J.inv <- Z1.plus[1:nrow(Information), 1:nrow(Information)] } else if (is.character(release)) { lav_msg_stop(gettext("not implemented yet")) } # lhs/rhs eq.idx <- which(object@ParTable$op == "==") if (length(eq.idx) > 0L) { lhs <- object@ParTable$lhs[eq.idx][r.idx] op <- rep("==", length(r.idx)) rhs <- object@ParTable$rhs[eq.idx][r.idx] } Table <- data.frame( lhs = lhs, op = op, rhs = rhs, stringsAsFactors = FALSE ) class(Table) <- c("lavaan.data.frame", "data.frame") } if (object@Data@nlevels == 1L) { N <- object@SampleStats@ntotal if (lavoptions$mimic == "EQS") { N <- N - 1 } } else { # total number of clusters (over groups) N <- 0 for (g in 1:object@SampleStats@ngroups) { N <- N + object@Data@Lp[[g]]$nclusters[[2]] } # score <- score * (2 * object@SampleStats@ntotal) / N score <- score / 2 # -2 * LRT } if (lavoptions$se == "standard") { stat <- as.numeric(N * score %*% J.inv %*% score) } else { # generalized score test lav_msg_warn(gettext("se is not `standard'; not implemented yet; falling back to ordinary score test")) # NOTE!!! # we can NOT use VCOV here, because it reflects the constraints, # and the whole point is to test for these constraints... stat <- as.numeric(N * score %*% J.inv %*% score) } # compute df, taking into account that some of the constraints may # be needed to identify the model (and hence Information is singular) # Information.plus <- Information + crossprod(R) # df <- qr(R[r.idx,,drop = FALSE])$rank + # ( qr(Information)$rank - qr(Information.plus)$rank ) df <- nrow(R[r.idx, , drop = FALSE]) pvalue <- 1 - pchisq(stat, df = df) # total score test TEST <- data.frame( test = "score", X2 = stat, df = df, p.value = pvalue, stringsAsFactors = FALSE ) class(TEST) <- c("lavaan.data.frame", "data.frame") attr(TEST, "header") <- "total score test:" OUT <- list(test = TEST) if (univariate) { TS <- numeric(nrow(R)) EPC.uni <- numeric(nrow(R)) # ignored in release= mode for (r in r.idx) { R1 <- R[-r, , drop = FALSE] Z1 <- cbind( rbind(Information, R1), rbind(t(R1), matrix(0, nrow(R1), nrow(R1))) ) Z1.plus <- MASS::ginv(Z1) Z1.plus1 <- Z1.plus[1:nrow(Information), 1:nrow(Information)] TS[r] <- as.numeric(N * t(score) %*% Z1.plus1 %*% score) if (epc && !is.null(add)) { # EPC.uni[r] <- -1 * utils::tail(as.numeric(score %*% Z1.plus1), # n = nrow(R))[r] # to keep the 'sign' consistent with modindices(), which # uses epc = 'new - old' EPC.uni[r] <- +1 * utils::tail(as.numeric(score %*% Z1.plus1), n = nrow(R) )[r] } } Table2 <- Table Table2$X2 <- TS[r.idx] Table2$df <- rep(1, length(r.idx)) Table2$p.value <- 1 - pchisq(Table2$X2, df = Table2$df) if (epc && !is.null(add)) { Table2$epc <- EPC.uni[r.idx] } attr(Table2, "header") <- "univariate score tests:" OUT$uni <- Table2 } if (cumulative) { TS.order <- sort.int(TS, index.return = TRUE, decreasing = TRUE)$ix ROW.order <- sort.int(TS[r.idx], index.return = TRUE, decreasing = TRUE)$ix TS <- numeric(length(r.idx)) for (r in 1:length(r.idx)) { rcumul.idx <- TS.order[1:r] R1 <- R[-rcumul.idx, , drop = 
        FALSE]
      Z1 <- cbind(
        rbind(Information, R1),
        rbind(t(R1), matrix(0, nrow(R1), nrow(R1)))
      )
      Z1.plus <- MASS::ginv(Z1)
      Z1.plus1 <- Z1.plus[1:nrow(Information), 1:nrow(Information)]
      TS[r] <- as.numeric(N * t(score) %*% Z1.plus1 %*% score)
    }

    Table3 <- Table[ROW.order, ]
    Table3$X2 <- TS
    Table3$df <- seq_len(length(TS))
    Table3$p.value <- 1 - pchisq(Table3$X2, df = Table3$df)
    attr(Table3, "header") <- "cumulative score tests:"
    OUT$cumulative <- Table3
  }

  if (epc) {
    # EPC <- vector("list", length = length(r.idx))
    # for(i in 1:length(r.idx)) {
    #   r <- r.idx[i]
    #   R1 <- R[-r,,drop = FALSE]
    #   Z1 <- cbind( rbind(Information, R1),
    #                rbind(t(R1), matrix(0,nrow(R1),nrow(R1))) )
    #   Z1.plus <- MASS::ginv(Z1)
    #   Z1.plus1 <- Z1.plus[ 1:nrow(Information), 1:nrow(Information) ]
    #   EPC[[i]] <- -1 * as.numeric(score %*% Z1.plus1)
    # }
    #
    # OUT$EPC <- EPC

    # all together
    R1 <- R[-r.idx, , drop = FALSE]
    Z1 <- cbind(
      rbind(Information, R1),
      rbind(t(R1), matrix(0, nrow(R1), nrow(R1)))
    )
    Z1.plus <- MASS::ginv(Z1)
    Z1.plus1 <- Z1.plus[1:nrow(Information), 1:nrow(Information)]
    # EPC.all <- -1 * as.numeric(score %*% Z1.plus1)
    # to keep the 'sign' consistent with modindices(), which
    # uses epc = 'new - old'
    EPC.all <- +1 * as.numeric(score %*% Z1.plus1)

    # create epc table for the 'free' parameters
    if (!is.null(add) && all(nchar(add) > 0L)) {
      LIST <- parTable(FIT)
    } else { ## release mode
      LIST <- parTable(object)
    }
    if (lav_partable_ngroups(LIST) == 1L) {
      LIST$group <- NULL
    }
    nonpar.idx <- which(LIST$op %in% c("==", ":=", "<", ">"))
    if (length(nonpar.idx) > 0L) {
      LIST <- LIST[-nonpar.idx, ]
    }
    LIST$est[LIST$free > 0 & LIST$user != 10] <-
      lav_object_inspect_coef(object, type = "free")
    LIST$est[LIST$user == 10L] <- 0
    LIST$epc <- rep(as.numeric(NA), length(LIST$lhs))
    LIST$epc[LIST$free > 0] <- EPC.all
    LIST$epv <- LIST$est + LIST$epc
    if (standardized) {
      EPC <- LIST$epc
      if (cov.std) {
        # replace epc values for variances by est values
        var.idx <- which(LIST$op == "~~" & LIST$lhs == LIST$rhs &
          LIST$exo == 0L)
        EPC[var.idx] <- LIST$est[var.idx]
      }
      # two problems:
      #   - EPC of variances can be negative, and that is
      #     perfectly legal
      #   - EPC (of variances) can be tiny (near-zero), and we should
      #     not divide by tiny variances
      small.idx <- which(LIST$op == "~~" & LIST$lhs == LIST$rhs &
        abs(EPC) < sqrt(.Machine$double.eps))
      if (length(small.idx) > 0L) {
        EPC[small.idx] <- as.numeric(NA)
      }
      # get the sign
      EPC.sign <- sign(LIST$epc)
      LIST$sepc.lv <- EPC.sign * lav_standardize_lv(object,
        partable = LIST, est = abs(EPC), cov.std = cov.std
      )
      if (length(small.idx) > 0L) {
        LIST$sepc.lv[small.idx] <- 0
      }
      LIST$sepc.all <- EPC.sign * lav_standardize_all(object,
        partable = LIST, est = abs(EPC), cov.std = cov.std
      )
      if (length(small.idx) > 0L) {
        LIST$sepc.all[small.idx] <- 0
      }
      LIST$sepc.nox <- EPC.sign * lav_standardize_all_nox(object,
        partable = LIST, est = abs(EPC), cov.std = cov.std
      )
      if (length(small.idx) > 0L) {
        LIST$sepc.nox[small.idx] <- 0
      }
    }
    LIST$free[LIST$user == 10L] <- 0
    LIST$user <- NULL
    # remove some more columns
    LIST$id <- LIST$ustart <- LIST$exo <- LIST$start <- LIST$se <-
      LIST$prior <- NULL
    if (lav_partable_nblocks(LIST) == 1L) {
      LIST$block <- NULL
      LIST$group <- NULL
      LIST$level <- NULL
    }
    attr(LIST, "header") <-
      "expected parameter changes (epc) and expected parameter values (epv):"
    OUT$epc <- LIST
  }

  OUT
}
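# usage sketch for lavTestScore() (illustration only; closely mirrors the
# examples in ?lavTestScore): fit a CFA with two equality constraints, then
# (a) test releasing the constraints and (b) test adding two new parameters
ilav_test_score_example <- function() {
  HS.model <- "
    visual  =~ x1 + b1*x2 + x3
    textual =~ x4 + b2*x5 + x6
    speed   =~ x7 + b3*x8 + x9
    b1 == b2
    b2 == b3
  "
  fit <- cfa(HS.model, data = HolzingerSwineford1939)
  # MODE 2 (release): score test for the two equality constraints
  out1 <- lavTestScore(fit)
  # MODE 1 (add): score test for two parameters now fixed to zero
  out2 <- lavTestScore(fit, add = "x1 ~~ x9 \n x4 ~~ x7")
  list(release = out1, add = out2)
}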
lavaan/R/lav_data_patterns.R0000644000176200001440000003102414627656441015552 0ustar liggesusers
# get missing patterns
lav_data_missing_patterns <- function(Y, sort.freq = FALSE, coverage = FALSE,
                                      Lp = NULL) {
  # handle two-level data
  if (!is.null(Lp)) {
    Y.orig <- Y
    Z <- NULL
    if (length(Lp$between.idx[[2]]) > 0L) {
      Y <- Y[, -Lp$between.idx[[2]], drop = FALSE]
      z.idx <- which(!duplicated(Lp$cluster.idx[[2]]))
      Z <- Y.orig[z.idx, Lp$between.idx[[2]], drop = FALSE]
    }
  }

  # construct TRUE/FALSE matrix: TRUE if value is observed
  OBS <- !is.na(Y)

  # empty cases
  empty.idx <- which(rowSums(OBS) == 0L)

  # pattern of observed values per observation
  case.id <- apply(1L * OBS, 1L, paste, collapse = "")

  # remove empty patterns
  if (length(empty.idx)) {
    case.id.nonempty <- case.id[-empty.idx]
  } else {
    case.id.nonempty <- case.id
  }

  # sort non-empty patterns (from high occurrence to low occurrence)
  if (sort.freq) {
    TABLE <- sort(table(case.id.nonempty), decreasing = TRUE)
  } else {
    TABLE <- table(case.id.nonempty)
  }

  # unique pattern ids
  pat.id <- names(TABLE)

  # number of patterns
  pat.npatterns <- length(pat.id)

  # case idx per pattern
  pat.case.idx <- lapply(
    seq_len(pat.npatterns),
    function(p) which(case.id == pat.id[p])
  )

  # unique pattern frequencies
  pat.freq <- as.integer(TABLE)

  # first occurrence of each pattern
  pat.first <- match(pat.id, case.id)

  # TRUE/FALSE for each pattern
  pat.obs <- OBS[pat.first, , drop = FALSE] # observed per pattern

  Mp <- list(
    npatterns = pat.npatterns, id = pat.id, freq = pat.freq,
    case.idx = pat.case.idx, pat = pat.obs, empty.idx = empty.idx,
    nel = sum(OBS)
  )

  if (coverage) {
    # FIXME: if we have empty cases, include them in N?
    #        no for now
    Mp$coverage <- crossprod(OBS) / sum(pat.freq)
    # Mp$coverage <- crossprod(OBS) / NROW(Y)
  }

  # additional info in case we have two-level data
  if (!is.null(Lp)) {
    Mp$j.idx <- lapply(
      seq_len(pat.npatterns),
      function(p) Lp$cluster.idx[[2]][Mp$case.idx[[p]]]
    )
    Mp$j1.idx <- lapply(
      seq_len(pat.npatterns),
      function(p) unique.default(Mp$j.idx[[p]])
    )
    Mp$j.freq <- lapply(
      seq_len(pat.npatterns),
      function(p) as.integer(unname(table(Mp$j.idx[[p]])))
    )
    # between-level patterns
    if (!is.null(Z)) {
      Mp$Zp <- lav_data_missing_patterns(Z,
        sort.freq = FALSE,
        coverage = FALSE, Lp = NULL
      )
    }
  }

  Mp
}

# get response patterns (ignore empty cases!)
lav_data_resp_patterns <- function(Y) {
  # construct TRUE/FALSE matrix: TRUE if value is observed
  OBS <- !is.na(Y)

  # empty cases
  empty.idx <- which(rowSums(OBS) == 0L)

  # remove empty cases
  if (length(empty.idx) > 0L) {
    Y <- Y[-empty.idx, , drop = FALSE]
  }

  ntotal <- nrow(Y)
  nvar <- ncol(Y)

  # identify, label and sort response patterns
  id <- apply(Y, MARGIN = 1, paste, collapse = "")

  # sort patterns (from high occurrence to low occurrence)
  TABLE <- sort(table(id), decreasing = TRUE)
  order <- names(TABLE)
  npatterns <- length(TABLE)
  pat <- Y[match(order, id), , drop = FALSE]
  row.names(pat) <- as.character(TABLE)

  # handle NA?
  Y[is.na(Y)] <- -9
  total.patterns <- prod(apply(Y, 2, function(x) length(unique(x))))
  empty.patterns <- total.patterns - npatterns

  # return a list
  # out <- list(nobs=ntotal, nvar=nvar,
  #             id=id, npatterns=npatterns,
  #             order=order, pat=pat)

  # only return pat
  out <- list(
    npatterns = npatterns, pat = pat, total.patterns = total.patterns,
    empty.patterns = empty.patterns
  )
  out
}

# get cluster information
# - cluster can be a vector!
# - clus can contain multiple columns!
lav_data_cluster_patterns <- function(Y = NULL,
                                      clus = NULL, # the cluster ids
                                      cluster = NULL, # the cluster 'names'
                                      multilevel = FALSE,
                                      ov.names = NULL,
                                      ov.names.x = NULL,
                                      ov.names.l = NULL) {
  # how many levels?
nlevels <- length(cluster) + 1L # did we get any data (or is this just for simulateData) if (!is.null(Y) && !is.null(clus)) { haveData <- TRUE } else { haveData <- FALSE } # check clus if (haveData) { stopifnot(ncol(clus) == (nlevels - 1L), nrow(Y) == nrow(clus)) } cluster.size <- vector("list", length = nlevels) cluster.id <- vector("list", length = nlevels) cluster.idx <- vector("list", length = nlevels) nclusters <- vector("list", length = nlevels) cluster.sizes <- vector("list", length = nlevels) ncluster.sizes <- vector("list", length = nlevels) cluster.size.ns <- vector("list", length = nlevels) ov.idx <- vector("list", length = nlevels) ov.x.idx <- vector("list", length = nlevels) ov.y.idx <- vector("list", length = nlevels) both.idx <- vector("list", length = nlevels) within.idx <- vector("list", length = nlevels) within.x.idx <- vector("list", length = nlevels) within.y.idx <- vector("list", length = nlevels) between.idx <- vector("list", length = nlevels) between.x.idx <- vector("list", length = nlevels) between.y.idx <- vector("list", length = nlevels) both.names <- vector("list", length = nlevels) within.names <- vector("list", length = nlevels) within.x.names <- vector("list", length = nlevels) within.y.names <- vector("list", length = nlevels) between.names <- vector("list", length = nlevels) between.x.names <- vector("list", length = nlevels) between.y.names <- vector("list", length = nlevels) # level-1 is special if (haveData) { nclusters[[1]] <- NROW(Y) } # higher levels: for (l in 2:nlevels) { if (haveData) { CLUS <- clus[, (l - 1L)] cluster.id[[l]] <- unique(CLUS) cluster.idx[[l]] <- match(CLUS, cluster.id[[l]]) cluster.size[[l]] <- tabulate(cluster.idx[[l]]) nclusters[[l]] <- length(cluster.size[[l]]) # check if we have more observations than clusters if (nclusters[[1]] == nclusters[[l]]) { lav_msg_stop(gettext("every cluster contains only one observation.")) } mean.cluster.size <- mean(cluster.size[[l]]) if (mean.cluster.size < 1.5) { lav_msg_warn(gettextf( "mean cluster size is %s. 
This means that many clusters only contain a single observation.", mean.cluster.size)) } cluster.sizes[[l]] <- unique(cluster.size[[l]]) ncluster.sizes[[l]] <- length(cluster.sizes[[l]]) cluster.size.ns[[l]] <- as.integer(table(factor(cluster.size[[l]], levels = as.character(cluster.sizes[[l]]) ))) } else { cluster.id[[l]] <- integer(0L) cluster.idx[[l]] <- integer(0L) cluster.size[[l]] <- integer(0L) nclusters[[l]] <- integer(0L) cluster.sizes[[l]] <- integer(0L) ncluster.sizes[[l]] <- integer(0L) cluster.size.ns[[l]] <- integer(0L) } } # for all levels: if (multilevel) { for (l in 1:nlevels) { # index of ov.names for this level ov.idx[[l]] <- match(ov.names.l[[l]], ov.names) # new in 0.6-12: always preserve the order of ov.idx[[l]] idx <- which(ov.names %in% ov.names.l[[1]] & ov.names %in% ov.names.l[[2]]) both.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] idx <- which(ov.names %in% ov.names.l[[1]] & !ov.names %in% ov.names.l[[2]]) within.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] # backwards compatibility: also store in within.idx[[2]] if (l == 2) { within.idx[[l]] <- within.idx[[1]] } idx <- which(!ov.names %in% ov.names.l[[1]] & ov.names %in% ov.names.l[[2]]) between.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] # names # both.names[[l]] <- ov.names[ ov.names %in% ov.names.l[[1]] & # ov.names %in% ov.names.l[[2]] ] # within.names[[l]] <- ov.names[ ov.names %in% ov.names.l[[1]] & # !ov.names %in% ov.names.l[[2]] ] # between.names[[l]] <- ov.names[!ov.names %in% ov.names.l[[1]] & # ov.names %in% ov.names.l[[2]] ] both.names[[l]] <- ov.names[both.idx[[l]]] within.names[[l]] <- ov.names[within.idx[[l]]] between.names[[l]] <- ov.names[between.idx[[l]]] } } # fixed.x wrt variable index if (multilevel && length(ov.names.x) > 0L) { for (l in 1:nlevels) { # some ov.names.x could be 'splitted', and end up in both.names # they should NOT be part ov.x.idx (as they become latent variables) idx <- which(ov.names %in% ov.names.x & ov.names %in% ov.names.l[[l]] & !ov.names %in% unlist(both.names)) ov.x.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] # not any longer, we split them, but still treat them as 'fixed' # ov.x.idx[[l]] <- which( ov.names %in% ov.names.x & # ov.names %in% ov.names.l[[l]] ) # if some ov.names.x have been 'splitted', and end up in both.names, # they should become part of ov.y.idx (despite being exogenous) # as they are now latent variables idx <- which(ov.names %in% ov.names.l[[l]] & !ov.names %in% ov.names.x[!ov.names.x %in% unlist(both.names)]) ov.y.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] # not any longer, ov.x stays ov.x (even if we split) # ov.y.idx[[l]] <- which( ov.names %in% ov.names.l[[l]] & # !ov.names %in% ov.names.x ) # if(l == 1L) { # next # } # below, we only fill in the [[2]] element (and higher) idx <- which(ov.names %in% ov.names.l[[1]] & !ov.names %in% ov.names.l[[2]] & ov.names %in% ov.names.x) within.x.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] # backwards compatibility: also store in within.x.idx[[2]] if (l == 2) { within.x.idx[[l]] <- within.x.idx[[1]] } idx <- which(ov.names %in% ov.names.l[[1]] & !ov.names %in% ov.names.l[[2]] & !ov.names %in% ov.names.x) within.y.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] # backwards compatibility: also store in within.y.idx[[2]] if (l == 2) { within.y.idx[[l]] <- within.y.idx[[1]] } idx <- which(!ov.names %in% ov.names.l[[1]] & ov.names %in% ov.names.l[[2]] & ov.names %in% ov.names.x) between.x.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx] idx <- which(!ov.names %in% ov.names.l[[1]] & ov.names %in% 
        ov.names.l[[2]] & !ov.names %in% ov.names.x)
      between.y.idx[[l]] <- ov.idx[[l]][ov.idx[[l]] %in% idx]

      # within.x.names[[l]] <- ov.names[ ov.names %in% ov.names.l[[1]] &
      #                                  ov.names %in% ov.names.x &
      #                                 !ov.names %in% ov.names.l[[2]] ]
      # within.y.names[[l]] <- ov.names[ ov.names %in% ov.names.l[[1]] &
      #                                 !ov.names %in% ov.names.x &
      #                                 !ov.names %in% ov.names.l[[2]] ]
      # between.x.names[[l]] <- ov.names[!ov.names %in% ov.names.l[[1]] &
      #                                   ov.names %in% ov.names.x &
      #                                   ov.names %in% ov.names.l[[2]] ]
      # between.y.names[[l]] <- ov.names[!ov.names %in% ov.names.l[[1]] &
      #                                  !ov.names %in% ov.names.x &
      #                                   ov.names %in% ov.names.l[[2]] ]
      within.x.names[[l]] <- ov.names[within.x.idx[[l]]]
      within.y.names[[l]] <- ov.names[within.y.idx[[l]]]
      between.x.names[[l]] <- ov.names[between.x.idx[[l]]]
      between.y.names[[l]] <- ov.names[between.y.idx[[l]]]
    }
  } else {
    ov.y.idx <- ov.idx
  }

  out <- list(
    ov.names = ov.names, ov.names.x = ov.names.x, # for this group
    cluster = cluster,
    # clus = clus,
    # per level
    nclusters = nclusters, cluster.size = cluster.size,
    cluster.id = cluster.id, cluster.idx = cluster.idx,
    cluster.sizes = cluster.sizes, ncluster.sizes = ncluster.sizes,
    cluster.size.ns = cluster.size.ns, ov.idx = ov.idx,
    ov.x.idx = ov.x.idx, ov.y.idx = ov.y.idx, both.idx = both.idx,
    within.idx = within.idx, within.x.idx = within.x.idx,
    within.y.idx = within.y.idx, between.idx = between.idx,
    between.x.idx = between.x.idx, between.y.idx = between.y.idx,
    both.names = both.names, within.names = within.names,
    within.x.names = within.x.names, within.y.names = within.y.names,
    between.names = between.names, between.x.names = between.x.names,
    between.y.names = between.y.names
  )

  out
}
lavaan/R/lav_partable_efa.R0000644000176200001440000002053414627656441015332 0ustar liggesusers
# generate a parameter table for an EFA model
#
# YR 20 Sept 2022: initial version
lav_partable_generate_efa <- function(ov.names = NULL, nfactors = 1L,
                                      meanstructure = FALSE,
                                      ordered = NULL, varTable = NULL) {
  # currently, we support only a single block (but we plan for more)
  nblocks <- 1L
  ov.names <- list(ov.names)

  # currently, we only support continuous data (but ordered is planned)
  stopifnot(is.null(ordered))

  lhs <- rhs <- op <- character(0)
  block <- free <- integer(0)
  ustart <- numeric(0)

  # create factor names
  lv.names <- paste("f", 1:nfactors, sep = "")

  # block number
  for (b in seq_len(nblocks)) {
    # ov.names for this block
    OV.NAMES <- ov.names[[b]]
    nvar <- length(OV.NAMES)
    nel <- nvar * nfactors

    # get 'ordered' variables from varTable
    categorical <- FALSE
    if (!is.null(varTable)) {
      ov.names.ord <- as.character(varTable$name[varTable$type == "ordered"])
      # remove those that do not appear in the model syntax
      idx <- which(!ov.names.ord %in% OV.NAMES)
      if (length(idx) > 0L) {
        ov.names.ord <- ov.names.ord[-idx]
      }
      if (length(ov.names.ord) > 0L) {
        ov.names.ord <- OV.NAMES[OV.NAMES %in% ov.names.ord]
        categorical <- TRUE
      }
    }

    # a) factor loadings
    lhs <- c(lhs, rep(lv.names, each = nvar))
    op <- c(op, rep("=~", nel))
    rhs <- c(rhs, rep(OV.NAMES, times = nfactors))
    block <- c(block, rep(b, nel))
    # group <- c(group, rep(1L, nel))
    # level <- c(level, rep(1L, nel))
    free <- c(free, rep(1L, nel)) # for now
    ustart <- c(ustart, rep(as.numeric(NA), nel))

    # b) ov variances
    lhs <- c(lhs, OV.NAMES)
    op <- c(op, rep("~~", nvar))
    rhs <- c(rhs, OV.NAMES)
    block <- c(block, rep(b, nvar))
    # group <- c(group, rep(1L, nvar))
    # level <- c(level, rep(1L, nvar))
    free <- c(free, rep(1L, nvar))
    ustart <- c(ustart, rep(as.numeric(NA), nvar))

    # c) lv variances
    lhs <- c(lhs, lv.names)
    op <- c(op, rep("~~", nfactors))
    rhs <- c(rhs, lv.names)
    block <- c(block, rep(b, nfactors))
    # group <- c(group, rep(1L, nfactors))
    # level <- c(level, rep(1L, nfactors))
    free <- c(free, rep(0L, nfactors)) # standardized!
    ustart <- c(ustart, rep(1, nfactors))

    # d) lv covariances
    pstar <- nfactors * (nfactors - 1) / 2
    if (pstar > 0L) { # only if more than 1 factor
      tmp <- utils::combn(lv.names, 2)
      lhs <- c(lhs, tmp[1, ]) # to fill upper.tri
      op <- c(op, rep("~~", pstar))
      rhs <- c(rhs, tmp[2, ])
      block <- c(block, rep(b, pstar))
      # group <- c(group, rep(g, pstar))
      # level <- c(level, rep(l, pstar))
      free <- c(free, rep(1L, pstar)) # to be changed...
      ustart <- c(ustart, rep(as.numeric(NA), pstar))
    }

    if (meanstructure) {
      # e) ov means/intercepts
      lhs <- c(lhs, OV.NAMES)
      op <- c(op, rep("~1", nvar))
      rhs <- c(rhs, rep("", nvar))
      block <- c(block, rep(b, nvar))
      # group <- c(group, rep(1L, nvar))
      # level <- c(level, rep(1L, nvar))
      free <- c(free, rep(1L, nvar))
      ustart <- c(ustart, rep(as.numeric(NA), nvar))

      # f) lv means/intercepts
      lhs <- c(lhs, lv.names)
      op <- c(op, rep("~1", nfactors))
      rhs <- c(rhs, rep("", nfactors))
      block <- c(block, rep(b, nfactors))
      # group <- c(group, rep(1L, nfactors))
      # level <- c(level, rep(1L, nfactors))
      free <- c(free, rep(0L, nfactors))
      ustart <- c(ustart, rep(0, nfactors))
    } # meanstructure
  } # blocks

  # create LIST
  LIST <- list(
    id = 1:length(lhs),
    lhs = lhs,
    op = op,
    rhs = rhs,
    user = rep(0L, length(lhs)), # all system-generated
    block = block,
    group = rep(1L, length(lhs)),
    # level = level,
    free = free,
    ustart = ustart,
    exo = rep(0L, length(lhs)),
    label = rep("", length(lhs)),
    efa = rep("", length(lhs))
  )

  # add 'efa' column with a single block string (i.e., "efa")
  LIST$efa[LIST$op == "=~"] <- "efa"

  # take care of EFA constraints
  LIST <- lav_partable_efa_constraints(LIST)

  # free counter
  idx.free <- which(LIST$free > 0)
  LIST$free[idx.free] <- seq_along(idx.free)

  # needed?
  LIST <- lav_partable_complete(LIST)

  LIST
}
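# a small sketch of the generated table (illustration only; assumes the
# internal helpers lav_partable_efa_constraints() and lav_partable_complete()
# behave as above): six indicators, two factors; the echelon pattern fixes
# the loading of f2 on the first indicator to zero
ilav_partable_efa_example <- function() {
  LIST <- lav_partable_generate_efa(
    ov.names = paste0("x", 1:6),
    nfactors = 2L
  )
  # show only the factor loadings
  TMP <- as.data.frame(LIST, stringsAsFactors = FALSE)
  TMP[TMP$op == "=~", c("lhs", "op", "rhs", "free", "ustart")]
}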
if (all(nchar(this.label.set) > 0L) && all(this.label.set %in% LABEL.set1)) { fix.to.zero <- FALSE } } # 2. across groups if (b == 1L) { set.idx <- which(LIST$op == "=~" & LIST$block == b & LIST$lhs %in% lv.nam.efa) LABEL.group1 <- LIST$label[set.idx] } else { if ("loadings" %in% group.equal) { fix.to.zero <- FALSE } else { # collect labels for this set, if any set.idx <- which(LIST$op == "=~" & LIST$block == b & LIST$lhs %in% lv.nam.efa) # user-provided labels (if any) this.label.set <- LIST$label[set.idx] # same as in reference set? if (all(nchar(this.label.set) > 0L) && all(this.label.set %in% LABEL.group1)) { fix.to.zero <- FALSE } } } # 1. echelon pattern nfac <- length(lv.nam.efa) for (f in seq_len(nfac)) { if (f == 1L) { next } nzero <- (f - 1L) ind.idx <- which(LIST$op == "=~" & LIST$block == b & LIST$lhs %in% lv.nam.efa[f]) if (length(ind.idx) < nzero) { lav_msg_stop(gettextf( "efa factor %s has not enough indicators for echelon pattern", lv.nam.efa[f])) } # fix to zero if (fix.to.zero) { LIST$free[ind.idx[seq_len(nzero)]] <- 0L LIST$ustart[ind.idx[seq_len(nzero)]] <- 0 LIST$user[ind.idx[seq_len(nzero)]] <- 7L } else { LIST$user[ind.idx[seq_len(nzero)]] <- 77L } } # 2. covariances constrained to zero (only if oblique rotation) if (!orthogonal.efa) { # skip if user == 1 (user-override!) cov.idx <- which(LIST$op == "~~" & LIST$block == b & LIST$user == 0L & LIST$lhs %in% lv.nam.efa & LIST$rhs %in% lv.nam.efa & LIST$lhs != LIST$rhs) # fix to zero if (fix.to.zero) { LIST$free[cov.idx] <- 0L LIST$ustart[cov.idx] <- 0 LIST$user[cov.idx] <- 7L } else { LIST$user[cov.idx] <- 77L } } } # sets } # blocks LIST } lavaan/R/lav_fit_gfi.R0000644000176200001440000000760014627656441014333 0ustar liggesusers# functions related to GFI and other 'absolute' fit indices # lower-level functions: # - lav_fit_gfi # - lav_fit_agfi # - lav_fit_pgfi # higher-level functions: # - lav_fit_gfi_lavobject # Y.R. 21 July 2022 # original formulas were given in Joreskog and Sorbom (1984) user's guide # for LISREL VI (one for ML, and another for ULS) # here we use the more 'general' formulas # (generalized to allow for meanstructures etc) # References: # Mulaik, S. A., James, L. R., Van Alstine, J., Bennett, N., Lind, S., & # Stilwell, C. D. (1989). Evaluation of goodness-of-fit indices for structural # equation models. Psychological bulletin, 105(3), 430. # Tanaka, J. S., & Huba, G. J. (1985). A fit index for covariance structure # models under arbitrary GLS estimation. British Journal of Mathematical and # Statistical Psychology, 38,197-201. 
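# a small numeric sketch (hypothetical values, not lavaan output) of the
# quadratic form used below; with a *diagonal* weight matrix the two
# terms reduce to weighted sums of squares:
#   wls.obs <- c(1.0, 0.5, 1.0)   # observed sample statistics
#   wls.est <- c(1.0, 0.45, 1.0)  # model-implied counterparts
#   wls.vd  <- c(2, 1, 2)         # diagonal of the weight matrix
#   1 - sum((wls.obs - wls.est)^2 * wls.vd) / sum(wls.obs^2 * wls.vd)
#   # ~0.9994: GFI approaches 1 as the model reproduces the statistics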
lav_fit_gfi <- function(WLS.obs = NULL, WLS.est = NULL, WLS.V = NULL, NOBS = NULL) { # number of groups G <- length(WLS.obs) # compute gfi per group gfi.group <- numeric(G) for (g in 1:G) { wls.obs <- WLS.obs[[g]] wls.est <- WLS.est[[g]] wls.v <- WLS.V[[g]] if (is.null(wls.v)) { gfi.group[g] <- as.numeric(NA) } else { wls.diff <- wls.obs - wls.est if (is.matrix(wls.v)) { # full weight matrix t1 <- crossprod(wls.diff, wls.v) %*% wls.diff t2 <- crossprod(wls.obs, wls.v) %*% wls.obs } else { # diagonal weight matrix t1 <- as.numeric(crossprod(wls.diff^2, wls.v)) t2 <- as.numeric(crossprod(wls.obs^2, wls.v)) } gfi.group[g] <- 1 - t1 / t2 } } if (G > 1) { ## CHECKME: get the scaling right NOBS <- unlist(NOBS) GFI <- as.numeric((NOBS %*% gfi.group) / sum(NOBS)) } else { GFI <- gfi.group[1L] } GFI } # 'adjusted' GFI (adjusted for degrees of freedom) lav_fit_agfi <- function(GFI = NULL, nel = NULL, df = NULL) { if (!is.finite(GFI) || !is.finite(nel) || !is.finite(df)) { AGFI <- as.numeric(NA) } else if (df > 0) { AGFI <- 1 - (nel / df) * (1 - GFI) } else { AGFI <- 1 } AGFI } # PGFI: parsimony goodness-of-fit index # Mulaik, S. A., James, L. R., Van Alstine, J., Bennett, N., Lind, S., & # Stilwell, C. D. (1989). Evaluation of goodness-of-fit indices for structural # equation models. Psychological bulletin, 105(3), 430. # LISREL formula (Simplis book 2002, p. 126) lav_fit_pgfi <- function(GFI = NULL, nel = NULL, df = NULL) { if (!is.finite(GFI) || !is.finite(nel) || !is.finite(df)) { PGFI <- as.numeric(NA) } else if (nel == 0) { PGFI <- as.numeric(NA) } else { PGFI <- (df / nel) * GFI } PGFI } lav_fit_gfi_lavobject <- function(lavobject = NULL, fit.measures = "gfi") { # check lavobject stopifnot(inherits(lavobject, "lavaan")) # possible fit measures fit.gfi <- c("gfi", "agfi", "pgfi") # which one do we need? if (missing(fit.measures)) { # default set fit.measures <- fit.gfi } else { # remove any not-GFI related index from fit.measures rm.idx <- which(!fit.measures %in% fit.gfi) if (length(rm.idx) > 0L) { fit.measures <- fit.measures[-rm.idx] } if (length(fit.measures) == 0L) { return(list()) } } # extract ingredients WLS.obs <- lav_object_inspect_wls_obs(lavobject) WLS.est <- lav_object_inspect_wls_est(lavobject) WLS.V <- lav_object_inspect_wls_v(lavobject) NOBS <- lavobject@SampleStats@nobs # compute GFI GFI <- lav_fit_gfi( WLS.obs = WLS.obs, WLS.est = WLS.est, WLS.V = WLS.V, NOBS = NOBS ) # total number of modeled sample stats nel <- length(unlist(WLS.obs)) # degrees of freedom df <- lavobject@test[[1]]$df # container indices <- list() indices["gfi"] <- GFI indices["agfi"] <- lav_fit_agfi(GFI = GFI, nel = nel, df = df) indices["pgfi"] <- lav_fit_pgfi(GFI = GFI, nel = nel, df = df) # return only those that were requested indices[fit.measures] } lavaan/R/lav_sam_step1.R0000644000176200001440000005732014627656441014624 0ustar liggesusers# step 1 in SAM: fitting the measurement blocks lav_sam_step1 <- function(cmd = "sem", mm.list = NULL, mm.args = list(), FIT = FIT, data = NULL, sam.method = "local") { lavoptions <- FIT@Options lavpta <- FIT@pta PT <- FIT@ParTable nblocks <- lavpta$nblocks ngroups <- lavpta$ngroups if (lav_verbose()) { cat("Fitting the measurement part:\n") } # local only -> handle missing data if (sam.method %in% c("local", "fsr")) { # if missing = "listwise", make data complete, to avoid different # datasets per measurement block if (lavoptions$missing == "listwise") { # FIXME: make this work for multiple groups!! 
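      # a minimal sketch of the intent (hypothetical variable names): with
      #   OV <- c("x1", "x2", "x3", "school")  # model vars + group/cluster/weights
      # na.omit(data[, OV]) keeps only rows complete on all of them, so every
      # measurement block below is fitted to the same set of cases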
      OV <- unique(unlist(FIT@pta$vnames$ov))
      # add group/cluster/sample.weights variables (if any)
      OV <- c(OV, FIT@Data@group, FIT@Data@cluster,
              FIT@Data@sampling.weights)
      data <- na.omit(data[, OV])
    }
  }

  # total number of free parameters
  if (FIT@Model@ceq.simple.only) {
    npar <- FIT@Model@nx.unco
    PT.free <- PT$free
    PT.free[PT.free > 0] <- seq_len(npar)
  } else {
    npar <- FIT@Model@nx.free
    PT.free <- PT$free
  }
  if (npar < 1L) {
    lav_msg_stop(gettext("model does not contain any free parameters"))
  }

  # check for higher-order factors
  # 0.6-11: hard stop for now, as we do not support them (yet)!
  LV.IND.names <- unique(unlist(FIT@pta$vnames$lv.ind))
  if (length(LV.IND.names) > 0L) {
    lav_msg_stop(gettext(
      "model contains indicators that are also latent variables:"),
      lav_msg_view(LV.IND.names, "none"))
    # ind.idx <- match(LV.IND.names, LV.names)
    # LV.names <- LV.names[-ind.idx]
  }

  # do we have at least 1 'regular' (measured) latent variable?
  LV.names <- unique(unlist(FIT@pta$vnames$lv.regular))
  if (length(LV.names) == 0L) {
    lav_msg_stop(gettext(
      "model does not contain any (measured) latent variables; use sem() instead"))
  }

  # how many measurement models?
  if (!is.null(mm.list)) {
    nMMblocks <- length(mm.list)
    # check each measurement block
    for (b in seq_len(nMMblocks)) {
      # check if we can find all lv names in LV.names
      if (!all(unlist(mm.list[[b]]) %in% LV.names)) {
        tmp <- unlist(mm.list[[b]])
        lav_msg_stop(gettext("mm.list contains unknown latent variable(s):"),
                     lav_msg_view(tmp[!tmp %in% LV.names], "none"))
      }
      # make list per block
      if (!is.list(mm.list[[b]])) {
        mm.list[[b]] <- rep(list(mm.list[[b]]), nblocks)
      } else {
        if (length(mm.list[[b]]) != nblocks) {
          lav_msg_stop(gettextf(
            "mm.list block %1$s has length %2$s but nblocks = %3$s",
            b, length(mm.list[[b]]), nblocks))
        }
      }
    }
  } else {
    # TODO: here comes the automatic 'detection' of linked
    #       measurement models
    #
    # for now we take a single latent variable per measurement model block
    mm.list <- as.list(LV.names)
    nMMblocks <- length(mm.list)
    for (b in seq_len(nMMblocks)) {
      # make list per block
      mm.list[[b]] <- rep(list(mm.list[[b]]), nblocks)
    }
  }

  # adjust options for measurement models
  lavoptions.mm <- lavoptions
  lavoptions.mm$optim.bounds <- NULL
  if (lavoptions$se == "none") {
    lavoptions.mm$se <- "none"
  } else {
    # categorical?
    if (FIT@Model@categorical) {
      lavoptions.mm$se <- "robust.sem"
    } else if (lavoptions$estimator.orig == "MLM") {
      lavoptions.mm$se <- "robust.sem"
    } else if (lavoptions$estimator.orig == "MLR") {
      lavoptions.mm$se <- "robust.huber.white"
    } else if (lavoptions$estimator.orig == "PML") {
      lavoptions.mm$se <- "robust.huber.white"
    } else {
      lavoptions.mm$se <- "standard" # may be overridden later
    }
  }
  # if(sam.method == "global") {
  #   lavoptions.mm$test <- "none"
  # }
  # we need the tests to create summary info about MM
  lavoptions.mm$check.post <- FALSE # neg lv variances may be overridden
  lavoptions.mm$check.gradient <- FALSE # too sensitive in large models (global)
  lavoptions.mm$baseline <- FALSE
  lavoptions.mm$bounds <- "wide.zerovar"

  # override with user-specified mm.args
  lavoptions.mm <- modifyList(lavoptions.mm, mm.args)

  # create MM slotOptions
  slotOptions.mm <- lav_options_set(lavoptions.mm)

  # we assume the same number/names of lv's per group!!!
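  # at this point mm.list is a nested list with one element per measurement
  # block, replicated over blocks/levels; e.g. (hypothetical) with
  # LV.names = c("f1", "f2") and nblocks = 2, the default is
  #   mm.list = list(list("f1", "f1"), list("f2", "f2"))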
MM.FIT <- vector("list", nMMblocks) # fitted object # for joint model later if (lavoptions$se != "none") { Sigma.11 <- matrix(0, npar, npar) } step1.free.idx <- integer(0L) # NOTE: we should explicitly add zero-constrained LV covariances # to PT, and keep them zero in PTM if (cmd == "lavaan") { add.lv.cov <- FALSE } else { add.lv.cov <- TRUE } # fit mm model for each measurement block for (mm in seq_len(nMMblocks)) { if (lav_verbose()) { cat( " block ", mm, "[", paste(mm.list[[mm]], collapse = " "), "]\n" ) } # create parameter table for this measurement block only PTM <- lav_partable_subset_measurement_model( PT = PT, add.lv.cov = add.lv.cov, add.idx = TRUE, lv.names = mm.list[[mm]] ) mm.idx <- attr(PTM, "idx") attr(PTM, "idx") <- NULL PTM$est <- NULL PTM$se <- NULL # check for categorical in PTM in this mm-block if (!any(PTM$op == "|")) { slotOptions.mm$categorical <- FALSE slotOptions.mm$.categorical <- FALSE } # update slotData for this measurement block ov.names.block <- lapply(1:ngroups, function(g) { unique(unlist(lav_partable_vnames(PTM, type = "ov", group = g))) }) slotData.block <- lav_data_update_subset(FIT@Data, ov.names = ov.names.block ) # handle single block 1-factor CFA with (only) two indicators if (length(unlist(ov.names.block)) == 2L && ngroups == 1L) { lambda.idx <- which(PTM$op == "=~") # check if both factor loadings are fixed # (note: this assumes std.lv = FALSE) if(any(PTM$free[lambda.idx] != 0)) { PTM$free[ lambda.idx] <- 0L PTM$ustart[lambda.idx] <- 1 PTM$start[ lambda.idx] <- 1 free.idx <- which(as.logical(PTM$free)) # adjust free counter if (length(free.idx) > 0L) { PTM$free[free.idx] <- seq_len(length(free.idx)) } # warn about it (needed?) lav_msg_warn(gettextf( "measurement block [%1$s] (%2$s) contains only two indicators; -> fixing both factor loadings to unity", mm, lav_msg_view(mm.list[[mm]], "none"))) } } # fit this measurement model only # (question: can we re-use even more slots?) fit.mm.block <- lavaan( model = PTM, slotData = slotData.block, slotOptions = slotOptions.mm, debug = FALSE, verbose = FALSE ) # check convergence if (!lavInspect(fit.mm.block, "converged")) { # warning for now, but this is not good! 
lav_msg_warn(gettextf( "measurement model for %s did not converge!", lav_msg_view(mm.list[[mm]], "none"))) } # store fitted measurement model MM.FIT[[mm]] <- fit.mm.block # fill in point estimates measurement block (including slack values) PTM <- MM.FIT[[mm]]@ParTable # pt.idx: the row-numbers in PT that correspond to the rows in PTM # pt.idx <- lav_partable_map_id_p1_in_p2(p1 = PTM, p2 = PT, # stopifnotfound = TRUE, exclude.nonpar = FALSE) # pt.idx == mm.idx ptm.idx <- which((PTM$free > 0L | PTM$op %in% c(":=", "<", ">")) & PTM$user != 3L) PT$est[mm.idx[ptm.idx]] <- PTM$est[ptm.idx] # if categorical, add non-free residual variances if (fit.mm.block@Model@categorical || fit.mm.block@Model@correlation) { extra.idx <- which(PTM$op %in% c("~~", "~*~") & PTM$lhs == PTM$rhs & PTM$user == 0L & PTM$free == 0L & PTM$ustart == 1) if (length(extra.idx) > 0L) { PT$est[mm.idx[extra.idx]] <- PTM$est[extra.idx] } } # if EFA, add user=7 values (but do not add to ptm.idx) user7.idx <- which(PTM$user == 7L) if (length(user7.idx)) { PT$est[mm.idx[user7.idx]] <- PTM$est[user7.idx] } # fill in standard errors measurement block if (lavoptions$se != "none") { if (fit.mm.block@Model@ceq.simple.only) { PTM.free <- PTM$free PTM.free[PTM.free > 0] <- seq_len(fit.mm.block@Model@nx.unco) } else { PTM.free <- PTM$free } ptm.se.idx <- which((PTM$free > 0L) & PTM$user != 3L) # no :=, <, > # PT$se[ seq_len(length(PT$lhs)) %in% mm.idx & PT$free > 0L ] <- # PTM$se[ PTM$free > 0L & PTM$user != 3L] PT$se[mm.idx[ptm.se.idx]] <- PTM$se[ptm.se.idx] # compute variance matrix for this measurement block sigma.11 <- MM.FIT[[mm]]@vcov$vcov # fill in variance matrix par.idx <- PT.free[mm.idx[ptm.idx]] keep.idx <- PTM.free[ptm.idx] # par.idx <- PT.free[ seq_len(length(PT$lhs)) %in% mm.idx & # PT$free > 0L ] # keep.idx <- PTM.free[ PTM$free > 0 & PTM$user != 3L ] Sigma.11[par.idx, par.idx] <- sigma.11[keep.idx, keep.idx, drop = FALSE] # store (ordered) indices in step1.free.idx step1.free.idx <- c(step1.free.idx, sort.int(par.idx)) } } # measurement block # only keep 'measurement part' parameters in Sigma.11 if (lavoptions$se != "none") { Sigma.11 <- Sigma.11[step1.free.idx, step1.free.idx, drop = FALSE] } else { Sigma.11 <- NULL } # create STEP1 list STEP1 <- list( MM.FIT = MM.FIT, Sigma.11 = Sigma.11, step1.free.idx = step1.free.idx, PT.free = PT.free, mm.list = mm.list, PT = PT ) STEP1 } ## STEP 1b: compute Var(eta) and E(eta) per block ## only needed for local/fsr approach! lav_sam_step1_local <- function(STEP1 = NULL, FIT = NULL, sam.method = "local", local.options = list( M.method = "ML", lambda.correction = TRUE, alpha.correction = 0L, twolevel.method = "h1" )) { # local.M.method local.M.method <- toupper(local.options[["M.method"]]) if (!local.M.method %in% c("GLS", "ML", "ULS")) { lav_msg_stop(gettext( "local option M.method should be one of GLS, ML or ULS.")) } # local.twolevel.method local.twolevel.method <- tolower(local.options[["twolevel.method"]]) if (!local.twolevel.method %in% c("h1", "anova", "mean")) { lav_msg_stop(gettext( "local option twolevel.method should be one of h1, anova or mean.")) } lavoptions <- FIT@Options lavpta <- FIT@pta ngroups <- lavpta$ngroups nlevels <- lavpta$nlevels nblocks <- lavpta$nblocks nMMblocks <- length(STEP1$MM.FIT) mm.list <- STEP1$mm.list if (length(unlist(lavpta$vnames$lv.interaction)) > 0L) { lv.interaction.flag <- TRUE } else { lv.interaction.flag <- FALSE } if (lav_verbose()) { cat("Constructing the mapping matrix using the ", local.M.method, " method ... 
", sep = "" ) } LAMBDA.list <- vector("list", nMMblocks) THETA.list <- vector("list", nMMblocks) NU.list <- vector("list", nMMblocks) DELTA.list <- vector("list", nMMblocks) # correlation/categorical LV.idx.list <- vector("list", nMMblocks) OV.idx.list <- vector("list", nMMblocks) for (mm in seq_len(nMMblocks)) { fit.mm.block <- STEP1$MM.FIT[[mm]] # LV.idx.list/OV.idx.list: list per block LV.idx.list[[mm]] <- vector("list", nblocks) OV.idx.list[[mm]] <- vector("list", nblocks) # store LAMBDA/THETA LAMBDA.list[[mm]] <- computeLAMBDA(fit.mm.block@Model) THETA.list[[mm]] <- computeTHETA(fit.mm.block@Model) if (fit.mm.block@Model@meanstructure) { NU.list[[mm]] <- computeNU(fit.mm.block@Model, lavsamplestats = fit.mm.block@SampleStats ) } if (fit.mm.block@Model@categorical || fit.mm.block@Model@correlation) { delta.idx <- which(names(fit.mm.block@Model@GLIST) == "delta") DELTA.list[[mm]] <- fit.mm.block@Model@GLIST[delta.idx] } for (bb in seq_len(nblocks)) { lambda.idx <- which(names(FIT@Model@GLIST) == "lambda")[bb] ind.names <- fit.mm.block@pta$vnames$ov.ind[[bb]] LV.idx.list[[mm]][[bb]] <- match( mm.list[[mm]][[bb]], FIT@Model@dimNames[[lambda.idx]][[2]] ) OV.idx.list[[mm]][[bb]] <- match( ind.names, FIT@Model@dimNames[[lambda.idx]][[1]] ) } # nblocks } ## nMMblocks # assemble global LAMBDA/THETA (per block) LAMBDA <- computeLAMBDA(FIT@Model, handle.dummy.lv = FALSE) THETA <- computeTHETA(FIT@Model, fix = FALSE) # keep dummy lv if (FIT@Model@meanstructure) { NU <- computeNU(FIT@Model, lavsamplestats = FIT@SampleStats) } if (FIT@Model@categorical || FIT@Model@correlation) { delta.idx <- which(names(FIT@Model@GLIST) == "delta") DELTA <- FIT@Model@GLIST[delta.idx] } for (b in seq_len(nblocks)) { for (mm in seq_len(nMMblocks)) { ov.idx <- OV.idx.list[[mm]][[b]] lv.idx <- LV.idx.list[[mm]][[b]] LAMBDA[[b]][ov.idx, lv.idx] <- LAMBDA.list[[mm]][[b]] THETA[[b]][ov.idx, ov.idx] <- THETA.list[[mm]][[b]] # new in 0.6-10: check if any indicators are also involved # in the structural part; if so, set THETA row/col to zero # and make sure LAMBDA element is correctly set # (we also need to adjust M) dummy.ov.idx <- FIT@Model@ov.y.dummy.ov.idx[[b]] dummy.lv.idx <- FIT@Model@ov.y.dummy.lv.idx[[b]] if (length(dummy.ov.idx)) { THETA[[b]][dummy.ov.idx, ] <- 0 THETA[[b]][, dummy.ov.idx] <- 0 LAMBDA[[b]][dummy.ov.idx, ] <- 0 LAMBDA[[b]][cbind(dummy.ov.idx, dummy.lv.idx)] <- 1 } if (FIT@Model@meanstructure) { NU[[b]][ov.idx, 1] <- NU.list[[mm]][[b]] if (length(dummy.ov.idx)) { NU[[b]][dummy.ov.idx, 1] <- 0 } } if ((FIT@Model@categorical || FIT@Model@correlation) && !is.null(DELTA.list[[mm]][[b]])) { # could be mixed cat/cont DELTA[[b]][ov.idx, 1] <- DELTA.list[[mm]][[b]] } } # remove 'lv.interaction' columns from LAMBDA[[b]] # if (length(lavpta$vidx$lv.interaction[[b]]) > 0L) { # LAMBDA[[b]] <- LAMBDA[[b]][, -lavpta$vidx$lv.interaction[[b]]] # } # check if LAMBDA has full column rank this.lambda <- LAMBDA[[b]] if (length(lavpta$vidx$lv.interaction[[b]]) > 0L) { this.lambda <- this.lambda[, -lavpta$vidx$lv.interaction[[b]]] } if (qr(this.lambda)$rank < ncol(this.lambda)) { print(this.lambda) lav_msg_stop(gettext( "LAMBDA has no full column rank. Please use sam.method = global")) } } # b # store LAMBDA/THETA/NU per block STEP1$LAMBDA <- LAMBDA STEP1$THETA <- THETA if (FIT@Model@meanstructure) { STEP1$NU <- NU } if (FIT@Model@categorical || FIT@Model@correlation) { STEP1$DELTA <- DELTA } VETA <- vector("list", nblocks) MSM.. <- vector("list", nblocks) MTM.. <- vector("list", nblocks) COV.. 
<- vector("list", nblocks) YBAR.. <- vector("list", nblocks) REL <- vector("list", nblocks) alpha <- vector("list", nblocks) lambda <- vector("list", nblocks) if (lavoptions$meanstructure) { EETA <- vector("list", nblocks) } else { EETA <- NULL } M <- vector("list", nblocks) if (lv.interaction.flag) { # compute Bartlett factor scores FS <- vector("list", nblocks) # FS.mm <- lapply(STEP1$MM.FIT, lav_predict_eta_bartlett) FS.mm <- lapply(STEP1$MM.FIT, lavPredict, method = "Bartlett", drop.list.single.group = FALSE ) for (b in seq_len(nblocks)) { tmp <- lapply( 1:length(STEP1$MM.FIT), function(x) FS.mm[[x]][[b]] ) LABEL <- unlist(lapply(tmp, colnames)) FS[[b]] <- do.call("cbind", tmp) colnames(FS[[b]]) <- LABEL # dummy lv's? if (length(dummy.lv.idx) > 0L) { FS.obs <- FIT@Data@X[[b]][, dummy.ov.idx, drop = FALSE] colnames(FS.obs) <- FIT@Data@ov.names[[b]][dummy.ov.idx] FS[[b]] <- cbind(FS[[b]], FS.obs) } } } # compute VETA/EETA per block if (nlevels > 1L && local.twolevel.method == "h1") { H1 <- lav_h1_implied_logl( lavdata = FIT@Data, lavsamplestats = FIT@SampleStats, lavoptions = FIT@Options ) } for (b in seq_len(nblocks)) { # get sample statistics for this block if (nlevels > 1L) { if (ngroups > 1L) { this.level <- (b - 1L) %% ngroups + 1L } else { this.level <- b } this.group <- floor(b / nlevels + 0.5) if (this.level == 1L) { if (local.twolevel.method == "h1") { COV <- H1$implied$cov[[1]] YBAR <- H1$implied$mean[[1]] } else if (local.twolevel.method == "anova" || local.twolevel.method == "mean") { COV <- FIT@SampleStats@YLp[[this.group]][[2]]$Sigma.W YBAR <- FIT@SampleStats@YLp[[this.group]][[2]]$Mu.W } # reduce ov.idx <- FIT@Data@Lp[[this.group]]$ov.idx[[this.level]] COV <- COV[ov.idx, ov.idx, drop = FALSE] YBAR <- YBAR[ov.idx] } else if (this.level == 2L) { if (local.twolevel.method == "h1") { COV <- H1$implied$cov[[2]] YBAR <- H1$implied$mean[[2]] } else if (local.twolevel.method == "anova") { COV <- FIT@SampleStats@YLp[[this.group]][[2]]$Sigma.B YBAR <- FIT@SampleStats@YLp[[this.group]][[2]]$Mu.B } else if (local.twolevel.method == "mean") { S.PW <- FIT@SampleStats@YLp[[this.group]][[2]]$Sigma.W NJ <- FIT@SampleStats@YLp[[this.group]][[2]]$s Y2 <- FIT@SampleStats@YLp[[this.group]][[2]]$Y2 # grand mean MU.Y <- (FIT@SampleStats@YLp[[this.group]][[2]]$Mu.W + FIT@SampleStats@YLp[[this.group]][[2]]$Mu.B) Y2c <- t(t(Y2) - MU.Y) # MUST be centered YB <- crossprod(Y2c) / nrow(Y2c) COV <- YB - 1 / NJ * S.PW YBAR <- FIT@SampleStats@YLp[[this.group]][[2]]$Mu.B } # reduce ov.idx <- FIT@Data@Lp[[this.group]]$ov.idx[[this.level]] COV <- COV[ov.idx, ov.idx, drop = FALSE] YBAR <- YBAR[ov.idx] } else { lav_msg_stop(gettext("level 3 not supported (yet).")) } # single level } else { this.group <- b YBAR <- FIT@h1$implied$mean[[b]] # EM version if missing="ml" COV <- FIT@h1$implied$cov[[b]] # rescale COV? if (FIT@Model@categorical || FIT@Model@correlation) { SCALE.vector <- 1 / (drop(DELTA[[b]])) COV <- SCALE.vector * COV * rep(SCALE.vector, each = ncol(COV)) YBAR <- SCALE.vector * YBAR # Checkme! } # do we need ICOV? 
if (local.M.method == "GLS") { if (FIT@Options$sample.cov.rescale) { # get unbiased S N <- FIT@SampleStats@nobs[[b]] COV.unbiased <- COV * N / (N - 1) ICOV <- solve(COV.unbiased) } else { ICOV <- solve(COV) } } } COV..[[b]] <- COV YBAR..[[b]] <- YBAR # compute mapping matrix 'M' this.lambda <- LAMBDA[[b]] if (length(lavpta$vidx$lv.interaction[[b]]) > 0L) { this.lambda <- this.lambda[, -lavpta$vidx$lv.interaction[[b]]] } Mb <- lav_sam_mapping_matrix( LAMBDA = this.lambda, THETA = THETA[[b]], S = COV, S.inv = ICOV, method = local.M.method ) if (length(lavpta$vidx$lv.interaction[[b]]) > 0L) { tmp <- Mb Mb <- matrix(0, nrow = ncol(LAMBDA[[b]]), ncol = nrow(LAMBDA[[b]])) Mb[-lavpta$vidx$lv.interaction[[b]], ] <- tmp } # handle observed-only variables dummy.ov.idx <- c( FIT@Model@ov.x.dummy.ov.idx[[b]], FIT@Model@ov.y.dummy.ov.idx[[b]] ) dummy.lv.idx <- c( FIT@Model@ov.x.dummy.lv.idx[[b]], FIT@Model@ov.y.dummy.lv.idx[[b]] ) if (length(dummy.ov.idx)) { Mb[dummy.lv.idx, ] <- 0 Mb[cbind(dummy.lv.idx, dummy.ov.idx)] <- 1 } # here, we remove the lv.interaction row(s) from Mb if (length(lavpta$vidx$lv.interaction[[b]]) > 0L) { Mb <- Mb[-lavpta$vidx$lv.interaction[[b]], ] } # compute EETA if (lavoptions$meanstructure) { EETA[[b]] <- lav_sam_eeta(M = Mb, YBAR = YBAR, NU = NU[[b]]) } # compute VETA if (sam.method == "local") { tmp <- lav_sam_veta( M = Mb, S = COV, THETA = THETA[[b]], alpha.correction = local.options[["alpha.correction"]], lambda.correction = local.options[["lambda.correction"]], N <- FIT@SampleStats@nobs[[this.group]], dummy.lv.idx = dummy.lv.idx, extra = TRUE ) VETA[[b]] <- tmp[, , drop = FALSE] # drop attributes alpha[[b]] <- attr(tmp, "alpha") lambda[[b]] <- attr(tmp, "lambda") MSM..[[b]] <- attr(tmp, "MSM") MTM..[[b]] <- attr(tmp, "MTM") } else { # FSR -- no correction VETA[[b]] <- Mb %*% COV %*% t(Mb) } # standardize? not really needed, but we may have 1.0000001 # as variances, and this may lead to false convergence if (FIT@Options$std.lv) { VETA[[b]] <- stats::cov2cor(VETA[[b]]) } # lv.names, including dummy-lv covariates psi.idx <- which(names(FIT@Model@GLIST) == "psi")[b] lv.names <- FIT@Model@dimNames[[psi.idx]][[b]] # including dummy/interact/. if (!lv.interaction.flag) { dimnames(VETA[[b]]) <- FIT@Model@dimNames[[psi.idx]] } else { lv.int.names <- lavpta$vnames$lv.interaction[[b]] # remove interaction terms lv.names1 <- lv.names[!lv.names %in% lv.int.names] colnames(VETA[[b]]) <- rownames(VETA[[b]]) <- lv.names1 } # compute model-based RELiability MSM <- Mb %*% COV %*% t(Mb) # REL[[b]] <- diag(VETA[[b]]] %*% solve(MSM)) # CHECKme! 
REL[[b]] <- diag(VETA[[b]]) / diag(MSM) # check for lv.interactions if (lv.interaction.flag && length(lv.int.names) > 0L) { if (FIT@Model@categorical || FIT@Model@correlation) { lav_msg_stop(gettext("SAM + lv interactions do not work (yet) if correlation structures are used.")) } # EETA2 EETA1 <- EETA[[b]] EETA[[b]] <- lav_sam_eeta2( EETA = EETA1, VETA = VETA[[b]], lv.names = lv.names1, lv.int.names = lv.int.names ) # VETA2 if (sam.method == "local") { # reorder FS[[b]] if needed FS.b <- FS[[b]][, lv.names1, drop = FALSE] tmp <- lav_sam_veta2( FS = FS.b, M = Mb, VETA = VETA[[b]], EETA = EETA1, THETA = THETA[[b]], lv.names = lv.names1, lv.int.names = lv.int.names, dummy.lv.names = lv.names[dummy.lv.idx], alpha.correction = local.options[["alpha.correction"]], lambda.correction = local.options[["lambda.correction"]], extra = TRUE ) VETA[[b]] <- tmp[, , drop = FALSE] # drop attributes alpha[[b]] <- attr(tmp, "alpha") lambda[[b]] <- attr(tmp, "lambda") MSM..[[b]] <- attr(tmp, "MSM") MTM..[[b]] <- attr(tmp, "MTM") } else { lav_msg_fixme("not ready yet!") # FSR -- no correction VETA[[b]] <- lav_sam_fs2( FS = FS[[b]], lv.names = lv.names1, lv.int.names = lv.int.names ) } } # store Mapping matrix for this block M[[b]] <- Mb } # blocks # label blocks if (nblocks > 1L) { names(EETA) <- FIT@Data@block.label names(VETA) <- FIT@Data@block.label names(REL) <- FIT@Data@block.label names(MSM..) <- FIT@Data@block.label names(MTM..) <- FIT@Data@block.label names(COV..) <- FIT@Data@block.label names(YBAR..) <- FIT@Data@block.label } # store EETA/VETA/M/alpha/lambda STEP1$VETA <- VETA STEP1$EETA <- EETA STEP1$REL <- REL STEP1$M <- M STEP1$lambda <- lambda STEP1$alpha <- alpha STEP1$MSM <- MSM.. STEP1$MTM <- MTM.. STEP1$COV <- COV.. STEP1$YBAR <- YBAR.. if (lav_verbose()) { cat("done.\n") } STEP1 } lavaan/R/lav_model_h1_information.R0000644000176200001440000010540014627656441017016 0ustar liggesusers# the information matrix of the unrestricted (H1) model # taking into account: # - the estimator (ML or (D)WLS/ULS) # - missing or not # - fixed.x = TRUE or FALSE # - conditional.x = TRUE or FALSE # - h1.information is "structured" or "unstructured" # # Note: this replaces the (old) lav_model_wls_v() function # # - YR 22 Okt 2017: initial version # - YR 03 Dec 2017: add lavh1, implied is either lavimplied or lavh1 # add support for clustered data: first.order # - YR 03 Jan 2018: add support for clustered data: expected # - YR 23 Aug 2018: lav_model_h1_acov (0.6-3) ## For the lavaan.mi package, TDJ provides pooled versions of all the ## sample moments called in these functions. If any updates to these functions ## require NEW information (from @SampleStats or @implied or @h1), ## PLEASE ADD A TAG @TDJorgensen ## at the end of the commit message on GitHub, so TDJ can check whether ## lavaan.mi::lavResiduals.mi() needs to be updated accordingly. 
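# a minimal (hypothetical) usage sketch -- these are internal helpers, so
# outside the package they are only reachable via ':::':
#   fit <- cfa("f =~ x1 + x2 + x3", data = HolzingerSwineford1939)
#   A1  <- lavaan:::lav_model_h1_information(fit) # list: one matrix per group
# which of the three variants below is used is controlled by
# lavoptions$information[1] ("observed", "expected" or "first.order")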
lav_model_h1_information <- function(lavobject = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, lavoptions = NULL) { if (!is.null(lavobject) && inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavdata <- lavobject@Data lavimplied <- lavobject@implied if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) } lavcache <- lavobject@Cache lavoptions <- lavobject@Options } # sanity check if (length(lavh1) == 0L) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel = lavmodel) } # information information <- lavoptions$information[1] # ALWAYS take the first one # the caller must control it # compute information matrix if (information == "observed") { I1 <- lav_model_h1_information_observed( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } else if (information == "expected") { I1 <- lav_model_h1_information_expected( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } else if (information == "first.order") { I1 <- lav_model_h1_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } # I1 information, as a list per group I1 } # fisher/expected information of H1 lav_model_h1_information_expected <- function(lavobject = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL) { if (!is.null(lavobject) && inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavdata <- lavobject@Data lavimplied <- lavobject@implied if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) } lavcache <- lavobject@Cache lavoptions <- lavobject@Options } # sanity check if (length(lavh1) == 0L) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel = lavmodel) } estimator <- lavmodel@estimator # structured of unstructured? (since 0.5-23) if (!is.null(lavoptions) && !is.null(lavoptions$h1.information[1]) && lavoptions$h1.information[1] == "unstructured") { structured <- FALSE } else { structured <- TRUE } # 1. WLS.V (=A1) for GLS/WLS if (lavmodel@estimator == "GLS" || lavmodel@estimator == "WLS") { A1 <- lavsamplestats@WLS.V } # 1b. 
else if (lavmodel@estimator == "DLS") { if (lavmodel@estimator.args$dls.GammaNT == "sample") { A1 <- lavsamplestats@WLS.V } else { A1 <- vector("list", length = lavsamplestats@ngroups) for (g in seq_len(lavsamplestats@ngroups)) { dls.a <- lavmodel@estimator.args$dls.a GammaNT <- lav_samplestats_Gamma_NT( COV = lavimplied$cov[[g]], MEAN = lavimplied$mean[[g]], rescale = FALSE, x.idx = lavsamplestats@x.idx[[g]], fixed.x = lavmodel@fixed.x, conditional.x = lavmodel@conditional.x, meanstructure = lavmodel@meanstructure, slopestructure = lavmodel@conditional.x ) W.DLS <- (1 - dls.a) * lavsamplestats@NACOV[[g]] + dls.a * GammaNT A1[[g]] <- lav_matrix_symmetric_inverse(W.DLS) } } } # 2. DWLS/ULS diagonal @WLS.VD slot else if (lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { # diagonal only!! A1 <- lavsamplestats@WLS.VD } # 3a. ML single level else if (lavmodel@estimator %in% c("ML", "NTRLS", "DLS", "catML") && lavdata@nlevels == 1L) { A1 <- vector("list", length = lavsamplestats@ngroups) # structured? compute model-implied statistics if (structured && length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel) } for (g in 1:lavsamplestats@ngroups) { if (.hasSlot(lavdata, "weights")) { WT <- lavdata@weights[[g]] } else { WT <- NULL } if (lavsamplestats@missing.flag) { # mvnorm # FIXME: allow for meanstructure = FALSE # FIXME: allow for conditional.x = TRUE if (lavmodel@meanstructure && structured) { MEAN <- lavimplied$mean[[g]] } else { MEAN <- lavsamplestats@missing.h1[[g]]$mu } if (structured) { A1[[g]] <- lav_mvnorm_missing_information_expected( Y = lavdata@X[[g]], Mp = lavdata@Mp[[g]], wt = WT, Mu = MEAN, # meanstructure = lavmodel@meanstructure, Sigma = lavimplied$cov[[g]], x.idx = lavsamplestats@x.idx[[g]] ) } else { A1[[g]] <- lav_mvnorm_missing_information_expected( Y = lavdata@X[[g]], Mp = lavdata@Mp[[g]], wt = WT, Mu = MEAN, # meanstructure = lavmodel@meanstructure, Sigma = lavsamplestats@missing.h1[[g]]$sigma, x.idx = lavsamplestats@x.idx[[g]] ) } } else { if (lavmodel@conditional.x) { # mvreg if (lavmodel@meanstructure && structured) { RES.INT <- lavimplied$res.int[[g]] RES.SLOPES <- lavimplied$res.slopes[[g]] } else { RES.INT <- lavsamplestats@res.int[[g]] RES.SLOPES <- lavsamplestats@res.slopes[[g]] } if (structured) { A1[[g]] <- lav_mvreg_information_expected( sample.mean.x = lavsamplestats@mean.x[[g]], sample.cov.x = lavsamplestats@cov.x[[g]], sample.nobs = lavsamplestats@nobs[[g]], res.int = RES.INT, res.slopes = RES.SLOPES, # wt = WT, # meanstructure = lavmodel@meanstructure, res.cov = lavimplied$res.cov[[g]] ) } else { A1[[g]] <- lav_mvreg_information_expected( sample.mean.x = lavsamplestats@mean.x[[g]], sample.cov.x = lavsamplestats@cov.x[[g]], sample.nobs = lavsamplestats@nobs[[g]], res.int = lavsamplestats@res.int[[g]], res.slopes = lavsamplestats@res.slopes[[g]], # wt = WT, # meanstructure = lavmodel@meanstructure, res.cov = lavsamplestats@res.cov[[g]] ) } } else { # conditional.x = FALSE # mvnorm if (lavmodel@meanstructure && structured) { MEAN <- lavimplied$mean[[g]] } else { MEAN <- lavsamplestats@mean[[g]] } correlation.flag <- FALSE if (.hasSlot(lavmodel, "correlation")) { correlation.flag <- lavmodel@correlation } if (structured) { A1[[g]] <- lav_mvnorm_information_expected( Sigma = lavimplied$cov[[g]], # wt = WT, # not needed x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure, correlation = correlation.flag ) } else { A1[[g]] <- lav_mvnorm_h1_information_expected( sample.cov.inv = lavsamplestats@icov[[g]], # wt = WT, not 
needed x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure, correlation = correlation.flag ) } } # conditional.x } # missing # stochastic group weight if (lavmodel@group.w.free) { # unweight!! (as otherwise, we would 'weight' again) a <- exp(lavimplied$group.w[[g]]) / lavsamplestats@nobs[[g]] A1[[g]] <- lav_matrix_bdiag(matrix(a, 1L, 1L), A1[[g]]) } } # g } # ML # 3b. ML + multilevel else if (lavmodel@estimator == "ML" && lavdata@nlevels > 1L) { A1 <- vector("list", length = lavsamplestats@ngroups) # structured? compute model-implied statistics if (structured && length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel) } # structured? lavimplied vs lavh1 if (structured) { implied <- lavimplied } else { implied <- lavh1$implied } for (g in 1:lavsamplestats@ngroups) { MU.W <- implied$mean[[(g - 1) * lavdata@nlevels + 1L]] MU.B <- implied$mean[[(g - 1) * lavdata@nlevels + 2L]] SIGMA.W <- implied$cov[[(g - 1) * lavdata@nlevels + 1L]] SIGMA.B <- implied$cov[[(g - 1) * lavdata@nlevels + 2L]] # clustered data A1[[g]] <- lav_mvnorm_cluster_information_expected( Lp = lavdata@Lp[[g]], Mu.W = MU.W, Sigma.W = SIGMA.W, Mu.B = MU.B, Sigma.B = SIGMA.B, x.idx = lavsamplestats@x.idx[[g]] ) } # g } # ML + multilevel A1 } lav_model_h1_information_observed <- function(lavobject = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, lavoptions = NULL) { if (!is.null(lavobject) && inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavdata <- lavobject@Data lavimplied <- lavobject@implied if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) } lavcache <- lavobject@Cache lavoptions <- lavobject@Options } # sanity check if (length(lavh1) == 0L) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel = lavmodel) } estimator <- lavmodel@estimator # structured? if (!is.null(lavoptions) && !is.null(lavoptions$h1.information[1]) && lavoptions$h1.information[1] == "unstructured") { structured <- FALSE } else { structured <- TRUE } # 1. WLS.V (=A1) for GLS/WLS if (lavmodel@estimator == "GLS" || lavmodel@estimator == "WLS" || lavmodel@estimator == "DLS") { A1 <- lavsamplestats@WLS.V } # 1b. else if (lavmodel@estimator == "DLS") { if (lavmodel@estimator.args$dls.GammaNT == "sample") { A1 <- lavsamplestats@WLS.V } else { A1 <- vector("list", length = lavsamplestats@ngroups) for (g in seq_len(lavsamplestats@ngroups)) { dls.a <- lavmodel@estimator.args$dls.a GammaNT <- lav_samplestats_Gamma_NT( COV = lavimplied$cov[[g]], MEAN = lavimplied$mean[[g]], rescale = FALSE, x.idx = lavsamplestats@x.idx[[g]], fixed.x = lavmodel@fixed.x, conditional.x = lavmodel@conditional.x, meanstructure = lavmodel@meanstructure, slopestructure = lavmodel@conditional.x ) W.DLS <- (1 - dls.a) * lavsamplestats@NACOV[[g]] + dls.a * GammaNT A1[[g]] <- lav_matrix_symmetric_inverse(W.DLS) } } } # 2. DWLS/ULS diagonal @WLS.VD slot else if (lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { # diagonal only!! A1 <- lavsamplestats@WLS.VD } # 3a. ML single level else if (lavmodel@estimator == "ML" && lavdata@nlevels == 1L) { A1 <- vector("list", length = lavsamplestats@ngroups) # structured? 
compute model-implied statistics if (structured && length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel) } for (g in 1:lavsamplestats@ngroups) { if (lavsamplestats@missing.flag) { # mvnorm # FIXME: allow for meanstructure = FALSE # FIXME: allow for conditional.x = TRUE if (lavmodel@meanstructure && structured) { MEAN <- lavimplied$mean[[g]] } else { MEAN <- lavsamplestats@missing.h1[[g]]$mu } if (structured) { A1[[g]] <- lav_mvnorm_missing_information_observed_samplestats( Yp = lavsamplestats@missing[[g]], # wt not needed Mu = MEAN, # meanstructure = lavmodel@meanstructure, Sigma = lavimplied$cov[[g]], x.idx = lavsamplestats@x.idx[[g]] ) } else { A1[[g]] <- lav_mvnorm_missing_information_observed_samplestats( Yp = lavsamplestats@missing[[g]], # wt not needed Mu = MEAN, # meanstructure = lavmodel@meanstructure, Sigma = lavsamplestats@missing.h1[[g]]$sigma, x.idx = lavsamplestats@x.idx[[g]] ) } } else { if (lavmodel@conditional.x) { # mvreg if (lavmodel@meanstructure && structured) { RES.INT <- lavimplied$res.int[[g]] RES.SLOPES <- lavimplied$res.slopes[[g]] } else { RES.INT <- lavsamplestats@res.int[[g]] RES.SLOPES <- lavsamplestats@res.slopes[[g]] } if (structured) { A1[[g]] <- lav_mvreg_information_observed_samplestats( sample.res.int = lavsamplestats@res.int[[g]], sample.res.slopes = lavsamplestats@res.slopes[[g]], sample.res.cov = lavsamplestats@res.cov[[g]], sample.mean.x = lavsamplestats@mean.x[[g]], sample.cov.x = lavsamplestats@cov.x[[g]], res.int = RES.INT, res.slopes = RES.SLOPES, # wt = WT, # meanstructure = lavmodel@meanstructure, res.cov = lavimplied$res.cov[[g]] ) } else { A1[[g]] <- lav_mvreg_information_observed_samplestats( sample.res.int = lavsamplestats@res.int[[g]], sample.res.slopes = lavsamplestats@res.slopes[[g]], sample.res.cov = lavsamplestats@res.cov[[g]], sample.mean.x = lavsamplestats@mean.x[[g]], sample.cov.x = lavsamplestats@cov.x[[g]], res.int = lavsamplestats@res.int[[g]], res.slopes = lavsamplestats@res.slopes[[g]], # wt = WT, # meanstructure = lavmodel@meanstructure, res.cov = lavsamplestats@res.cov[[g]] ) } } else { # conditional.x = FALSE # mvnorm if (lavmodel@meanstructure && structured) { MEAN <- lavimplied$mean[[g]] } else { MEAN <- lavsamplestats@mean[[g]] } if (structured) { A1[[g]] <- lav_mvnorm_information_observed_samplestats( sample.mean = lavsamplestats@mean[[g]], sample.cov = lavsamplestats@cov[[g]], Mu = MEAN, Sigma = lavimplied$cov[[g]], # wt = WT, # not needed x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure ) } else { A1[[g]] <- lav_mvnorm_h1_information_observed_samplestats( sample.mean = lavsamplestats@mean[[g]], sample.cov = lavsamplestats@cov[[g]], sample.cov.inv = lavsamplestats@icov[[g]], # wt = WT, not needed x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure ) } } # conditional.x } # missing # stochastic group weight if (lavmodel@group.w.free) { # unweight!! a <- exp(lavimplied$group.w[[g]]) / lavsamplestats@nobs[[g]] A1[[g]] <- lav_matrix_bdiag(matrix(a, 1, 1), A1[[g]]) } } # g } # ML # 3b. ML + multilevel else if (lavmodel@estimator == "ML" && lavdata@nlevels > 1L) { A1 <- vector("list", length = lavsamplestats@ngroups) # structured? compute model-implied statistics if (structured && length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel) } # structured? 
lavimplied vs lavh1 if (structured) { implied <- lavimplied } else { implied <- lavh1$implied } for (g in 1:lavsamplestats@ngroups) { MU.W <- implied$mean[[(g - 1) * lavdata@nlevels + 1L]] MU.B <- implied$mean[[(g - 1) * lavdata@nlevels + 2L]] SIGMA.W <- implied$cov[[(g - 1) * lavdata@nlevels + 1L]] SIGMA.B <- implied$cov[[(g - 1) * lavdata@nlevels + 2L]] if (lavdata@missing == "ml") { A1[[g]] <- lav_mvnorm_cluster_missing_information_observed( Y1 = lavdata@X[[g]], Y2 = lavsamplestats@YLp[[g]][[2]]$Y2, Lp = lavdata@Lp[[g]], Mp = lavdata@Mp[[g]], Mu.W = MU.W, Sigma.W = SIGMA.W, Mu.B = MU.B, Sigma.B = SIGMA.B, x.idx = lavsamplestats@x.idx[[g]] ) } else { A1[[g]] <- lav_mvnorm_cluster_information_observed( Lp = lavdata@Lp[[g]], YLp = lavsamplestats@YLp[[g]], Mu.W = MU.W, Sigma.W = SIGMA.W, Mu.B = MU.B, Sigma.B = SIGMA.B, x.idx = lavsamplestats@x.idx[[g]] ) } } # g } # ML + multilevel A1 } # outer product of the case-wise scores (gradients) # HJ 18/10/2023: Adjust J matrix correctly using weights. Note: H matrix is # based on lav_model_hessian so no changes required. lav_model_h1_information_firstorder <- function(lavobject = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, lavoptions = NULL) { if (!is.null(lavobject) && inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavdata <- lavobject@Data lavimplied <- lavobject@implied if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) } lavcache <- lavobject@Cache lavoptions <- lavobject@Options } # sanity check if (length(lavh1) == 0L) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel = lavmodel) } estimator <- lavmodel@estimator if (!estimator %in% c("ML", "PML")) { lav_msg_stop(gettext( "information = \"first.order\" not available for estimator"), sQuote(estimator)) } # structured? if (!is.null(lavoptions) && !is.null(lavoptions$h1.information[1]) && lavoptions$h1.information[1] == "unstructured") { structured <- FALSE } else { structured <- TRUE } # clustered? if (!is.null(lavoptions) && !is.null(lavoptions$.clustered) && lavoptions$.clustered) { clustered <- TRUE if (is.null(lavdata@Lp[[1]])) { lav_msg_stop(gettext("lavdata@Lp is empty, while clustered = TRUE")) } # if (estimator == "PML") { # lav_msg_stop(gettext( # "clustered information is not (yet) available when estimator = 'PML'")) # } # if(lavsamplestats@missing.flag) { # stop("lavaan ERROR: clustered information is not (yet) available when missing = \"ML\"") # } # if(lavmodel@conditional.x) { # stop("lavaan ERROR: clustered information is not (yet) available when conditional.x = TRUE") # } # if(!structured) { # stop("lavaan ERROR: clustered information is not (yet) available when h1.information = \"unstructured\"") # } } else { clustered <- FALSE } # structured? compute model-implied statistics if (estimator == "PML" || structured) { if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel) } } # structured? 
lavimplied vs lavh1 if (structured) { implied <- lavimplied } else { implied <- lavh1$implied } B1 <- vector("list", length = lavsamplestats@ngroups) for (g in 1:lavdata@ngroups) { if (.hasSlot(lavdata, "weights")) { WT <- lavdata@weights[[g]] } else { WT <- NULL } if (estimator == "PML") { # slow approach: compute outer product of case-wise scores if (lavmodel@conditional.x) { SIGMA <- implied$res.cov[[g]] MU <- implied$res.mean[[g]] TH <- implied$res.th[[g]] PI <- implied$res.slopes[[g]] EXO <- lavdata@eXo[[g]] } else { SIGMA <- implied$cov[[g]] MU <- implied$mean[[g]] TH <- implied$th[[g]] PI <- NULL EXO <- NULL } SC <- pml_deriv1( Sigma.hat = SIGMA, Mu.hat = MU, TH = TH, th.idx = lavmodel@th.idx[[g]], num.idx = lavmodel@num.idx[[g]], X = lavdata@X[[g]], eXo = EXO, wt = NULL, PI = PI, lavcache = lavcache[[g]], missing = lavdata@missing, scores = TRUE, negative = FALSE ) # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # information H1 if (isTRUE(clustered)) { # For clustered data, need to compute (centred) crossprod within each # cluster and sum them all up. clusters <- lavdata@Lp[[g]]$cluster.id[[2]] # why list of 2? clusters.idx <- lavdata@Lp[[g]]$cluster.idx[[2]] nclust <- length(clusters) zb <- list() if (is.null(WT)) WT <- rep(1, length(clusters.idx)) for (b in seq_along(clusters)) { SC_b <- SC[clusters.idx == b, ] WT_b <- WT[clusters.idx == b] zb[[b]] <- apply(SC_b * WT_b, 2, sum) } zbar <- apply(do.call(cbind, zb), 1, mean) B1c <- lapply(zb, \(z) tcrossprod(z - zbar)) |> Reduce(f = `+`) B1[[g]] <- nclust / (nclust - 1) * B1c } else { if (is.null(WT)) { B1[[g]] <- lav_matrix_crossprod(SC) } else { B1[[g]] <- crossprod(WT * SC) } } } else if (estimator == "ML" && lavdata@nlevels > 1L) { # if not-structured, we use lavh1, and that is always # 'unconditional' (for now) if (lavmodel@conditional.x && structured) { if (lavdata@missing == "ml") { lav_msg_stop(gettext("firstorder information matrix not available (yet) if conditional.x + fiml")) } Res.Sigma.W <- implied$res.cov[[(g - 1) * lavdata@nlevels + 1L]] Res.Int.W <- implied$res.int[[(g - 1) * lavdata@nlevels + 1L]] Res.Pi.W <- implied$res.slopes[[(g - 1) * lavdata@nlevels + 1L]] Res.Sigma.B <- implied$res.cov[[(g - 1) * lavdata@nlevels + 2L]] Res.Int.B <- implied$res.int[[(g - 1) * lavdata@nlevels + 2L]] Res.Pi.B <- implied$res.slopes[[(g - 1) * lavdata@nlevels + 2L]] B1[[g]] <- lav_mvreg_cluster_information_firstorder( Y1 = lavdata@X[[g]], YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W, Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B, Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B, divide.by.two = TRUE ) } else { MU.W <- implied$mean[[(g - 1) * lavdata@nlevels + 1L]] MU.B <- implied$mean[[(g - 1) * lavdata@nlevels + 2L]] SIGMA.W <- implied$cov[[(g - 1) * lavdata@nlevels + 1L]] SIGMA.B <- implied$cov[[(g - 1) * lavdata@nlevels + 2L]] if (lavdata@missing == "ml") { B1[[g]] <- lav_mvnorm_cluster_missing_information_firstorder( Y1 = lavdata@X[[g]], Y2 = lavsamplestats@YLp[[g]][[2]]$Y2, Lp = lavdata@Lp[[g]], Mp = lavdata@Mp[[g]], Mu.W = MU.W, Sigma.W = SIGMA.W, Mu.B = MU.B, Sigma.B = SIGMA.B, x.idx = lavsamplestats@x.idx[[g]], divide.by.two = TRUE ) } else { # no missing values B1[[g]] <- lav_mvnorm_cluster_information_firstorder( Y1 = lavdata@X[[g]], YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], Mu.W = MU.W, Sigma.W = SIGMA.W, Mu.B = MU.B, Sigma.B = SIGMA.B, x.idx = lavsamplestats@x.idx[[g]], divide.by.two = TRUE ) } } } else if (estimator == "ML" && 
lavdata@nlevels == 1L) { if (length(lavdata@cluster) > 0L) { cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]] } else { cluster.idx <- NULL } if (lavsamplestats@missing.flag) { # mvnorm # FIXME: allow for meanstructure = FALSE # FIXME: allow for conditional.x = TRUE if (lavmodel@meanstructure && structured) { MEAN <- lavimplied$mean[[g]] } else { MEAN <- lavsamplestats@missing.h1[[g]]$mu } B1[[g]] <- lav_mvnorm_missing_information_firstorder( Y = lavdata@X[[g]], Mp = lavdata@Mp[[g]], wt = WT, cluster.idx = cluster.idx, Mu = MEAN, # meanstructure = lavmodel@meanstructure, Sigma = implied$cov[[g]], x.idx = lavsamplestats@x.idx[[g]] ) } else { if (lavmodel@conditional.x) { # mvreg if (lavmodel@meanstructure && structured) { RES.INT <- lavimplied$res.int[[g]] RES.SLOPES <- lavimplied$res.slopes[[g]] } else { RES.INT <- lavsamplestats@res.int[[g]] RES.SLOPES <- lavsamplestats@res.slopes[[g]] } B1[[g]] <- lav_mvreg_information_firstorder( Y = lavdata@X[[g]], eXo = lavdata@eXo[[g]], res.int = RES.INT, res.slopes = RES.SLOPES, # wt = WT, # meanstructure = lavmodel@meanstructure, res.cov = implied$res.cov[[g]] ) } else { # conditional.x = FALSE # mvnorm if (lavmodel@meanstructure && structured) { MEAN <- lavimplied$mean[[g]] } else { # NOTE: the information matrix will be the same (minus # the meanstructure block), but once INVERTED, the # standard errors will be (slightly) smaller!!! # This is only visibile when estimator = "MLF" # (or information = "first.order") MEAN <- lavsamplestats@mean[[g]] # saturated } if (structured) { B1[[g]] <- lav_mvnorm_information_firstorder( Y = lavdata@X[[g]], Mu = MEAN, Sigma = lavimplied$cov[[g]], wt = WT, cluster.idx = cluster.idx, x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure ) } else { B1[[g]] <- lav_mvnorm_h1_information_firstorder( Y = lavdata@X[[g]], sample.cov.inv = lavsamplestats@icov[[g]], Gamma = lavsamplestats@NACOV[[g]], wt = WT, cluster.idx = cluster.idx, # only if wt x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure ) } } # mvnorm } # missing } # ML # stochastic group weight if (lavmodel@group.w.free) { # unweight!! a <- exp(lavimplied$group.w[[g]]) / lavsamplestats@nobs[[g]] B1[[g]] <- lav_matrix_bdiag(matrix(a, 1, 1), B1[[g]]) } } # g B1 } # asymptotic variance matrix (=Gamma/N) of the unrestricted (H1) # sample statistics # # FIXME: make this work for categorical/GLS/WLS/... 
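# (hypothetical) usage sketch, reusing the 'fit' object sketched earlier and
# analogous to the information functions above:
#   ACOV <- lavaan:::lav_model_h1_acov(fit, se = "robust.sem")
# returns, per group, the asymptotic covariance matrix of the saturated
# (h1) sample statistics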
# lav_model_h1_acov <- function(lavobject = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, meanstructure = NULL, # if specified, use it h1.information = NULL, # if specified, use it se = NULL) { # if specified, use it if (!is.null(lavobject) && inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavdata <- lavobject@Data lavimplied <- lavobject@implied if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) } lavcache <- lavobject@Cache lavoptions <- lavobject@Options } # sanity check if (length(lavh1) == 0L) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel = lavmodel) } # override if (!is.null(meanstructure)) { lavoptions$meanstructure <- meanstructure } if (!is.null(h1.information)) { lavoptions$h1.information[1] <- h1.information } if (!is.null(se)) { lavoptions$se <- se } # information information <- lavoptions$information[1] # ALWAYS used the first # compute information matrix if (information == "observed") { I1 <- lav_model_h1_information_observed( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } else if (information == "expected") { I1 <- lav_model_h1_information_expected( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } else if (information == "first.order") { I1 <- lav_model_h1_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } if (lavoptions$se %in% c("robust.huber.white", "robust.sem")) { J1 <- lav_model_h1_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions ) } # compute ACOV per group ACOV <- vector("list", length = lavdata@ngroups) for (g in 1:lavdata@ngroups) { # denominator if (lavdata@nlevels == 1L) { Ng <- lavsamplestats@nobs[[g]] } else { Ng <- lavdata@Lp[[g]]$nclusters[[2]] } # invert information I1.g.inv <- try(lav_matrix_symmetric_inverse(I1[[g]]), silent = TRUE) if (inherits(I1.g.inv, "try-error")) { lav_msg_stop(gettext( "could not invert h1 information matrix in group"), g) } # which type of se? 
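    # in matrix terms (a sketch of the two cases handled below):
    #   standard: ACOV_g = I1_g^{-1} / N_g
    #   robust:   ACOV_g = I1_g^{-1} %*% J1_g %*% I1_g^{-1} / N_g (sandwich)
    # with I1 the h1 information and J1 the first-order information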
if (lavoptions$se %in% c("standard", "none")) { ACOV[[g]] <- 1 / Ng * I1.g.inv } else if (lavoptions$se %in% c("robust.huber.white", "robust.sem")) { ACOV[[g]] <- 1 / Ng * (I1.g.inv %*% J1[[g]] %*% I1.g.inv) } } ACOV } lavaan/R/lav_lavaanList_multipleGroups.R0000644000176200001440000000172214627656441020134 0ustar liggesusers# lavMultipleGroups: fit the *same* model, on (typically a small number of) # groups/sets # YR - 11 July 2016 lavMultipleGroups <- function(model = NULL, dataList = NULL, ndat = length(dataList), cmd = "sem", ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL) { # dotdotdot dotdotdot <- list() # fit multiple times fit <- do.call("lavaanList", args = c(list( model = model, dataList = dataList, ndat = ndat, cmd = cmd, store.slots = store.slots, FUN = FUN, show.progress = show.progress, parallel = parallel, ncpus = ncpus, cl = cl ), dotdotdot)) # store group labels (if any) fit@meta$lavMultipleGroups <- TRUE fit@meta$group.label <- names(dataList) fit } lavaan/R/ctr_informative_testing.R0000644000176200001440000003403514627656441017014 0ustar liggesusers# This code is contributed by Leonard Vanbrabant InformativeTesting <- function(model = NULL, data, constraints = NULL, R = 1000L, type = "bollen.stine", return.LRT = TRUE, double.bootstrap = "standard", double.bootstrap.R = 249, double.bootstrap.alpha = 0.05, parallel = c("no", "multicore", "snow"), ncpus = 1L, cl = NULL, verbose = FALSE, ...) { if (!missing(verbose)) { current.verbose <- lav_verbose() if (lav_verbose(verbose)) on.exit(lav_verbose(current.verbose), TRUE) } fit.B1 <- sem(model, ..., data = data, se = "none", test = "standard" ) fit.B0 <- fit.A1 <- sem(model, ..., data = data, se = "none", test = "standard", constraints = constraints ) # con.idx <- (max(fit.B1@ParTable$id) + 1L):max(fit.A1@ParTable$id) # # user.equal <- fit.A1@ParTable # user.equal$op[con.idx] <- "==" user.equal <- fit.A1@ParTable CON <- attr( lavParseModelString(constraints, parser = fit.B1@Options$parser), "constraints" ) for (con in 1:length(CON)) { if (CON[[con]]$op %in% c("<", ">")) { this.lhs <- CON[[con]]$lhs this.op <- CON[[con]]$op this.rhs <- CON[[con]]$rhs # find this line in user.equal@ParTable idx <- which( user.equal$lhs == this.lhs, user.equal$op == this.op, user.equal$rhs == this.rhs ) if (length(idx) == 0L) { # not found, give warning? 
lav_msg_stop(gettext("no inequality constraints (<, >) found.")) } # change op to == user.equal$op[idx] <- "==" } } fit.A0 <- sem(user.equal, ..., data = data, se = "none", test = "standard" ) lrt.bootA <- bootstrapLRT(fit.A0, fit.A1, R = R, type = type, verbose = verbose, return.LRT = return.LRT, double.bootstrap = double.bootstrap, double.bootstrap.R = double.bootstrap.R, double.bootstrap.alpha = double.bootstrap.alpha, parallel = parallel, ncpus = ncpus, cl = cl ) lrt.bootB <- bootstrapLRT(fit.B0, fit.B1, R = R, type = type, verbose = verbose, return.LRT = return.LRT, double.bootstrap = double.bootstrap, double.bootstrap.R = double.bootstrap.R, double.bootstrap.alpha = double.bootstrap.alpha, parallel = parallel, ncpus = ncpus, cl = cl ) output <- list( fit.A0 = fit.A0, fit.A1 = fit.A1, fit.B1 = fit.B1, lrt.bootA = lrt.bootA, lrt.bootB = lrt.bootB, double.bootstrap = double.bootstrap, double.bootstrap.alpha = double.bootstrap.alpha, return.LRT = return.LRT, type = type ) class(output) <- "InformativeTesting" return(output) } print.InformativeTesting <- function(x, digits = max(3, getOption("digits") - 3), ...) { object <- x cat("\nInformativeTesting: Order/Inequality Constrained Hypothesis Testing:\n\n") cat(" Variable names in model :", unlist(object$fit.A1@Data@ov.names[1]), "\n") cat(" Number of variables :", object$fit.A1@Model@nvar[1], "\n") cat(" Number of groups :", object$fit.A1@Data@ngroups, "\n") cat(" Used sample size per group :", unlist(object$fit.A1@Data@nobs), "\n") cat(" Used sample size :", sum(unlist(object$fit.A1@Data@nobs)), "\n") cat(" Total sample size :", sum(unlist(object$fit.A1@Data@norig)), "\n\n") cat(" Estimator :", object$fit.A1@Options$estimator, "\n") cat(" Missing data :", object$fit.A1@Options$missing, "\n") cat(" Bootstrap method :", object$type, "\n") cat(" Double bootstrap method :", object$double.bootstrap, "\n") dbtype <- object$double.bootstrap # original LRT for hypothesis test Type A TsA <- attr(object$lrt.bootA, "LRT.original") # original LRT for hypothesis test Type B TsB <- attr(object$lrt.bootB, "LRT.original") # unadjusted pvalues for Ts pvalueA <- object$lrt.bootA[1] pvalueB <- object$lrt.bootB[1] alpha <- object$double.bootstrap.alpha ### if (dbtype == "no") { cat( "\n\n Type A test: H0: all restriktions active (=)", "\n", " vs. H1: at least one restriktion strictly true (>)", "\n" ) cat(" Test statistic: ", format(round(TsA, digits), nsmall = digits), ", unadjusted p-value: ", if (pvalueA < 1e-04) { "<0.0001" } else { format(round(pvalueA, digits), nsmall = digits) }, " (alpha = ", alpha, ") ", "\n\n", sep = "" ) cat( " Type B test: H0: all restriktions true", "\n", " vs. H1: at least one restriktion false", "\n" ) cat(" Test statistic: ", format(round(TsB, digits), nsmall = digits), ", unadjusted p-value: ", if (pvalueB < 1e-04) { "<0.0001" } else { format(round(pvalueB, digits), nsmall = digits) }, " (alpha = ", alpha, ") ", "\n", sep = "" ) } else if (dbtype == "FDB") { # adjusted pvalues for Ts adj.pvalueA <- attr(object$lrt.bootA, "adj.pvalue") adj.pvalueB <- attr(object$lrt.bootB, "adj.pvalue") cat( "\n\n Type A test: H0: all restriktions active (=)", "\n", " vs. H1: at least one restriktion strictly true (>)", "\n" ) cat(" Test statistic: ", format(round(TsA, digits), nsmall = digits), ", adjusted p-value: ", if (adj.pvalueA < 1e-04) { "<0.0001" } else { format(round(adj.pvalueA, digits), nsmall = digits) }, " (alpha = ", alpha, ") ", "\n\n", sep = "" ) cat( " Type B test: H0: all restriktions true", "\n", " vs. 
cat(" Test statistic: ", format(round(TsB, digits), nsmall = digits ), ", adjusted p-value: ", if (adj.pvalueB < 1e-04) { "<0.0001" } else { format(round(adj.pvalueB, digits), nsmall = digits) }, " (alpha = ", alpha, ") ", "\n", sep = "" ) } else if (dbtype == "standard") { # adjusted nominal levels adj.alphaA <- attr(object$lrt.bootA, "adj.alpha") adj.alphaB <- attr(object$lrt.bootB, "adj.alpha") # adjusted pvalues for Ts adj.pvalueA <- attr(object$lrt.bootA, "adj.pvalue") adj.pvalueB <- attr(object$lrt.bootB, "adj.pvalue") cat( "\n\n Type A test: H0: all restrictions active (=)", "\n", " vs. H1: at least one restriction strictly true (>)", "\n" ) cat(" Test statistic: ", format(round(TsA, digits), nsmall = digits ), ", adjusted p-value: ", if (adj.pvalueA < 1e-04) { "<0.0001" } else { format(round(adj.pvalueA, digits), nsmall = digits) }, " (alpha = ", alpha, ") ", "\n", sep = "" ) cat(" ", "unadjusted p-value: ", if (pvalueA < 1e-04) { "<0.0001" } else { format(round(pvalueA, digits), nsmall = digits) }, " (alpha = ", format(round(adj.alphaA, digits), nsmall = digits), ") ", "\n\n", sep = "" ) cat( " Type B test: H0: all restrictions true", "\n", " vs. H1: at least one restriction false", "\n" ) cat(" Test statistic: ", format(round(TsB, digits), nsmall = digits), ", adjusted p-value: ", if (adj.pvalueB < 1e-04) { "<0.0001" } else { format(round(adj.pvalueB, digits), nsmall = digits) }, " (alpha = ", alpha, ") ", "\n", sep = "" ) cat(" ", "unadjusted p-value: ", if (pvalueB < 1e-04) { "<0.0001" } else { format(round(pvalueB, digits), nsmall = digits) }, " (alpha = ", format(round(adj.alphaB, digits), nsmall = digits), ") ", "\n\n", sep = "" ) } if (dbtype == "no") { cat("\n No double bootstrap method is set. The results may be spurious.\n\n") } } plot.InformativeTesting <- function(x, ..., type = c("lr", "ppv"), main = "main", xlab = "xlabel", ylab = "Frequency", freq = TRUE, breaks = 15, cex.main = 1, cex.lab = 1, cex.axis = 1, col = "grey", border = par("fg"), vline = TRUE, vline.col = c("red", "blue"), lty = c(1, 2), lwd = 1, legend = TRUE, bty = "o", cex.legend = 1, loc.legend = "topright") { object <- x return.LRT <- object$return.LRT double.bootstrap <- object$double.bootstrap double.bootstrap.alpha <- object$double.bootstrap.alpha pvalue <- c(object$lrt.bootA[1], object$lrt.bootB[1]) par(mfrow = c(1, 2)) if (length(type) == 2) { par(mfrow = c(2, 2)) } if (return.LRT && ("lr" %in% type)) { lrt.obs <- c( attr(object$lrt.bootA, "LRT.original"), attr(object$lrt.bootB, "LRT.original") ) lrt.A <- attr(object$lrt.bootA, "LRT") lrt.B <- attr(object$lrt.bootB, "LRT") if (length(lrt.A) - length(lrt.B) < 0L) { lrt <- as.data.frame(cbind(c(lrt.A, rep(as.numeric(NA), length(lrt.B) - length(lrt.A))), lrt.B)) } else { lrt <- as.data.frame(cbind(lrt.A, c(lrt.B, rep( as.numeric(NA), length(lrt.A) - length(lrt.B) )))) } names(lrt) <- c("lrt.A", "lrt.B") if (xlab == "xlabel") { xlab.lrt <- c("Bootstrapped LR values") } if (main == "main") { main.lrt <- c( "Distr. of LR values - Type A", "Distr. of LR values - Type B" ) }
of LR values - Type B" ) } for (i in 1:2) { plot <- hist(lrt[, i], plot = FALSE, breaks = breaks) plot(plot, ..., freq = freq, main = main.lrt[i], xlab = xlab.lrt, ylab = ylab, cex.axis = cex.axis, cex.main = cex.main, cex.lab = cex.lab, col = col, border = border, axes = FALSE, xaxt = "n" ) axis(side = 1) axis(side = 2) box(lty = 1, col = "black") if (vline) { abline( v = lrt.obs[i], col = vline.col[1], lty = lty[1], lwd = lwd ) } if (legend) { ppvalue <- sprintf("%.2f", pvalue[i]) obs.lrt <- sprintf("%.2f", lrt.obs[i]) ppval <- paste0("plug-in p value = ", ppvalue) obs.lrt <- paste0("observed LR = ", obs.lrt) legend.obj <- c(obs.lrt, ppval) if (!vline) { legend(loc.legend, legend.obj, lty = c(0, 0), lwd = lwd, cex = cex.legend, bty = bty ) } else { legend(loc.legend, legend.obj, lty = c(lty[1], 0), col = vline.col[1], lwd = lwd, cex = cex.legend, bty = bty ) } } } } if (double.bootstrap == "standard" && (type == "ppv" || length(type) == 2)) { ppvalue.A <- attr(object$lrt.bootA, "plugin.pvalues") ppvalue.B <- attr(object$lrt.bootB, "plugin.pvalues") adj.a <- c( quantile(ppvalue.A, double.bootstrap.alpha), quantile(ppvalue.B, double.bootstrap.alpha) ) adj.ppv <- c( attr(object$lrt.bootA, "adj.pvalue"), attr(object$lrt.bootB, "adj.pvalue") ) if (length(ppvalue.A) - length(ppvalue.B) < 0L) { ppv <- as.data.frame(cbind(c(ppvalue.A, rep(NA, length(ppvalue.B) - length(ppvalue.A))), ppvalue.B)) } else { ppv <- as.data.frame(cbind(ppvalue.A, c(ppvalue.B, rep(NA, length(ppvalue.A) - length(ppvalue.B))))) } names(ppv) <- c("ppA", "ppB") if (xlab == "xlabel") { xlab.ppv <- c("Bootstrapped plug-in p-values") } if (main == "main") { main.ppv <- c( "Distr. of plug-in p-values - Type A", "Distr. of plug-in p-values - Type B" ) } for (i in 1:2) { plot <- hist(ppv[, i], plot = FALSE, breaks = breaks) plot(plot, ..., freq = freq, main = main.ppv[i], xlab = xlab.ppv, ylab = ylab, cex.axis = cex.axis, cex.main = cex.main, cex.lab = cex.lab, col = col, border = border, axes = FALSE, xaxt = "n" ) axis(side = 1, at = seq(0, 1, 0.1)) axis(side = 2) box(lty = 1, col = "black") if (vline) { abline( v = adj.a[i], col = vline.col[1], lty = lty[1], lwd = lwd ) abline( v = adj.ppv[i], col = vline.col[2], lty = lty[2], lwd = lwd ) } if (legend) { adj.alpha <- sprintf("%.2f", adj.a[i]) adj.pval <- sprintf("%.2f", adj.ppv[i]) adja <- paste0("Adjusted alpha = ", adj.alpha) adjp <- paste0("Adjusted p-value = ", adj.pval) legend.obj <- c(adja, adjp) if (!vline) { legend(loc.legend, legend.obj, lty = 0, col = vline.col, lwd = lwd, cex = cex.legend, bty = bty ) } else { legend(loc.legend, legend.obj, lty = lty, col = vline.col, lwd = lwd, cex = cex.legend, bty = bty ) } } } } } lavaan/R/lav_efa_extraction.R0000644000176200001440000003111314627656441015713 0ustar liggesusers# Factor extraction method(s) # YR Feb 2020 # # - ULS_corner only (for now) # - just to get better starting values for ESEM # YR July 2020 # - adding generic function lav_efa_extraction, using eigenvalue based # approach; ML and ULS # - 'corner' is an option lav_efa_extraction <- function(S, nfactors = 1L, method = "ULS", # or ML corner = FALSE, reflect = FALSE, order.lv.by = "none", min.var = 0.0001) { stopifnot(is.matrix(S)) S <- unname(S) method <- tolower(method) # extract variances S.var <- diag(S) # force S to be pd (eg if we have polychoric correlations) S <- lav_matrix_symmetric_force_pd(S, tol = 1e-08) # convert to correlation matrix (ULS is not scale invariant!) 
R <- cov2cor(S) # optim.method if (method == "uls") { minObjective <- efa_extraction_uls_min_objective minGradient <- efa_extraction_uls_min_gradient cache <- efa_extraction_uls_init_cache(R = R, nfactors = nfactors) } else if (method == "ml") { minObjective <- efa_extraction_ml_min_objective minGradient <- efa_extraction_ml_min_gradient cache <- efa_extraction_ml_init_cache(R = R, nfactors = nfactors) } else { lav_msg_stop(gettext("method must be uls or ml (for now)")) } minHessian <- NULL # optimize control.nlminb <- list( eval.max = 20000L, iter.max = 10000L, trace = if (lav_verbose()) { 1L } else { 0L }, abs.tol = (.Machine$double.eps * 10) ) out <- nlminb( start = cache$theta, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control.nlminb, lower = min.var, upper = +1, cache = cache ) # extract LAMBDA/THETA if (method == "uls") { THETA <- diag(out$par * out$par) # compute LAMBDA A <- R diag(A) <- diag(A) - (out$par * out$par) EV <- eigen(A, symmetric = TRUE) Omega.1 <- EV$vectors[, 1:nfactors] gamma.1 <- EV$values[1:nfactors] # LAMBDA <- Omega.1 %*% diag(sqrt(gamma.1)) LAMBDA <- t(t(Omega.1) * sqrt(gamma.1)) # rescale if the input matrix was not a correlation matrix LAMBDA <- sqrt(S.var) * LAMBDA diag(THETA) <- S.var * diag(THETA) } else if (method == "ml") { THETA <- diag(out$par * out$par) # compute LAMBDA psi <- out$par A <- t(psi * cache$R.inv) * psi EV <- eigen(A, symmetric = TRUE) Omega.1 <- EV$vectors[, 1L + cache$nvar - seq_len(cache$nfactors), drop = FALSE ] gamma.1 <- EV$values[1L + cache$nvar - seq_len(cache$nfactors)] # LAMBDA <- diag(psi) %*% Omega.1 %*%sqrt(solve(Gamma.1)-diag(nfactors)) tmp1 <- psi * Omega.1 LAMBDA <- t(t(tmp1) * sqrt((1 / gamma.1) - 1)) # rescale if the input matrix was not a correlation matrix LAMBDA <- sqrt(S.var) * LAMBDA diag(THETA) <- S.var * diag(THETA) } # corner? if (corner) { # rotate to echelon pattern (see echelon() in GPArotation package) HEAD <- LAMBDA[seq_len(nfactors), , drop = FALSE] POST <- try(solve(HEAD, t(chol(tcrossprod(HEAD)))), silent = TRUE) okflag <- FALSE if (inherits(POST, "try-error")) { # new in 0.6-18 # this will happen if we have identical elements in the columns # of HEAD (perhaps the data is artificial?) 
# -> add some fuzz and try again SD <- sqrt(mean(abs(HEAD))) * 1e-04 fuzz <- matrix(rnorm(nfactors * nfactors, 0, SD), nfactors, nfactors) HEAD2 <- HEAD + fuzz POST <- try(solve(HEAD2, t(chol(tcrossprod(HEAD2)))), silent = TRUE) if (!inherits(POST, "try-error")) { okflag <- TRUE } } else { okflag <- TRUE } if (okflag) { LAMBDA <- LAMBDA %*% POST } else { lav_msg_warn(gettext( "rotation of initial factor solution to echelon pattern failed.")) } } # ALWAYS change the sign so that largest element in the column is positive # neg.max <- apply(LAMBDA, 2, function(x) { sign(x[which.max(abs(x))]) }) # neg.idx <- which(neg.max < 0) # if(length(neg.idx) > 0L) { # LAMBDA[, neg.idx] <- -1 * LAMBDA[, neg.idx, drop = FALSE] # } # ALWAYS change the sign so that diag(LAMBDA) is positive neg.idx <- which(diag(LAMBDA) < 0) if (length(neg.idx) > 0L) { LAMBDA[, neg.idx] <- -1 * LAMBDA[, neg.idx, drop = FALSE] } # reflect so that column sum is always positive if (reflect) { SUM <- colSums(LAMBDA) neg.idx <- which(SUM < 0) if (length(neg.idx) > 0L) { LAMBDA[, neg.idx] <- -1 * LAMBDA[, neg.idx, drop = FALSE] } } # reorder the columns if (order.lv.by == "sumofsquares") { L2 <- LAMBDA * LAMBDA order.idx <- base::order(colSums(L2), decreasing = TRUE) } else if (order.lv.by == "index") { # reorder using Asparouhov & Muthen 2009 criterion (see Appendix D) max.loading <- apply(abs(LAMBDA), 2, max) # 1: per factor, number of the loadings that are at least 0.8 of the # highest loading of the factor # 2: mean of the index numbers average.index <- sapply(seq_len(ncol(LAMBDA)), function(i) { mean(which(abs(LAMBDA[, i]) >= 0.8 * max.loading[i])) }) # order of the factors order.idx <- base::order(average.index) } else if (order.lv.by == "none") { order.idx <- seq_len(ncol(LAMBDA)) } else { lav_msg_stop(gettext("order must be index, sumofsquares or none")) } LAMBDA <- LAMBDA[, order.idx, drop = FALSE] list(LAMBDA = LAMBDA, THETA = THETA) } efa_extraction_uls_init_cache <- function(R = NULL, nfactors = 1L, parent = parent.frame()) { R.inv <- solve(R) nvar <- ncol(R) # starting values for diagonal elements of THETA # using Joreskog (1966) suggestion: theta.init <- (1 - nfactors / (2 * nvar)) * 1 / diag(R.inv) theta <- sqrt(theta.init) out <- list2env( list( R = R, nfactors = nfactors, theta = theta ), parent = parent ) out } # x is here the sqrt() of theta! 
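# parameterizing x == sqrt(theta) keeps the uniquenesses theta == x * x
# nonnegative without explicit constraints; for reference, the objective
# evaluated below is
#     A == R - diag(theta)          (the 'reduced' correlation matrix)
#     gamma.2 == the (nvar - nfactors) smallest eigenvalues of A
#     F.uls == 0.5 * sum(gamma.2 * gamma.2)
# so a perfectly fitting solution drives all residual eigenvalues to zero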
efa_extraction_uls_min_objective <- function(x, cache = NULL) { cache$theta <- x with(cache, { A <- R diag(A) <- diag(A) - (theta * theta) EV <- eigen(A, symmetric = TRUE, only.values = TRUE) gamma.2 <- EV$values[-seq_len(nfactors)] res <- 0.5 * sum(gamma.2 * gamma.2) return(res) }) } efa_extraction_uls_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x # nothing to do } with(cache, { A <- R diag(A) <- diag(A) - (theta * theta) EV <- eigen(A, symmetric = TRUE) Omega.2 <- EV$vectors[, -seq_len(nfactors)] gamma.2 <- EV$values[-seq_len(nfactors)] res <- -2 * theta * colSums(t(Omega.2 * Omega.2) * gamma.2) return(res) }) } # ML efa_extraction_ml_init_cache <- function(R = NULL, nfactors = 1L, parent = parent.frame()) { R.inv <- solve(R) nvar <- ncol(R) # starting values for diagonal elements of THETA # using Joreskog (1966) suggestion: theta.init <- (1 - nfactors / (2 * nvar)) * 1 / diag(R.inv) theta <- sqrt(theta.init) out <- list2env( list( R = R, nfactors = nfactors, R.inv = R.inv, nvar = nvar, # for ML only theta = theta ), parent = parent ) out } # x is here the sqrt of theta efa_extraction_ml_min_objective <- function(x, cache = NULL) { cache$theta <- x with(cache, { psi <- theta # A <- diag(psi) %*% R.inv %*% diag(psi) A <- t(R.inv * psi) * psi EV <- eigen(A, symmetric = TRUE, only.values = TRUE) gamma.2 <- EV$values[(nvar - nfactors):1L] res <- sum(log(gamma.2) + 1 / gamma.2 - 1) return(res) }) } efa_extraction_ml_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x # nothing to do } with(cache, { psi <- theta # A <- diag(psi) %*% solve(S) %*% diag(psi) A <- t(R.inv * psi) * psi EV <- eigen(A, symmetric = TRUE) omega.2 <- EV$vectors[, (nvar - nfactors):1L, drop = FALSE] gamma.2 <- EV$values[(nvar - nfactors):1L] res <- colSums(t(omega.2 * omega.2) * (1 - 1 / gamma.2)) return(res) }) } # ULS estimation # # - but resulting in a upper-corner all zeroes LAMBDA matrix # - not using eigenvalues/vectors, but minimizing the residuals # directly # - should give the same results as MINRES (after an orthogonal transformation) # - unless there are heywood cases; this function allows for negative variances! lav_efa_extraction_uls_corner <- function(S, nfactors = 1L, reflect = TRUE, order.lv.by = "none") { stopifnot(is.matrix(S)) S <- unname(S) nvar <- nrow(S) # extract variances S.var <- diag(S) # convert to correlation matrix (ULS is not scale invariant!) 
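# a small illustration (hypothetical numbers) of the echelon/'corner'
# pattern that is targeted further down: for nvar == 4 and nfactors == 2,
# the upper corner of LAMBDA is fixed to zero,
#     LAMBDA == [ l11   0  ]
#               [ l21  l22 ]
#               [ l31  l32 ]
#               [ l41  l42 ]
# which removes the rotational indeterminacy of the unrotated solution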
R <- cov2cor(S) # R.inv <- solve(R) # eigenvalue decomposition (to get starting values for LAMBDA) EV <- eigen(R, symmetric = TRUE) # extract first nfac components (assuming no measurement error) PC <- (EV$vectors[, seq_len(nfactors), drop = FALSE] %*% diag(sqrt(EV$values[seq_len(nfactors)]))) # rotate to echelon pattern (see echelon() in GPArotation package) HEAD <- PC[seq_len(nfactors), , drop = FALSE] LAMBDA <- PC %*% solve(HEAD, t(chol(tcrossprod(HEAD)))) THETA <- diag(nvar) if (nfactors > 1L) { corner.idx <- which(row(LAMBDA) < nfactors & col(LAMBDA) > row(LAMBDA)) lambda.idx <- seq_len(nvar * nfactors)[-corner.idx] LAMBDA[corner.idx] <- 0 # to make them exactly zero } else { corner.idx <- integer(0L) lambda.idx <- seq_len(nvar) } # optim.method minObjective <- efa_extraction_uls_corner_min_objective minGradient <- efa_extraction_uls_corner_min_gradient minHessian <- NULL # create cache environment cache <- efa_extraction_uls_corner_init_cache( LAMBDA = LAMBDA, lambda.idx = lambda.idx, R = R ) control.nlminb <- list( eval.max = 20000L, iter.max = 10000L, trace = if (lav_verbose()) { 1L } else { 0L }, abs.tol = (.Machine$double.eps * 10) ) # optimize out <- nlminb( start = cache$theta, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control.nlminb, lower = -1, upper = +1, cache = cache ) LAMBDA[lambda.idx] <- out$par diag(THETA) <- 1 - diag(tcrossprod(LAMBDA)) # rescale if the input matrix was not a correlation matrix LAMBDA <- sqrt(S.var) * LAMBDA diag(THETA) <- S.var * diag(THETA) # reflect so that column sum is always positive if (reflect) { SUM <- colSums(LAMBDA) neg.idx <- which(SUM < 0) if (length(neg.idx) > 0L) { LAMBDA[, neg.idx] <- -1 * LAMBDA[, neg.idx, drop = FALSE] } } # reorder the columns if (order.lv.by == "sumofsquares") { L2 <- LAMBDA * LAMBDA order.idx <- base::order(colSums(L2), decreasing = TRUE) } else if (order.lv.by == "index") { # reorder using Asparouhov & Muthen 2009 criterion (see Appendix D) max.loading <- apply(abs(LAMBDA), 2, max) # 1: per factor, number of the loadings that are at least 0.8 of the # highest loading of the factor # 2: mean of the index numbers average.index <- sapply(seq_len(ncol(LAMBDA)), function(i) { mean(which(abs(LAMBDA[, i]) >= 0.8 * max.loading[i])) }) # order of the factors order.idx <- base::order(average.index) } else if (order.lv.by == "none") { order.idx <- seq_len(ncol(LAMBDA)) } else { lav_msg_stop(gettext("order must be index, sumofsquares or none")) } LAMBDA <- LAMBDA[, order.idx, drop = FALSE] list(LAMBDA = LAMBDA, THETA = THETA) } efa_extraction_uls_corner_init_cache <- function(LAMBDA = NULL, lambda.idx = NULL, R = NULL, parent = parent.frame()) { theta <- LAMBDA[lambda.idx] out <- list2env( list( LAMBDA = LAMBDA, lambda.idx = lambda.idx, R = R, theta = theta ), parent = parent ) out } efa_extraction_uls_corner_min_objective <- function(x, cache = NULL) { cache$theta <- x with(cache, { LAMBDA[lambda.idx] <- theta res1 <- lav_matrix_vech(R - tcrossprod(LAMBDA), diagonal = FALSE) res2 <- res1 * res1 return(sum(res2)) }) } efa_extraction_uls_corner_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x # nothing to do } with(cache, { LAMBDA[lambda.idx] <- theta Sigma <- tcrossprod(LAMBDA) diag(Sigma) <- 1 # diagonal is ignored tmp <- -2 * (R - Sigma) %*% LAMBDA return(tmp[lambda.idx]) }) } lavaan/R/lav_export_mplus.R0000644000176200001440000002217314627656441015467 0ustar liggesusers# export to Mplus syntax lav2mplus <- 
function(lav, group.label = NULL) { lav <- lav2check(lav) header <- " ! this model syntax is autogenerated by lavExport\n" footer <- "\n" lav <- as.data.frame(lav, stringsAsFactors = FALSE) ngroups <- lav_partable_ngroups(lav) lav_one_group <- function(lav) { # Mplus does not like variable names with a 'dot' # replace them by an underscore '_' lav$lhs <- gsub("\\.", "_", lav$lhs) lav$rhs <- gsub("\\.", "_", lav$rhs) # remove constraints (:=, <, >, ==) here con.idx <- which(lav$op %in% c(":=", "<", ">", "==")) if (length(con.idx) > 0L) { lav <- lav[-con.idx, ] } # remove exogenous variances/covariances/intercepts... exo.idx <- which(lav$exo == 1L & lav$op %in% c("~~", "~1")) if (length(exo.idx)) { lav <- lav[-exo.idx, ] } # remove intercepts for categorical variables ord.names <- unique(lav$lhs[lav$op == "|"]) ord.int.idx <- which(lav$op == "~1" & lav$lhs %in% ord.names) if (length(ord.int.idx)) { lav <- lav[-ord.int.idx, ] } # end of line lav$eol <- rep(";", length(lav$lhs)) lav$ustart <- ifelse(is.na(lav$ustart), "", lav$ustart) lav$rhs2 <- ifelse(lav$free == 0L, paste("@", lav$ustart, sep = ""), paste("*", lav$ustart, sep = "") ) lav$plabel <- gsub("\\.", "", lav$plabel) LABEL <- ifelse(lav$label == "", lav$plabel, lav$label) lav$plabel <- ifelse(LABEL == "", LABEL, paste(" (", LABEL, ")", sep = "") ) # remove variances for ordered variables ov.names.ord <- vnames(lav, type = "ov.ord") ord.idx <- which(lav$lhs %in% ov.names.ord & lav$op == "~~" & lav$free == 0L & lav$lhs == lav$rhs) lav$lhs[ord.idx] <- paste("! ", lav$lhs[ord.idx], sep = "") lav$op[ord.idx] <- "" lav$rhs[ord.idx] <- "" # variances var.idx <- which(lav$op == "~~" & lav$rhs == lav$lhs) lav$op[var.idx] <- "" lav$rhs[var.idx] <- "" # scaling factors scal.idx <- which(lav$op == "~*~") lav$op[scal.idx] <- "" lav$rhs2[scal.idx] <- paste(lav$rhs2[scal.idx], "}", sep = "") lav$lhs[scal.idx] <- "{" # intercepts - excluding categorical observed int.idx <- which(lav$op == "~1") lav$op[int.idx] <- "" lav$rhs2[int.idx] <- paste(lav$rhs2[int.idx], "]", sep = "") lav$lhs[int.idx] <- paste("[", lav$lhs[int.idx], sep = "") # thresholds th.idx <- which(lav$op == "|") lav$op[th.idx] <- "$" lav$rhs[th.idx] <- gsub("t", "", x = lav$rhs[th.idx]) lav$rhs2[th.idx] <- paste(lav$rhs2[th.idx], "]", sep = "") lav$lhs[th.idx] <- paste("[", lav$lhs[th.idx], sep = "") # replace binary operators lav$op <- ifelse(lav$op == "=~", " BY ", lav$op) lav$op <- ifelse(lav$op == "~", " ON ", lav$op) lav$op <- ifelse(lav$op == "~~", " WITH ", lav$op) lav2 <- paste(lav$lhs, lav$op, lav$rhs, lav$rhs2, lav$plabel, lav$eol, sep = "" ) body <- paste(" ", lav2, collapse = "\n") body } if (ngroups == 1L) { body <- lav_one_group(lav) } else { group.values <- lav_partable_group_values(lav) # group 1 body <- lav_one_group(lav[lav$group == group.values[1], ]) if (is.null(group.label) || length(group.label) == 0L) { group.label <- paste(1:ngroups) } for (g in 2:ngroups) { body <- paste(body, paste("\nMODEL ", group.label[g], ":\n", sep = ""), lav_one_group(lav[lav$group == group.values[g], ]), sep = "" ) } } # constraints go to a 'MODEL CONSTRAINT' block con.idx <- which(lav$op %in% c(":=", "<", ">", "==")) if (length(con.idx) > 0L) { ### FIXME: we need to convert the operator ### e.g. b^2 --> b**2, others??
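### a minimal sketch of the intended translation (only the exponentiation
### operator is converted here): a lavaan constraint like
###     b2 == b1^2
### becomes, in the Mplus MODEL CONSTRAINT block,
###     b2 = b1**2;
### other R-specific functions in constraint formulas are passed through
### untranslated for now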
lav$lhs[con.idx] <- gsub("\\^", "**", lav$lhs[con.idx]) lav$rhs[con.idx] <- gsub("\\^", "**", lav$rhs[con.idx]) constraints <- "\nMODEL CONSTRAINT:\n" # define 'new' variables def.idx <- which(lav$op == ":=") if (length(def.idx) > 0L) { def <- paste(lav$lhs[def.idx], collapse = " ") constraints <- paste(constraints, "NEW (", def, ");") lav$op[def.idx] <- "=" } # replace '==' by '=' eq.idx <- which(lav$op == "==") if (length(eq.idx) > 0L) { lav$op[eq.idx] <- "=" } con <- paste(gsub("\\.", "", lav$lhs[con.idx]), " ", lav$op[con.idx], " ", gsub("\\.", "", lav$rhs[con.idx]), ";", sep = "" ) con2 <- paste(" ", con, collapse = "\n") constraints <- paste(constraints, con2, sep = "\n") } else { constraints <- "" } out <- paste(header, body, constraints, footer, sep = "") class(out) <- c("lavaan.character", "character") out } # helper functions lav_mplus_estimator <- function(object) { estimator <- object@Options$estimator if (estimator == "DWLS") { estimator <- "WLS" } # only 1 argument for 'test' is allowed if (length(object@Options$test) > 1L) { standard.idx <- which(object@Options$test == "standard") if (length(standard.idx) > 1L) { object@Options$test <- object@Options$test[-standard.idx] } if (length(object@Options$test) > 1L) { lav_msg_warn(gettext("only first (non-standard) test will be used")) object@Options$test <- object@Options$test[1] } } if (estimator == "ML") { if (object@Options$test %in% c("yuan.bentler", "yuan.bentler.mplus")) { estimator <- "MLR" } else if (object@Options$test == "satorra.bentler") { estimator <- "MLM" } else if (object@Options$test == "scaled.shifted") { estimator <- "MLMV" } else if (object@Options$se == "first.order") { estimator <- "MLF" } } else if (estimator %in% c("ULS", "WLS")) { if (object@Options$test == "satorra.bentler") { estimator <- paste(estimator, "M", sep = "") } else if (object@Options$test == "scaled.shifted") { estimator <- paste(estimator, "MV", sep = "") } } else if (estimator == "MML") { estimator <- "ML" } estimator } lav_mplus_header <- function(data.file = NULL, group.label = "", ov.names = "", listwise = FALSE, ov.ord.names = "", estimator = "ML", meanstructure = FALSE, weight.name = character(0L), information = "observed", data.type = "full", nobs = NULL) { # replace '.' by '_' in all variable names ov.names <- gsub("\\.", "_", ov.names) ov.ord.names <- gsub("\\.", "_", ov.ord.names) ### FIXME!! ### this is old code from lavaan 0.3-1 ### surely, this can be done better... 
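### for orientation, a sketch of the header this function assembles for a
### (hypothetical) single-group, full-data run with variables y1-y3:
###     TITLE:
###       [This syntax is autogenerated by lavExport]
###     DATA:
###       file is lavExport.dat;
###       type is individual;
###     VARIABLE:
###       names are y1 y2 y3;
###       missing are all (-999999);
###     ANALYSIS:
###       type = general;
###       estimator = ML;
###     MODEL:
### (file name, variable names, estimator etc. are filled in from the
### arguments below)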
# TITLE command c.TITLE <- "TITLE:\n" c.TITLE <- paste( c.TITLE, " [This syntax is autogenerated by lavExport]\n" ) # DATA command c.DATA <- "DATA:\n" ngroups <- length(data.file) if (ngroups == 1L) { c.DATA <- paste(c.DATA, " file is ", data.file, ";\n", sep = "" ) } else { for (g in 1:ngroups) { c.DATA <- paste(c.DATA, " file (", group.label[g], ") is ", data.file[g], ";\n", sep = "" ) } } if (data.type == "full") { c.DATA <- paste(c.DATA, " type is individual;\n", sep = "") if (listwise) { c.DATA <- paste(c.DATA, " listwise = on;\n", sep = "") } } else if (data.type == "moment") { c.DATA <- paste(c.DATA, " type is fullcov;\n", sep = "") c.DATA <- paste(c.DATA, " nobservations are ", nobs, ";\n", sep = "") } else { lav_msg_stop(gettext("data.type must be full or moment")) } # VARIABLE command c.VARIABLE <- "VARIABLE:\n" c.VARIABLE <- paste(c.VARIABLE, " names are", sep = "") nvar <- length(ov.names) tmp <- 0 for (i in 1:nvar) { if (tmp %% 6 == 0) { c.VARIABLE <- paste(c.VARIABLE, "\n ", sep = "") } c.VARIABLE <- paste(c.VARIABLE, ov.names[i], sep = " ") tmp <- tmp + 1 } c.VARIABLE <- paste(c.VARIABLE, ";\n", sep = "") # missing if (data.type == "full") { c.VARIABLE <- paste(c.VARIABLE, " missing are all (-999999);\n", sep = "" ) } # categorical? if (length(ov.ord.names)) { c.VARIABLE <- paste(c.VARIABLE, " categorical are", sep = "") nvar <- length(ov.ord.names) tmp <- 0 for (i in 1:nvar) { if (tmp %% 6 == 0) { c.VARIABLE <- paste(c.VARIABLE, "\n ", sep = "") } c.VARIABLE <- paste(c.VARIABLE, ov.ord.names[i]) tmp <- tmp + 1 } c.VARIABLE <- paste(c.VARIABLE, ";\n", sep = "") } # weight variable? if (length(weight.name) > 0L) { c.VARIABLE <- paste(c.VARIABLE, " weight = ", weight.name, ";\n", sep = "" ) } # ANALYSIS command c.ANALYSIS <- paste("ANALYSIS:\n type = general;\n", sep = "") c.ANALYSIS <- paste(c.ANALYSIS, " estimator = ", toupper(estimator), ";\n", sep = "" ) if (toupper(estimator) %in% c("ML", "MLR")) { c.ANALYSIS <- paste(c.ANALYSIS, " information = ", information[1], ";\n", sep = "" ) } if (!meanstructure) { c.ANALYSIS <- paste(c.ANALYSIS, " model = nomeanstructure;\n", sep = "" ) } # MODEL command c.MODEL <- paste("MODEL:\n") # assemble pre-model header out <- paste(c.TITLE, c.DATA, c.VARIABLE, c.ANALYSIS, c.MODEL, sep = "") out } lavaan/R/lav_uvord.R0000644000176200001440000003032414627656441014062 0ustar liggesusers# functions to deal with binary/ordinal univariate data # - probit regression # - ordinal probit regression # - logit regression # - ordinal logit regression # Note: the idea of using 'o1' and 'o2' when computing z1/z2 comes from # the dissertation of Christensen, 2012 (see also his `ordinal' package) # YR - 25 Nov 2019 (replacing the old lav_probit.R routines) lav_uvord_fit <- function(y = NULL, X = NULL, wt = rep(1, length(y)), lower = -Inf, upper = +Inf, optim.method = "nlminb", logistic = FALSE, # probit is the default control = list(), output = "list") { # y if (!is.integer(y)) { # brute force, no checking! 
(this is a lower-level function) y <- as.integer(y) } if (!min(y, na.rm = TRUE) == 1L) { y <- as.integer(ordered(y)) } # check weights if (is.null(wt)) { wt <- rep(1, length(y)) } else { if (length(y) != length(wt)) { lav_msg_stop(gettext("length y is not the same as length wt")) } if (any(wt < 0)) { lav_msg_stop(gettext("all weights should be positive")) } } # check lower/upper # TODO # optim.method minObjective <- lav_uvord_min_objective minGradient <- lav_uvord_min_gradient minHessian <- lav_uvord_min_hessian if (optim.method == "nlminb" || optim.method == "nlminb2") { # nothing to do } else if (optim.method == "nlminb0") { minGradient <- minHessian <- NULL } else if (optim.method == "nlminb1") { minHessian <- NULL } # create cache environment cache <- lav_uvord_init_cache(y = y, X = X, wt = wt, logistic = logistic) # optimize -- only changes from defaults control.nlminb <- list( eval.max = 20000L, iter.max = 10000L, trace = 0L, abs.tol = (.Machine$double.eps * 10) ) control.nlminb <- modifyList(control.nlminb, control) optim <- nlminb( start = cache$theta, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control.nlminb, lower = lower, upper = upper, cache = cache ) if (output == "cache") { return(cache) } # return results as a list (to be compatible with lav_polychor.R) out <- list( theta = optim$par, nexo = cache$nexo, nth = cache$nth, th.idx = seq_len(cache$nth), slope.idx = seq_len(length(optim$par))[-seq_len(cache$nth)], missing.idx = cache$missing.idx, y = cache$y, wt = cache$wt, Y1 = cache$Y1, Y2 = cache$Y2, z1 = cache$z1, z2 = cache$z2, X = cache$X ) } # shortcut to get (possibly weighted) thresholds only, if no eXo lav_uvord_th <- function(y = NULL, wt = NULL) { y.freq <- tabulate(y) # unweighted y.ncat <- length(y.freq) # number of response categories if (is.null(wt)) { y.prop <- y.freq / sum(y.freq) } else { y.freq <- numeric(y.ncat) # numeric! weights... for (cat in seq_len(y.ncat)) { y.freq[cat] <- sum(wt[y == cat], na.rm = TRUE) } y.prop <- y.freq / sum(y.freq) } qnorm(cumsum(y.prop[-length(y.prop)])) } # prepare cache environment lav_uvord_init_cache <- function(y = NULL, X = NULL, wt = rep(1, length(y)), logistic = FALSE, parent = parent.frame()) { nobs <- length(y) # number of response categories y.ncat <- length(tabulate(y)) # unweighted # number of thresholds nth <- y.ncat - 1L # X if (is.null(X)) { nexo <- 0L } else { X <- unname(X) nexo <- ncol(X) # new in 0.6-17: check if X is full rank if (!anyNA(X)) { if (qr(X)$rank < ncol(X)) { lav_msg_stop(gettext( "matrix of exogenous covariates is rank deficient!(i.e., some x variables contain redundant information)")) } } } # nobs if (is.null(wt)) { N <- nobs } else { N <- sum(wt) } # frequencies (possibly weighted by wt) y.freq <- numeric(y.ncat) # numeric! weights... for (cat in seq_len(y.ncat)) { y.freq[cat] <- sum(wt[y == cat], na.rm = TRUE) } y.prop <- y.freq / sum(y.freq) # missing values missing.idx <- which(is.na(y)) # missing values if (any(is.na(y)) || (!is.null(X) && any(is.na(X)))) { lav_crossprod <- lav_matrix_crossprod } else { lav_crossprod <- base::crossprod } # distribution if (logistic) { pfun <- plogis dfun <- dlogis gfun <- function(x) { # FIXMe: is it worth making this work for abs(x) > 200? 
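# what follows computes the first derivative of the logistic density; with
# e == exp(-x) one can verify that
#     dlogis(x) == e / (1 + e)^2
#     gfun(x) == dlogis(x) * (1 - 2 * plogis(x)) == e * (e - 1) / (1 + e)^3
# for abs(x) >= 200 both factors underflow/overflow numerically, so those
# entries are left at their initial value of zero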
out <- numeric(length(x)) out[is.na(x)] <- NA x.ok <- which(abs(x) < 200) e <- exp(-x[x.ok]) e1 <- 1 + e e2 <- e1 * e1 e4 <- e2 * e2 out[x.ok] <- -e / e2 + e * (2 * (e * e1)) / e4 out } } else { pfun <- pnorm dfun <- dnorm gfun <- function(x) { -x * dnorm(x) } } # offsets -Inf/+Inf o1 <- ifelse(y == nth + 1, 100, 0) o2 <- ifelse(y == 1, -100, 0) # TH matrices (Matrix logical?) Y1 <- matrix(1:nth, nobs, nth, byrow = TRUE) == y Y2 <- matrix(1:nth, nobs, nth, byrow = TRUE) == (y - 1L) # starting values if (nexo == 0L) { if (logistic) { th.start <- qlogis(cumsum(y.prop[-length(y.prop)])) } else { th.start <- qnorm(cumsum(y.prop[-length(y.prop)])) } } else if (nth == 1L && nexo > 0L) { th.start <- 0 } else { if (logistic) { # th.start <- seq(-1, 1, length = nth) / 2 th.start <- qlogis((1:nth) / (nth + 1)) } else { # th.start <- seq(-1, 1, length = nth) / 2 th.start <- qnorm((1:nth) / (nth + 1)) } } beta.start <- rep(0, nexo) theta <- c(th.start, beta.start) # parameter labels (for pretty output only) # th.lab <- paste("th", seq_len(nth), sep = "") # sl.lab <- character(0L) # if(nexo > 0L) { # sl.lab <- paste("beta", seq_len(nexo), sep = "") # } # theta.labels <- c(th.lab, sl.lab) out <- list2env( list( y = y, X = X, wt = wt, o1 = o1, o2 = o2, missing.idx = missing.idx, N = N, pfun = pfun, dfun = dfun, gfun = gfun, lav_crossprod = lav_crossprod, nth = nth, nobs = nobs, y.ncat = y.ncat, nexo = nexo, Y1 = Y1, Y2 = Y2, theta = theta ), parent = parent ) out } # compute total (log)likelihood lav_uvord_loglik <- function(y = NULL, X = NULL, wt = rep(1, length(y)), logistic = FALSE, cache = NULL) { if (is.null(cache)) { cache <- lav_uvord_fit( y = y, X = X, wt = wt, logistic = logistic, output = "cache" ) } lav_uvord_loglik_cache(cache = cache) } lav_uvord_loglik_cache <- function(cache = NULL) { with(cache, { # Note: we could treat the binary case separately, # avoiding calling pfun() twice # free parameters th <- theta[1:nth] TH <- c(0, th, 0) beta <- theta[-c(1:nth)] if (nexo > 0L) { eta <- drop(X %*% beta) z1 <- TH[y + 1L] - eta + o1 z2 <- TH[y] - eta + o2 } else { z1 <- TH[y + 1L] + o1 z2 <- TH[y] + o2 } pi.i <- pfun(z1) - pfun(z2) # avoid numerical degradation if z2 (and therefore z1) are both 'large' # and the pfuns are close to 1.0 large.idx <- which(z2 > 1) if (length(large.idx) > 0L) { pi.i[large.idx] <- (pfun(z2[large.idx], lower.tail = FALSE) - pfun(z1[large.idx], lower.tail = FALSE)) } loglik <- sum(wt * log(pi.i), na.rm = TRUE) return(loglik) }) } # casewise scores lav_uvord_scores <- function(y = NULL, X = NULL, wt = rep(1, length(y)), use.weights = TRUE, logistic = FALSE, cache = NULL) { if (is.null(cache)) { cache <- lav_uvord_fit( y = y, X = X, wt = wt, logistic = logistic, output = "cache" ) } SC <- lav_uvord_scores_cache(cache = cache) if (!is.null(wt) && use.weights) { SC <- SC * wt } SC } lav_uvord_scores_cache <- function(cache = NULL) { with(cache, { # d logl / d pi dldpi <- 1 / pi.i # unweighted! 
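# for reference, the casewise score formulas evaluated below, with
# pi.i == pfun(z1) - pfun(z2), p1 == dfun(z1) and p2 == dfun(z2):
#     d logl.i / d th.k == (Y1[i, k] * p1[i] - Y2[i, k] * p2[i]) / pi.i
#     d logl.i / d beta == -X[i, ] * (p1[i] - p2[i]) / pi.i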
# we assume z1/z2 are available p1 <- dfun(z1) p2 <- dfun(z2) # th scores.th <- dldpi * (Y1 * p1 - Y2 * p2) # beta if (nexo > 0L) { scores.beta <- dldpi * (-X) * (p1 - p2) return(cbind(scores.th, scores.beta, deparse.level = 0)) } else { return(scores.th) } }) } lav_uvord_gradient_cache <- function(cache = NULL) { with(cache, { # d logl / d pi wtp <- wt / pi.i p1 <- dfun(z1) p2 <- dfun(z2) # th dxa <- Y1 * p1 - Y2 * p2 scores.th <- wtp * dxa # beta if (nexo > 0L) { dxb <- X * (p1 - p2) # == X*p1 - X*p2 scores.beta <- wtp * (-dxb) return(colSums(cbind(scores.th, scores.beta, deparse.level = 0), na.rm = TRUE )) } else { return(colSums(scores.th, na.rm = TRUE)) } }) } # compute total Hessian lav_uvord_hessian <- function(y = NULL, X = NULL, wt = rep(1, length(y)), logistic = FALSE, cache = NULL) { if (is.null(cache)) { cache <- lav_uvord_fit( y = y, X = X, wt = wt, logistic = logistic, output = "cache" ) } tmp <- lav_uvord_loglik_cache(cache = cache) tmp <- lav_uvord_gradient_cache(cache = cache) lav_uvord_hessian_cache(cache = cache) } lav_uvord_hessian_cache <- function(cache = NULL) { with(cache, { wtp2 <- wt / (pi.i * pi.i) g1w <- gfun(z1) * wtp g2w <- gfun(z2) * wtp Y1gw <- Y1 * g1w Y2gw <- Y2 * g2w dx2.tau <- (lav_crossprod(Y1gw, Y1) - lav_crossprod(Y2gw, Y2) - lav_crossprod(dxa, dxa * wtp2)) if (nexo == 0L) { return(dx2.tau) } dxb2 <- dxb * wtp2 dx2.beta <- (lav_crossprod(X * g1w, X) - lav_crossprod(X * g2w, X) - lav_crossprod(dxb, dxb2)) dx.taubeta <- (-lav_crossprod(Y1gw, X) + lav_crossprod(Y2gw, X) + lav_crossprod(dxa, dxb2)) Hessian <- rbind(cbind(dx2.tau, dx.taubeta, deparse.level = 0), cbind(t(dx.taubeta), dx2.beta, deparse.level = 0), deparse.level = 0 ) return(Hessian) }) } # compute total (log)likelihood, for specific 'x' (nlminb) lav_uvord_min_objective <- function(x, cache = NULL) { # check order of first 2 thresholds; if x[1] > x[2], return Inf # new in 0.6-8 if (cache$nth > 1L && x[1] > x[2]) { return(+Inf) } if (cache$nth > 2L && x[2] > x[3]) { return(+Inf) } if (cache$nth > 3L && x[3] > x[4]) { return(+Inf) } cache$theta <- x -1 * lav_uvord_loglik_cache(cache = cache) / cache$N } # compute gradient, for specific 'x' (nlminb) lav_uvord_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_uvord_loglik_cache(cache = cache) } -1 * lav_uvord_gradient_cache(cache = cache) / cache$N } # compute hessian, for specific 'x' (nlminb) lav_uvord_min_hessian <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_uvord_loglik_cache(cache = cache) tmp <- lav_uvord_gradient_cache(cache = cache) } -1 * lav_uvord_hessian_cache(cache = cache) / cache$N } # get 'z1' and 'z2' values, given (new) values for the parameters # only needed for lav_bvord_cor_scores(), which is called from # pml_deriv1() in lav_model_gradient_pml.R lav_uvord_update_fit <- function(fit.y = NULL, th.new = NULL, sl.new = NULL) { # return fit.y with 'update' z1/z2 values if (is.null(th.new) && is.null(sl.new)) { return(fit.y) } if (!is.null(th.new)) { fit.y$theta[fit.y$th.idx] <- th.new } if (!is.null(sl.new)) { fit.y$theta[fit.y$slope.idx] <- sl.new } nth <- length(fit.y$th.idx) o1 <- ifelse(fit.y$y == nth + 1, 100, 0) o2 <- ifelse(fit.y$y == 1, -100, 0) theta <- fit.y$theta th <- theta[1:nth] TH <- c(0, th, 0) beta <- theta[-c(1:nth)] y <- fit.y$y X <- fit.y$X if (length(fit.y$slope.idx) > 0L) { eta <- drop(X %*% beta) fit.y$z1 <- TH[y + 1L] - eta + o1 fit.y$z2 <- TH[y] - eta + o2 } else 
{ fit.y$z1 <- TH[y + 1L] + o1 fit.y$z2 <- TH[y] + o2 } fit.y } lavaan/R/lav_cfa_1fac.R0000644000176200001440000001302614627656441014346 0ustar liggesusers# special functions for the one-factor model # YR 24 June 2018 # 1-factor model with (only) three indicators: # no iterations needed; can be solved analytically # denote s11, s22, s33 the diagonal elements, and # s21, s31, s32 the off-diagonal elements # under the 1-factor model; typically, either psi == 1, or l1 == 1 # - s11 == l1^2*psi + theta1 # - s22 == l2^2*psi + theta2 # - s33 == l3^2*psi + theta3 # - s21 == l2*l1*psi # - s31 == l3*l1*psi # - s32 == l3*l2*psi # 6 unknowns, 6 knowns # note: if the triad of covariances is negative, there is no # `valid' solution, for example: # # > S # [,1] [,2] [,3] # [1,] 1.0 0.6 0.3 # [2,] 0.6 1.0 -0.1 # [3,] 0.3 -0.1 1.0 # # (note: all eigenvalues are positive) lav_cfa_1fac_3ind <- function(sample.cov, std.lv = FALSE, warn.neg.triad = TRUE, bounds = TRUE) { # check sample cov stopifnot(is.matrix(sample.cov)) nRow <- NROW(sample.cov) nCol <- NCOL(sample.cov) stopifnot(nRow == nCol, nRow < 4L, nCol < 4L) nvar <- nRow # we expect a 3x3 sample covariance matrix # however, if we get a 2x2 (or 1x1 covariance matrix), do something # useful anyways... if (nvar == 1L) { # lambda = 1, theta = 0, psi = sample.cov[1,1] # lambda = 1, theta = 0, psi = 1 (for now, until NlsyLinks is fixed) sample.cov <- matrix(1, 3L, 3L) * 1.0 } else if (nvar == 2L) { # hm, we could force both lambda's to be 1, but if the second # one is negative, this will surely lead to non-convergence issues # # just like lavaan < 0.6.2, we will use the regression of y=marker # on x=item2 mean.2var <- mean(diag(sample.cov)) max.var <- max(diag(sample.cov)) extra <- c(mean.2var, sample.cov[2, 1]) sample.cov <- rbind(cbind(sample.cov, extra, deparse.level = 0), c(extra, max.var), deparse.level = 0 ) } s11 <- sample.cov[1, 1] s22 <- sample.cov[2, 2] s33 <- sample.cov[3, 3] stopifnot(s11 > 0, s22 > 0, s33 > 0) s21 <- sample.cov[2, 1] s31 <- sample.cov[3, 1] s32 <- sample.cov[3, 2] # note: s21*s31*s32 should be positive! neg.triad <- FALSE if (s21 * s31 * s32 < 0) { neg.triad <- TRUE if (warn.neg.triad) { lav_msg_warn(gettext("product of the three covariances is negative!")) } } # first, we assume l1 = 1 psi <- (s21 * s31) / s32 # note that we assume |s32|>0 l1 <- 1 l2 <- s32 / s31 # l2 <- s21/psi l3 <- s32 / s21 # l3 <- s31/psi theta1 <- s11 - psi theta2 <- s22 - l2 * l2 * psi theta3 <- s33 - l3 * l3 * psi # sanity check (new in 0.6-11): apply standard bounds if (bounds) { lower.psi <- s11 - (1 - 0.1) * s11 # we assume REL(y1) >= 0.1 psi <- min(max(psi, lower.psi), s11) l2.bound <- sqrt(s22 / lower.psi) l2 <- min(max(-l2.bound, l2), l2.bound) l3.bound <- sqrt(s33 / lower.psi) l3 <- min(max(-l3.bound, l3), l3.bound) theta1 <- min(max(theta1, 0), s11) theta2 <- min(max(theta2, 0), s22) theta3 <- min(max(theta3, 0), s33) } lambda <- c(l1, l2, l3) theta <- c(theta1, theta2, theta3) # std.lv? if (std.lv) { # we allow for negative psi (if bounds = FALSE) lambda <- lambda * sign(psi) * sqrt(abs(psi)) psi <- 1 } # special cases if (nvar == 1L) { lambda <- lambda[1] theta <- theta[1] } else if (nvar == 2L) { lambda <- lambda[1:2] theta <- theta[1:2] psi <- psi / 2 # smaller works better? 
} list(lambda = lambda, theta = theta, psi = psi, neg.triad = neg.triad) } # FABIN (Hagglund, 1982) # 1-factor only lav_cfa_1fac_fabin <- function(S, lambda.only = FALSE, method = "fabin3", std.lv = FALSE, bounds = TRUE) { # check arguments if (std.lv) { lambda.only <- FALSE # we need psi } nvar <- NCOL(S) # catch nvar < 4 if (nvar < 4L) { out <- lav_cfa_1fac_3ind( sample.cov = S, std.lv = std.lv, warn.neg.triad = FALSE ) return(out) } # 1. lambda lambda <- numeric(nvar) lambda[1L] <- 1.0 for (i in 2:nvar) { idx3 <- (1:nvar)[-c(i, 1L)] s23 <- S[i, idx3] S31 <- S13 <- S[idx3, 1L] if (method == "fabin3") { S33 <- S[idx3, idx3] tmp <- try(solve(S33, S31), silent = TRUE) # GaussJordanPivot is # slightly more efficient if (inherits(tmp, "try-error")) { lambda[i] <- sum(s23 * S31) / sum(S13^2) } else { lambda[i] <- sum(s23 * tmp) / sum(S13 * tmp) } } else { lambda[i] <- sum(s23 * S31) / sum(S13^2) } } # bounds? (new in 0.6-11) if (bounds) { s11 <- S[1, 1] lower.psi <- s11 - (1 - 0.1) * s11 # we assume REL(y1) >= 0.1 for (i in 2:nvar) { l.bound <- sqrt(S[i, i] / lower.psi) lambda[i] <- min(max(-l.bound, lambda[i]), l.bound) } } if (lambda.only) { return(list( lambda = lambda, psi = as.numeric(NA), theta = rep(as.numeric(NA), nvar) )) } # 2. theta # GLS version # W <- solve(S) # LAMBDA <- as.matrix(lambda) # A1 <- solve(t(LAMBDA) %*% W %*% LAMBDA) %*% t(LAMBDA) %*% W # A2 <- W %*% LAMBDA %*% A1 # tmp1 <- W*W - A2*A2 # tmp2 <- diag( W %*% S %*% W - A2 %*% S %*% A2 ) # theta.diag <- solve(tmp1, tmp2) # 'least squares' version, assuming W = I D <- tcrossprod(lambda) / sum(lambda^2) theta <- solve(diag(nvar) - D * D, diag(S - (D %*% S %*% D))) # 3. psi (W=I) S1 <- S - diag(theta) l2 <- sum(lambda^2) psi <- sum(colSums(as.numeric(lambda) * S1) * lambda) / (l2 * l2) # std.lv? if (std.lv) { # we allow for negative psi lambda <- lambda * sign(psi) * sqrt(abs(psi)) psi <- 1 } list(lambda = lambda, theta = theta, psi = psi) } lavaan/R/lav_partable_subset.R0000644000176200001440000004212414627656441016103 0ustar liggesusers# YR 11 feb 2017: initial version # given a parameter table (PT), extract a part of the model: # e.g.: # - only the measurement model (with saturated latent variables) # - only the structural part # - a single measurement block # ... # YR 25 June 2021: - add.exo.cov = TRUE for structural model # - fixed.x = FALSE/TRUE -> exo flags # FIXME: # - but fixed-to-zero covariances may not be present in PT... # - if indicators are regressed on exogenous covariates, should we # add them here?
(no for now, unless add.ind.predictors = TRUE) lav_partable_subset_measurement_model <- function(PT = NULL, lv.names = NULL, add.lv.cov = TRUE, add.ind.predictors = FALSE, add.idx = FALSE, idx.only = FALSE) { # PT PT <- as.data.frame(PT, stringsAsFactors = FALSE) # lavpta lavpta <- lav_partable_attributes(PT) # nblocks nblocks <- lavpta$nblocks block.values <- lav_partable_block_values(PT) # lv.names: list with element per block if (is.null(lv.names)) { lv.names <- lavpta$vnames$lv.regular } else if (!is.list(lv.names)) { lv.names <- rep(list(lv.names), nblocks) } # keep rows idx keep.idx <- integer(0L) # remove not-needed measurement models for (g in 1:nblocks) { # indicators for latent variables we keep IND.idx <- which(PT$op == "=~" & PT$lhs %in% lv.names[[g]] & PT$block == block.values[g]) IND <- PT$rhs[IND.idx] IND.plabel <- PT$plabel[IND.idx] # keep =~ keep.idx <- c(keep.idx, IND.idx) # new in 0.6-17: indicators regressed on predictors if (add.ind.predictors) { PRED.idx <- which(PT$op == "~" & PT$lhs %in% IND & PT$block == block.values[g]) EXTRA <- unique(PT$rhs[PRED.idx]) keep.idx <- c(keep.idx, PRED.idx) # add them to IND, so we include their variances/intercepts IND <- c(IND, EXTRA) } # keep ~~ OV.VAR.idx <- which(PT$op == "~~" & PT$lhs %in% IND & PT$rhs %in% IND & PT$block == block.values[g]) keep.idx <- c(keep.idx, OV.VAR.idx) LV.VAR.idx <- which(PT$op == "~~" & PT$lhs %in% lv.names[[g]] & PT$rhs %in% lv.names[[g]] & PT$block == block.values[g]) keep.idx <- c(keep.idx, LV.VAR.idx) # intercepts indicators OV.INT.idx <- which(PT$op == "~1" & PT$lhs %in% IND & PT$block == block.values[g]) keep.idx <- c(keep.idx, OV.INT.idx) # intercepts latent variables LV.INT.idx <- which(PT$op == "~1" & PT$lhs %in% lv.names[[g]] & PT$block == block.values[g]) keep.idx <- c(keep.idx, LV.INT.idx) # thresholds TH.idx <- which(PT$op == "|" & PT$lhs %in% IND & PT$block == block.values[g]) keep.idx <- c(keep.idx, TH.idx) # scaling factors SC.idx <- which(PT$op == "~*~" & PT$lhs %in% IND & PT$block == block.values[g]) keep.idx <- c(keep.idx, SC.idx) # defined/constraints if (any(PT$op %in% c("==", "<", ">", ":="))) { # get the 'id' numbers and the labels involved in def/constraints PT2 <- PT PT2$free <- PT$id # us 'id' numbers instead of 'free' indices ID <- lav_partable_constraints_label_id(PT2, def = TRUE) LABEL <- names(ID) # what are the row indices that we currently keep? FREE.id <- PT$id[keep.idx] } # defined parameters def.idx <- which(PT$op == ":=") if (length(def.idx) > 0L) { def.keep <- logical(length(def.idx)) for (def in seq_len(length(def.idx))) { # rhs RHS.labels <- all.vars(as.formula(paste( "~", PT[def.idx[def], "rhs"] ))) if (length(RHS.labels) > 0L) { # par id RHS.freeid <- ID[match(RHS.labels, LABEL)] # keep? if (all(RHS.freeid %in% FREE.id)) { def.keep[def] <- TRUE } } else { # only constants? def.keep[def] <- TRUE } } keep.idx <- c(keep.idx, def.idx[def.keep]) # add 'id' numbers of := definitions that we keep FREE.id <- c(FREE.id, PT$id[def.idx[def.keep]]) } # (in)equality constraints con.idx <- which(PT$op %in% c("==", "<", ">")) if (length(con.idx) > 0L) { con.keep <- logical(length(con.idx)) for (con in seq_len(length(con.idx))) { lhs.keep <- FALSE rhs.keep <- FALSE # lhs LHS.labels <- all.vars(as.formula(paste( "~", PT[con.idx[con], "lhs"] ))) if (length(LHS.labels) > 0L) { # par id LHS.freeid <- ID[match(LHS.labels, LABEL)] # keep? 
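# (a side is retained only when every label occurring in it maps to a
# parameter id that is itself kept in this subset)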
if (all(LHS.freeid %in% FREE.id)) { lhs.keep <- TRUE } } else { lhs.keep <- TRUE } # rhs RHS.labels <- all.vars(as.formula(paste( "~", PT[con.idx[con], "rhs"] ))) if (length(RHS.labels) > 0L) { # par id RHS.freeid <- ID[match(RHS.labels, LABEL)] # keep? if (all(RHS.freeid %in% FREE.id)) { rhs.keep <- TRUE } } else { rhs.keep <- TRUE } if (lhs.keep && rhs.keep) { con.keep[con] <- TRUE } } keep.idx <- c(keep.idx, con.idx[con.keep]) } # con } # block if (idx.only) { return(keep.idx) } PT <- PT[keep.idx, , drop = FALSE] # check if we have enough indicators? # TODO # add covariances among latent variables? if (add.lv.cov) { PT <- lav_partable_add_lv_cov( PT = PT, lv.names = lv.names ) } # clean up PT <- lav_partable_complete(PT) if (add.idx) { attr(PT, "idx") <- keep.idx } PT } # NOTE: only within same level lav_partable_add_lv_cov <- function(PT, lv.names = NULL) { # PT PT <- as.data.frame(PT, stringsAsFactors = FALSE) # lavpta lavpta <- lav_partable_attributes(PT) # nblocks nblocks <- lavpta$nblocks block.values <- lav_partable_block_values(PT) # lv.names: list with element per block if (is.null(lv.names)) { lv.names <- lavpta$vnames$lv.regular } else if (!is.list(lv.names)) { lv.names <- rep(list(lv.names), nblocks) } # remove lv.names if not present at same level/block if (nblocks > 1L) { for (b in seq_len(nblocks)) { rm.idx <- which(!lv.names[[b]] %in% lavpta$vnames$lv.regular[[b]]) if (length(rm.idx) > 0L) { lv.names[[b]] <- lv.names[[b]][-rm.idx] } } # b } # add covariances among latent variables for (b in seq_len(nblocks)) { if (length(lv.names[[b]]) > 1L) { tmp <- utils::combn(lv.names[[b]], 2L) for (i in seq_len(ncol(tmp))) { # already present? cov1.idx <- which(PT$op == "~~" & PT$block == block.values[b] & PT$lhs == tmp[1, i] & PT$rhs == tmp[2, i]) cov2.idx <- which(PT$op == "~~" & PT$block == block.values[b] & PT$lhs == tmp[2, i] & PT$rhs == tmp[1, i]) # if not, add if (length(c(cov1.idx, cov2.idx)) == 0L) { ADD <- list( lhs = tmp[1, i], op = "~~", rhs = tmp[2, i], user = 3L, free = max(PT$free) + 1L, block = b ) # add group column if (!is.null(PT$group)) { ADD$group <- unique(PT$block[PT$block == b]) } # add level column if (!is.null(PT$level)) { ADD$level <- unique(PT$level[PT$block == b]) } # add lower column if (!is.null(PT$lower)) { ADD$lower <- as.numeric(-Inf) } # add upper column if (!is.null(PT$upper)) { ADD$upper <- as.numeric(+Inf) } PT <- lav_partable_add(PT, add = ADD) } } } # lv.names } # blocks PT } # this function takes a 'full' SEM (measurement models + structural part) # and returns only the structural part # # - what to do if we have no regressions among the latent variables? 
# we return all covariances among the latent variables # # - also, we should check if we have any 'higher' order factors # lav_partable_subset_structural_model <- function(PT = NULL, add.idx = FALSE, idx.only = FALSE, add.exo.cov = FALSE, fixed.x = FALSE, meanstructure = FALSE) { # PT PT <- as.data.frame(PT, stringsAsFactors = FALSE) # remove any EFA related information -- new in 0.6-18 if (!is.null(PT$efa)) { PT$efa <- NULL PT$est.unrotated <- NULL seven.idx <- which(PT$user == 7L & PT$op == "~~") if (length(seven.idx) > 0L) { PT$user[seven.idx] <- 0L PT$free[seven.idx] <- 1L PT$ustart[seven.idx] <- as.numeric(NA) PT$est[seven.idx] <- PT$est.std[seven.idx] } PT$est.std <- NULL } # lavpta lavpta <- lav_partable_attributes(PT) # nblocks nblocks <- lavpta$nblocks block.values <- lav_partable_block_values(PT) # eqs.names eqs.x.names <- lavpta$vnames$eqs.x eqs.y.names <- lavpta$vnames$eqs.y lv.names <- lavpta$vnames$lv.regular # keep rows idx keep.idx <- integer(0L) # remove not-needed measurement models for (g in 1:nblocks) { # higher-order factor loadings fac.idx <- which(PT$op == "=~" & PT$block == block.values[g] & PT$lhs %in% lavpta$vnames$lv.regular[[g]] & PT$rhs %in% lavpta$vnames$lv.regular[[g]]) # eqs.names eqs.names <- unique(c( lavpta$vnames$eqs.x[[g]], lavpta$vnames$eqs.y[[g]] )) all.names <- unique(c( eqs.names, lavpta$vnames$lv.regular[[g]] )) # regressions reg.idx <- which(PT$op == "~" & PT$block == block.values[g] & PT$lhs %in% eqs.names & PT$rhs %in% eqs.names) # the variances var.idx <- which(PT$op == "~~" & PT$block == block.values[g] & PT$lhs %in% all.names & PT$rhs %in% all.names & PT$lhs == PT$rhs) # optionally covariances (exo!) cov.idx <- which(PT$op == "~~" & PT$block == block.values[g] & PT$lhs %in% all.names & PT$rhs %in% all.names & PT$lhs != PT$rhs) # means/intercepts int.idx <- which(PT$op == "~1" & PT$block == block.values[g] & PT$lhs %in% all.names) keep.idx <- c( keep.idx, reg.idx, var.idx, cov.idx, int.idx, fac.idx ) # defined/constraints if (any(PT$op %in% c("==", "<", ">", ":="))) { # get the 'id' numbers and the labels involved in def/constraints PT2 <- PT PT2$free <- PT$id # us 'id' numbers instead of 'free' indices ID <- lav_partable_constraints_label_id(PT2, def = TRUE) LABEL <- names(ID) # what are the row indices that we currently keep? FREE.id <- PT$id[keep.idx] } # defined parameters def.idx <- which(PT$op == ":=") if (length(def.idx) > 0L) { def.keep <- logical(length(def.idx)) for (def in seq_len(length(def.idx))) { # rhs RHS.labels <- all.vars(as.formula(paste( "~", PT[def.idx[def], "rhs"] ))) if (length(RHS.labels) > 0L) { # par id RHS.freeid <- ID[match(RHS.labels, LABEL)] # keep? if (all(RHS.freeid %in% FREE.id)) { def.keep[def] <- TRUE } } else { # only constants? def.keep[def] <- TRUE } } keep.idx <- c(keep.idx, def.idx[def.keep]) # add 'id' numbers of := definitions that we keep FREE.id <- c(FREE.id, PT$id[def.idx[def.keep]]) } # (in)equality constraints con.idx <- which(PT$op %in% c("==", "<", ">")) if (length(con.idx) > 0L) { con.keep <- logical(length(con.idx)) for (con in seq_len(length(con.idx))) { lhs.keep <- FALSE rhs.keep <- FALSE # lhs LHS.labels <- all.vars(as.formula(paste( "~", PT[con.idx[con], "lhs"] ))) if (length(LHS.labels) > 0L) { # par id LHS.freeid <- ID[match(LHS.labels, LABEL)] # keep? 
if (all(LHS.freeid %in% FREE.id)) { lhs.keep <- TRUE } } else { lhs.keep <- TRUE } # rhs RHS.labels <- all.vars(as.formula(paste( "~", PT[con.idx[con], "rhs"] ))) if (length(RHS.labels) > 0L) { # par id RHS.freeid <- ID[match(RHS.labels, LABEL)] # keep? if (all(RHS.freeid %in% FREE.id)) { rhs.keep <- TRUE } } else { rhs.keep <- TRUE } if (lhs.keep && rhs.keep) { con.keep[con] <- TRUE } } keep.idx <- c(keep.idx, con.idx[con.keep]) } # con } # block if (idx.only) { return(keep.idx) } PT <- PT[keep.idx, , drop = FALSE] # add any missing covariances among exogenous variables if (add.exo.cov) { PT <- lav_partable_add_exo_cov(PT) } # if meanstructure, 'free' user=0 intercepts if (meanstructure) { int.idx <- which(PT$op == "~1" & PT$user == 0L & PT$free == 0L) if (length(int.idx) > 0L) { PT$free[int.idx] <- max(PT$free) + seq_len(length(int.idx)) PT$ustart[int.idx] <- as.numeric(NA) PT$user[int.idx] <- 3L } } # if fixed.x = FALSE, remove all remaining (free) exo=1 elements if (!fixed.x) { exo.idx <- which(PT$exo != 0L) if (length(exo.idx) > 0L) { PT$exo[exo.idx] <- 0L PT$user[exo.idx] <- 3L PT$free[exo.idx] <- max(PT$free) + seq_len(length(exo.idx)) } # if fixed.x = TRUE, check/set all exo elements } else { # redefine ov.x for the structural part only; set exo flag for (g in 1:nblocks) { ov.names.x <- lav_partable_vnames(PT, type = "ov.x", block = block.values[g] ) if (length(ov.names.x) == 0L) { next } # 1. variances/covariances exo.var.idx <- which( PT$op == "~~" & PT$block == block.values[g] & PT$rhs %in% ov.names.x & PT$lhs %in% ov.names.x & PT$user %in% c(0L, 3L) ) if (length(exo.var.idx) > 0L) { PT$ustart[exo.var.idx] <- as.numeric(NA) # to be overridden PT$free[exo.var.idx] <- 0L PT$exo[exo.var.idx] <- 1L PT$user[exo.var.idx] <- 3L } # 2. intercepts exo.int.idx <- which( PT$op == "~1" & PT$block == block.values[g] & PT$lhs %in% ov.names.x & PT$user == 0L ) if (length(exo.int.idx) > 0L) { PT$ustart[exo.int.idx] <- as.numeric(NA) # to be overridden PT$free[exo.int.idx] <- 0L PT$exo[exo.int.idx] <- 1L PT$user[exo.int.idx] <- 3L } } # blocks } # fixed.x # clean up PT <- lav_partable_complete(PT) if (add.idx) { attr(PT, "idx") <- keep.idx } PT } # NOTE: only within same level lav_partable_add_exo_cov <- function(PT, ov.names.x = NULL) { # PT PT <- as.data.frame(PT, stringsAsFactors = FALSE) # lavpta lavpta <- lav_partable_attributes(PT) # nblocks nblocks <- lavpta$nblocks block.values <- lav_partable_block_values(PT) # ov.names.x: list with element per block if (is.null(ov.names.x)) { ov.names.x <- lavpta$vnames$ov.x } else if (!is.list(ov.names.x)) { ov.names.x <- rep(list(ov.names.x), nblocks) } # remove ov.names.x if not present at same level/block if (nblocks > 1L) { for (b in seq_len(nblocks)) { rm.idx <- which(!ov.names.x[[b]] %in% lavpta$vnames$ov.x[[b]]) if (length(rm.idx) > 0L) { ov.names.x[[b]] <- ov.names.x[[b]][-rm.idx] } } # b } # add covariances among the exogenous covariates for (b in seq_len(nblocks)) { if (length(ov.names.x[[b]]) > 1L) { tmp <- utils::combn(ov.names.x[[b]], 2L) for (i in seq_len(ncol(tmp))) { # already present?
cov1.idx <- which(PT$op == "~~" & PT$block == block.values[b] & PT$lhs == tmp[1, i] & PT$rhs == tmp[2, i]) cov2.idx <- which(PT$op == "~~" & PT$block == block.values[b] & PT$lhs == tmp[2, i] & PT$rhs == tmp[1, i]) # if not, add if (length(c(cov1.idx, cov2.idx)) == 0L) { ADD <- list( lhs = tmp[1, i], op = "~~", rhs = tmp[2, i], user = 3L, free = max(PT$free) + 1L, block = b, ustart = as.numeric(NA) ) # add group column if (!is.null(PT$group)) { ADD$group <- unique(PT$block[PT$block == b]) } # add level column if (!is.null(PT$level)) { ADD$level <- unique(PT$level[PT$block == b]) } # add lower column if (!is.null(PT$lower)) { ADD$lower <- as.numeric(-Inf) } # add upper column if (!is.null(PT$upper)) { ADD$upper <- as.numeric(+Inf) } PT <- lav_partable_add(PT, add = ADD) } } } # ov.names.x } # blocks PT } lavaan/R/lav_lavaan_step08_start.R0000644000176200001440000001026514627656441016607 0ustar liggesuserslav_lavaan_step08_start <- function(slotModel = NULL, # nolint lavoptions = NULL, lavpartable = NULL, lavsamplestats = NULL, lavh1 = NULL) { # # # # # # # # # # # # # 8. lavstart # # # # # # # # # # # # # # if slotModel is NULL # if lavpartable$est not NULL and lavoptions$start == "default" # if there are free variances with est==0 or there are NA's in est # compute start column in lavpartable via lav_start # else # set start column in lavpartable equal to est column # else # compute start column via lav_start and # check via lav_start_check_cov if demanded (lavoptions$check.start) samplestats.flag <- TRUE if (!is.null(lavoptions$samplestats) && !lavoptions$samplestats) { samplestats.flag <- FALSE } if (is.null(slotModel)) { # check if we have provided a full parameter table as model = input if (!is.null(lavpartable$est) && is.character(lavoptions$start) && lavoptions$start == "default") { if (lav_verbose()) { cat("lavstart ...") } # check if all 'est' values look ok # this is not the case, eg, if partables have been merged eg, as # in semTools' auxiliary() function # check for zero free variances and NA values zero.idx <- which(lavpartable$free > 0L & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs & lavpartable$est == 0) if (length(zero.idx) > 0L || any(is.na(lavpartable$est))) { lavpartable$start <- lav_start( start.method = lavoptions$start, lavpartable = lavpartable, lavsamplestats = lavsamplestats, model.type = lavoptions$model.type, reflect = FALSE, samplestats.flag = samplestats.flag, # order.lv.by = lavoptions$rotation.args$order.lv.by, order.lv.by = "none", mimic = lavoptions$mimic ) } else { lavpartable$start <- lavpartable$est } # check for exogenous parameters: if the dataset changed, we must # update them! (new in 0.6-16) # ... or not? (not compatible with how we bootstrap under fixed.x = T) # we really need to think about this more carefully... 
# # if (any(lavpartable$exo == 1L)) { # # FIXME: there should be an easier way just to # # (re)initialize the the exogenous part of the model # tmp <- lav_start(start.method = "lavaan", # not "simple" # # if fixed.x = TRUE # lavpartable = lavpartable, # lavsamplestats = lavsamplestats, # lavh1 = lavh1, # model.type = lavoptions$model.type, # reflect = FALSE, # #order.lv.by = lavoptions$rotation.args$order.lv.by, # order.lv.by = "none", # mimic = lavoptions$mimic, # debug = lav_debug()) # exo.idx <- which(lavpartable$exo == 1L) # lavpartable$start[exo.idx] <- tmp[exo.idx] # } if (lav_verbose()) { cat(" done.\n") } } else { if (lav_verbose()) { cat("lavstart ...") } start.values <- lav_start( start.method = lavoptions$start, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavh1 = lavh1, model.type = lavoptions$model.type, reflect = FALSE, samplestats.flag = samplestats.flag, # order.lv.by = lavoptions$rotation.args$order.lv.by, order.lv.by = "none", mimic = lavoptions$mimic ) # sanity check if (!is.null(lavoptions$check.start) && lavoptions$check.start) { start.values <- lav_start_check_cov( lavpartable = lavpartable, start = start.values ) } lavpartable$start <- start.values if (lav_verbose()) { cat(" done.\n") } } } lavpartable } lavaan/R/lav_prelis.R0000644000176200001440000000451314627656441014222 0ustar liggesusers# small utility functions to deal with PRELIS # Y.R.: 11 dec 2012 prelis.read.cor <- function(file = "") { # read in numbers as characters txt <- scan(file, what = "character", quiet = TRUE) # convert to numbers txt <- gsub("D", "e", txt) x <- as.numeric(txt) # create COR/COR matrix COR <- lav_matrix_lower2full(x, diagonal = TRUE) COR } prelis.read.acm <- function(file = "", rescale = 1e-3) { # read in raw data -- ignore first three elements # first element: 123.456789 (check?) # second element: 2.72 version number of prelis # third element: almost zero?? zz <- file(file, "rb") raw <- readBin(zz, what = "double", n = 1e+05)[-c(1, 2, 3)] close(zz) # scale numbers raw <- raw * rescale ACM <- lav_matrix_lower2full(raw, diagonal = TRUE) # elements are divided by 2?? 
ACM <- ACM * 2 ACM } prelis.write.data <- function(data, file = "prelis", na.rm = TRUE, labels = FALSE, std.ov = FALSE) { dfile <- paste(file, ".raw", sep = "") write.table(data, file = dfile, na = "-999999", col.names = FALSE, row.names = FALSE, quote = FALSE ) if (labels) { lfile <- paste(file, ".lab", sep = "") write.table(unique(names(data)), file = lfile, row.names = F, col.names = F, quote = F ) } } prelis.run <- function(X, type = "OR", keep.files = FALSE) { label <- names(X) nvar <- ncol(X) # write raw data prelis.write.data(X, file = "prelistmp") # write syntax txt <- paste("DA NI=", nvar, " NO=0 MI=-999999\n", sep = "") txt <- paste(txt, "LA", sep = "") tmp <- 0 for (i in 1:nvar) { if (tmp %% 6 == 0) txt <- paste(txt, "\n", sep = "") txt <- paste(txt, label[i], " ", sep = "") tmp <- tmp + 1 } txt <- paste(txt, "\n") txt <- paste(txt, "RA FI=prelistmp.raw\n", sep = "") txt <- paste(txt, type, " ALL\n", sep = "") txt <- paste(txt, "OU MA=PM SA=prelistmp.acm SM=prelistmp.cor\n", sep = "") writeLines(txt, con = "prelistmp.in") # run prelis system("prelis prelistmp.in prelistmp.out") # read in acm and cor ACM <- prelis.read.acm(file = "prelistmp.acm") COR <- prelis.read.cor(file = "prelistmp.cor") # clean up if (!keep.files) { unlink(c( "prelistmp.in", "prelistmp.out", "prelistmp.acm", "prelistmp.cor", "prelistmp.FREQ", "prelistmp.raw" )) } list(COR = COR, ACM = ACM) } lavaan/R/lav_mvnorm_h1.R0000644000176200001440000003450114627656441014632 0ustar liggesusers# the multivariate normal distribution, unrestricted (h1) # - everything is evalued under the MLEs: Mu = ybar, Sigma = S # 1) loglikelihood h1 (from raw data, or sample statistics) # 4) hessian h1 around MLEs # 5) information h1 (restricted Sigma/mu) # 5a: (unit) expected information h1 (A1 = Gamma.NT^{-1}) # 5b: (unit) observed information h1 (A1 = Gamma.NT^{-1}) # 5c: (unit) first.order information h1 (B1 = A1 %*% Gamma %*% A1) # 6) inverted information h1 mu + vech(Sigma) # 6a: (unit) inverted expected information (A1.inv = Gamma.NT) # 6b: (unit) inverted observed information (A1.inv = Gamma.NT) # 6c: (unit) inverted first-order information (B1.inv) # 7) ACOV h1 mu + vech(Sigma) # 7a: 1/N * Gamma.NT # 7b: 1/N * Gamma.NT # 7c: 1/N * (Gamma.NT * Gamma^{-1} * Gamma.NT) # 7d: 1/N * Gamma (sandwich) # YR 25 Mar 2016: first version # YR 19 Jan 2017: added 6) + 7) # YR 04 Jan 2020: adjust for sum(wt) != N # YR 22 Jul 2022: adding correlation= argument for information_expected # (only for catml; not used if correlation = TRUE!) # 1. 
log-likelihood h1 # 1a: input is raw data lav_mvnorm_h1_loglik_data <- function( Y = NULL, x.idx = integer(0L), casewise = FALSE, wt = NULL, Sinv.method = "eigen") { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } P <- NCOL(Y) # sample statistics if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") sample.mean <- out$center sample.cov <- out$cov } else { sample.mean <- base::.colMeans(Y, m = N, n = P) sample.cov <- lav_matrix_cov(Y) } if (casewise) { LOG.2PI <- log(2 * pi) # invert sample.cov if (Sinv.method == "chol") { cS <- chol(sample.cov) icS <- backsolve(cS, diag(P)) Yc <- t(t(Y) - sample.mean) DIST <- rowSums((Yc %*% icS)^2) logdet <- -2 * sum(log(diag(icS))) } else { sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(sample.cov.inv, "logdet") # mahalanobis distance Yc <- t(t(Y) - sample.mean) DIST <- rowSums(Yc %*% sample.cov.inv * Yc) } loglik <- -(P * LOG.2PI + logdet + DIST) / 2 # weights if (!is.null(wt)) { loglik <- loglik * wt } } else { # invert sample.cov sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(sample.cov.inv, "logdet") loglik <- lav_mvnorm_h1_loglik_samplestats( sample.cov.logdet = logdet, sample.nvar = P, sample.nobs = N ) } # fixed.x? if (length(x.idx) > 0L) { loglik.x <- lav_mvnorm_h1_loglik_data( Y = Y[, x.idx, drop = FALSE], wt = wt, x.idx = integer(0L), casewise = casewise, Sinv.method = Sinv.method ) # subtract logl.X loglik <- loglik - loglik.x } loglik } # 1b: input are sample statistics only (logdet, N and P) lav_mvnorm_h1_loglik_samplestats <- function( sample.cov.logdet = NULL, sample.nvar = NULL, sample.nobs = NULL, # or sample.cov = NULL, x.idx = integer(0L), x.cov = NULL, Sinv.method = "eigen") { if (is.null(sample.nvar)) { P <- NCOL(sample.cov) } else { P <- sample.nvar # number of variables } N <- sample.nobs stopifnot(!is.null(P), !is.null(N)) LOG.2PI <- log(2 * pi) # all we need is the logdet if (is.null(sample.cov.logdet)) { sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(sample.cov.inv, "logdet") } else { logdet <- sample.cov.logdet } loglik <- -N / 2 * (P * LOG.2PI + logdet + P) # fixed.x? if (length(x.idx) > 0L) { if (is.null(sample.cov)) { if (is.null(x.cov)) { lav_msg_stop(gettext( "when x.idx is not empty, we need sample.cov or x.cov" )) } else { sample.cov.x <- x.cov } } else { sample.cov.x <- sample.cov[x.idx, x.idx, drop = FALSE] } loglik.x <- lav_mvnorm_h1_loglik_samplestats( sample.cov = sample.cov.x, sample.nobs = sample.nobs, x.idx = integer(0L), Sinv.method = Sinv.method ) # subtract logl.X loglik <- loglik - loglik.x } loglik } # 4. hessian of logl (around MLEs of Mu and Sigma) # 4a: hessian logl Mu and vech(Sigma) from raw data lav_mvnorm_h1_logl_hessian_data <- function( Y = NULL, wt = NULL, x.idx = integer(0L), Sinv.method = "eigen", sample.cov.inv = NULL, meanstructure = TRUE) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } # observed information observed <- lav_mvnorm_h1_information_observed_data( Y = Y, wt = wt, x.idx = x.idx, Sinv.method = Sinv.method, sample.cov.inv = sample.cov.inv, meanstructure = meanstructure ) -N * observed } # 4b: hessian Mu and vech(Sigma) from samplestats lav_mvnorm_h1_logl_hessian_samplestats <- function( sample.mean = NULL, # unused! 
sample.cov = NULL, sample.nobs = NULL, x.idx = integer(0L), Sinv.method = "eigen", sample.cov.inv = NULL, meanstructure = TRUE) { N <- sample.nobs # observed information observed <- lav_mvnorm_h1_information_observed_samplestats( sample.mean = sample.mean, sample.cov = sample.cov, x.idx = x.idx, Sinv.method = Sinv.method, sample.cov.inv = sample.cov.inv, meanstructure = meanstructure ) -N * observed } # 5) Information h1 (note: expected == observed if data is complete!) # 5a: unit expected information h1 lav_mvnorm_h1_information_expected <- function( Y = NULL, wt = NULL, sample.cov = NULL, x.idx = integer(0L), Sinv.method = "eigen", sample.cov.inv = NULL, meanstructure = TRUE, correlation = FALSE) { if (is.null(sample.cov.inv)) { if (is.null(sample.cov)) { if (is.null(wt)) { sample.mean <- base::.colMeans(Y, m = NROW(Y), n = NCOL(Y)) sample.cov <- lav_matrix_cov(Y) } else { out <- stats::cov.wt(Y, wt = wt, method = "ML") sample.cov <- out$cov } } # invert sample.cov sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = FALSE, Sinv.method = Sinv.method ) } I11 <- sample.cov.inv if(correlation) { I22 <- 0.5 * lav_matrix_duplication_cor_pre_post(sample.cov.inv %x% sample.cov.inv) } else { I22 <- 0.5 * lav_matrix_duplication_pre_post(sample.cov.inv %x% sample.cov.inv) } # fixed.x? if (length(x.idx) > 0L) { pstar.x <- lav_matrix_vech_which_idx( n = NCOL(sample.cov.inv), idx = x.idx ) I22[pstar.x, ] <- 0 I22[, pstar.x] <- 0 } if (meanstructure) { # fixed.x? if (length(x.idx) > 0L) { I11[x.idx, ] <- 0 I11[, x.idx] <- 0 } out <- lav_matrix_bdiag(I11, I22) } else { out <- I22 } out } # 5b: unit observed information h1 lav_mvnorm_h1_information_observed_data <- function( Y = NULL, wt = NULL, x.idx = integer(0L), Sinv.method = "eigen", sample.cov.inv = NULL, meanstructure = TRUE) { lav_mvnorm_h1_information_expected( Y = Y, Sinv.method = Sinv.method, wt = wt, x.idx = x.idx, sample.cov.inv = sample.cov.inv, meanstructure = meanstructure ) } # 5b-bis: observed information h1 from sample statistics lav_mvnorm_h1_information_observed_samplestats <- function( sample.mean = NULL, # unused! sample.cov = NULL, x.idx = integer(0L), Sinv.method = "eigen", sample.cov.inv = NULL, meanstructure = TRUE) { if (is.null(sample.cov.inv)) { # invert sample.cov sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = FALSE, Sinv.method = Sinv.method ) } I11 <- sample.cov.inv # fixed.x? if (length(x.idx) > 0L) { I11[x.idx, ] <- 0 I11[, x.idx] <- 0 } I22 <- 0.5 * lav_matrix_duplication_pre_post(sample.cov.inv %x% sample.cov.inv) # fixed.x? 
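# NOTE (sketch): the I22 block above is the standard h1 information for
# vech(Sigma):
#   I22 = 0.5 * t(D) %*% (Sigma^{-1} %x% Sigma^{-1}) %*% D,
# with D the duplication matrix; lav_matrix_duplication_pre_post() applies
# t(D) ... D without forming D explicitly. A naive (slower) check:
#   D <- lav_matrix_duplication(NCOL(sample.cov.inv))
#   0.5 * t(D) %*% (sample.cov.inv %x% sample.cov.inv) %*% D   # == I22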
if (length(x.idx) > 0L) { pstar.x <- lav_matrix_vech_which_idx( n = NCOL(sample.cov.inv), idx = x.idx ) I22[pstar.x, ] <- 0 I22[, pstar.x] <- 0 } if (meanstructure) { out <- lav_matrix_bdiag(I11, I22) } else { out <- I22 } out } # 5c: unit first-order information h1 # note: first order information h1 == A1 %*% Gamma %*% A1 # (where A1 = obs/exp information h1) lav_mvnorm_h1_information_firstorder <- function( Y = NULL, wt = NULL, sample.cov = NULL, x.idx = integer(0L), cluster.idx = NULL, Sinv.method = "eigen", sample.cov.inv = NULL, Gamma = NULL, meanstructure = TRUE) { if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") res <- lav_mvnorm_information_firstorder( Y = Y, wt = wt, cluster.idx = cluster.idx, Mu = out$center, Sigma = out$cov, x.idx = x.idx, meanstructure = meanstructure ) return(res) } # sample.cov.inv if (is.null(sample.cov.inv)) { # invert sample.cov if (is.null(sample.cov)) { sample.mean <- base::.colMeans(Y, m = NROW(Y), n = NCOL(Y)) sample.cov <- lav_matrix_cov(Y) } sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # question: is there any benefit computing Gamma/A1 instead of just # calling lav_mvnorm_information_firstorder()? # answer (2014): probably not; it is just reassuring that the expression # J = A1 %*% Gamma %*% A1 seems to hold # Gamma # FIXME: what about the 'unbiased = TRUE' option? if (is.null(Gamma)) { if (length(x.idx) > 0L) { Gamma <- lav_samplestats_Gamma(Y, x.idx = x.idx, fixed.x = TRUE, cluster.idx = cluster.idx, meanstructure = meanstructure ) } else { Gamma <- lav_samplestats_Gamma(Y, meanstructure = meanstructure, cluster.idx = cluster.idx ) } } # sample.cov.inv if (is.null(sample.cov.inv)) { # invert sample.cov if (is.null(sample.cov)) { sample.mean <- base::.colMeans(Y, m = NROW(Y), n = NCOL(Y)) sample.cov <- lav_matrix_cov(Y) } sample.cov.inv <- lav_matrix_symmetric_inverse( S = sample.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # A1 A1 <- lav_mvnorm_h1_information_expected( Y = Y, Sinv.method = Sinv.method, sample.cov.inv = sample.cov.inv, x.idx = x.idx, meanstructure = meanstructure ) A1 %*% Gamma %*% A1 } # 6) inverted information h1 mu + vech(Sigma) (not used?) # 6a: (unit) inverted expected information (A1.inv = Gamma.NT) # 6b: (unit) inverted observed information (A1.inv = Gamma.NT) lav_mvnorm_h1_inverted_information_expected <- lav_mvnorm_h1_inverted_information_observed <- function( Y = NULL, wt = NULL, sample.cov = NULL, x.idx = integer(0L)) { # sample.cov if (is.null(sample.cov)) { if (is.null(wt)) { sample.mean <- base::.colMeans(Y, m = NROW(Y), n = NCOL(Y)) sample.cov <- lav_matrix_cov(Y) } else { out <- stats::cov.wt(Y, wt = wt, method = "ML") sample.cov <- out$cov } } if (length(x.idx) > 0L) { Gamma.NT <- lav_samplestats_Gamma_NT( Y = Y, wt = wt, x.idx = x.idx, COV = sample.cov, meanstructure = TRUE, fixed.x = TRUE ) } else { I11 <- sample.cov I22 <- 2 * lav_matrix_duplication_ginv_pre_post(sample.cov %x% sample.cov) Gamma.NT <- lav_matrix_bdiag(I11, I22) } Gamma.NT } # 6c: (unit) inverted first-order information (B1.inv) (not used?) # J1.inv = Gamma.NT %*% solve(Gamma) %*% Gamma.NT # lav_mvnorm_h1_inverted_information_firstorder <- function( Y = NULL, wt = NULL, sample.cov = NULL, x.idx = integer(0L), Sinv.method = "eigen", sample.cov.inv = NULL, Gamma = NULL) { # lav_samplestats_Gamma() has no wt argument (yet) if (!is.null(wt)) { lav_msg_stop(gettext("function not supported if wt is not NULL")) } # Gamma # what about the 'unbiased = TRUE' option? 
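# NOTE (sketch): since Gamma.NT equals the inverse of A1 (see 6a/6b above),
# the quantity computed below satisfies
#   solve(Gamma.NT %*% solve(Gamma) %*% Gamma.NT) == A1 %*% Gamma %*% A1,
# i.e. it inverts the first-order information J1 of section 5c.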
if (is.null(Gamma)) { if (length(x.idx) > 0L) { Gamma <- lav_samplestats_Gamma(Y, x.idx = x.idx, fixed.x = TRUE, meanstructure = TRUE ) } else { Gamma <- lav_samplestats_Gamma(Y, meanstructure = TRUE) } } # Gamma.NT Gamma.NT <- lav_mvnorm_h1_inverted_information_expected( Y = Y, sample.cov = sample.cov, x.idx = x.idx ) if (length(x.idx) > 0L) { # FIXME: surely there is better way out <- Gamma.NT %*% MASS::ginv(Gamma) %*% Gamma.NT } else { out <- Gamma.NT %*% solve(Gamma, Gamma.NT) } out } # 7) ACOV h1 mu + vech(Sigma) (not used?) # 7a: 1/N * Gamma.NT # 7b: 1/N * Gamma.NT lav_mvnorm_h1_acov_expected <- lav_mvnorm_h1_acov_observed <- function( Y = NULL, wt = NULL, sample.cov = NULL, x.idx = integer(0L)) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } Gamma.NT <- lav_mvnorm_h1_inverted_information_expected( Y = Y, wt = wt, sample.cov = sample.cov, x.idx = x.idx ) (1 / N) * Gamma.NT } # 7c: 1/N * (Gamma.NT * Gamma^{-1} * Gamma.NT) lav_mvnorm_h1_acov_firstorder <- function( Y = NULL, wt = NULL, sample.cov = NULL, Sinv.method = "eigen", x.idx = integer(0L), sample.cov.inv = NULL, Gamma = NULL) { if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } J1.inv <- lav_mvnorm_h1_inverted_information_firstorder( Y = Y, wt = wt, sample.cov = sample.cov, x.idx = x.idx, Sinv.method = Sinv.method, sample.cov.inv = sample.cov.inv, Gamma = Gamma ) (1 / N) * J1.inv } # 7d: 1/N * Gamma (sandwich) lav_mvnorm_h1_acov_sandwich <- function( Y = NULL, wt = NULL, sample.cov = NULL, x.idx = integer(0L), Gamma = NULL) { # lav_samplestats_Gamma() has no wt argument (yet) if (!is.null(wt)) { lav_msg_stop(gettext("function not supported if wt is not NULL")) } # if(!is.null(wt)) { # N <- sum(wt) # } else { N <- NROW(Y) # } # Gamma if (is.null(Gamma)) { if (length(x.idx) > 0L) { Gamma <- lav_samplestats_Gamma(Y, x.idx = x.idx, fixed.x = TRUE, meanstructure = TRUE ) } else { Gamma <- lav_samplestats_Gamma(Y, meanstructure = TRUE) } } (1 / N) * Gamma } lavaan/R/lav_lavaanList_inspect.R0000644000176200001440000002646114627656441016555 0ustar liggesusers# inspect a lavaanList object inspect.lavaanList <- function(object, what = "free", ...) { lavListInspect( object = object, what = what, add.labels = TRUE, add.class = TRUE, drop.list.single.group = TRUE ) } # the `tech' version: no labels, full matrices, ... 
for further processing lavTech.lavaanList <- function(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { lavListInspect( object = object, what = what, add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group ) } lavListTech <- function(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { lavListInspect( object = object, what = what, add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group ) } # just in case some uses lavInspect on a lavaanList object lavInspect.lavaanList <- function(object, what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) { lavListInspect( object = object, what = what, add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group ) } lavListInspect <- function(object, what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) { # object must inherit from class lavaanList stopifnot(inherits(object, "lavaanList")) # only a single argument if (length(what) > 1) { lav_msg_stop(gettext( "`what' arguments contains multiple arguments; only one is allowed")) } # be case insensitive what <- tolower(what) #### model matrices, with different contents #### if (what == "free") { lav_lavaanList_inspect_modelmatrices(object, what = "free", type = "free", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group ) } else if (what == "partable" || what == "user") { lav_lavaanList_inspect_modelmatrices(object, what = "free", type = "partable", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group ) } else if (what == "start" || what == "starting.values") { lav_lavaanList_inspect_modelmatrices(object, what = "start", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group ) #### parameter table #### } else if (what == "list") { parTable(object) #### data + missingness #### } else if (what == "ngroups") { object@Data@ngroups } else if (what == "group") { object@Data@group } else if (what == "cluster") { object@Data@cluster } else if (what == "nlevels") { object@Data@nlevels } else if (what == "nclusters") { lav_object_inspect_cluster_info(object, level = 2L, what = "nclusters", drop.list.single.group = drop.list.single.group ) } else if (what == "ncluster.size") { lav_object_inspect_cluster_info(object, level = 2L, what = "ncluster.size", drop.list.single.group = drop.list.single.group ) } else if (what == "cluster.size") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.size", drop.list.single.group = drop.list.single.group ) } else if (what == "cluster.id") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.id", drop.list.single.group = drop.list.single.group ) } else if (what == "cluster.idx") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.idx", drop.list.single.group = drop.list.single.group ) } else if (what == "cluster.label") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.label", drop.list.single.group = drop.list.single.group ) } else if (what == 
"cluster.sizes") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.sizes", drop.list.single.group = drop.list.single.group ) } else if (what == "average.cluster.size") { lav_object_inspect_cluster_info(object, level = 2L, what = "average.cluster.size", drop.list.single.group = drop.list.single.group ) } else if (what == "ordered") { object@Data@ordered } else if (what == "group.label") { object@Data@group.label } else if (what == "level.label") { object@Data@level.label } else if (what == "nobs") { # only for original! unlist(object@Data@nobs) } else if (what == "norig") { # only for original! unlist(object@Data@norig) } else if (what == "ntotal") { # only for original! sum(unlist(object@Data@nobs)) #### from the model object (but stable) over datasets? #### } else if (what == "th.idx") { lav_lavaanList_inspect_th_idx(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group ) #### meanstructure, categorical #### } else if (what == "meanstructure") { object@Model@meanstructure } else if (what == "categorical") { object@Model@categorical } else if (what == "fixed.x") { object@Model@fixed.x } else if (what == "parameterization") { object@Model@parameterization # options } else if (what == "options" || what == "lavoptions") { object@Options # call } else if (what == "call") { as.list(object@call) #### not found #### } else { lav_msg_stop(gettextf( "unknown `what' argument in inspect function: `%s'", what)) } } lav_lavaanList_inspect_start <- function(object) { # from 0.5-19, they are in the partable if (!is.null(object@ParTable$start)) { OUT <- object@ParTable$start } else { # in < 0.5-19, we should look in @Fit@start OUT <- object@Fit@start } OUT } lav_lavaanList_inspect_modelmatrices <- function( object, what = "free", type = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { GLIST <- object@Model@GLIST for (mm in 1:length(GLIST)) { if (add.labels) { dimnames(GLIST[[mm]]) <- object@Model@dimNames[[mm]] } if (what == "free") { # fill in free parameter counts if (type == "free") { m.el.idx <- object@Model@m.free.idx[[mm]] x.el.idx <- object@Model@x.free.idx[[mm]] # } else if(type == "unco") { # m.el.idx <- object@Model@m.unco.idx[[mm]] # x.el.idx <- object@Model@x.unco.idx[[mm]] } else if (type == "partable") { m.el.idx <- object@Model@m.user.idx[[mm]] x.el.idx <- object@Model@x.user.idx[[mm]] } else { lav_msg_stop(gettextf("unknown type argument: %s", type)) } # erase everything GLIST[[mm]][, ] <- 0.0 GLIST[[mm]][m.el.idx] <- x.el.idx } else if (what == "start") { # fill in starting values m.user.idx <- object@Model@m.user.idx[[mm]] x.user.idx <- object@Model@x.user.idx[[mm]] START <- lav_lavaanList_inspect_start(object) GLIST[[mm]][m.user.idx] <- START[x.user.idx] } # class if (add.class) { if (object@Model@isSymmetric[mm]) { class(GLIST[[mm]]) <- c("lavaan.matrix.symmetric", "matrix") } else { class(GLIST[[mm]]) <- c("lavaan.matrix", "matrix") } } } # try to reflect `equality constraints' con.flag <- FALSE if (what == "free" && object@Model@eq.constraints) { # extract constraints from parameter table PT <- parTable(object) CON <- PT[PT$op %in% c("==", "<", ">"), c("lhs", "op", "rhs")] rownames(CON) <- NULL # replace 'labels' by parameter numbers ID <- lav_partable_constraints_label_id(PT) LABEL <- names(ID) for (con in 1:nrow(CON)) { # lhs LHS.labels <- all.vars(as.formula(paste("~", CON[con, "lhs"]))) if (length(LHS.labels) > 0L) { # par id LHS.freeid <- 
ID[match(LHS.labels, LABEL)] # substitute tmp <- CON[con, "lhs"] for (pat in 1:length(LHS.labels)) { tmp <- sub(LHS.labels[pat], LHS.freeid[pat], tmp) } CON[con, "lhs"] <- tmp } # rhs RHS.labels <- all.vars(as.formula(paste("~", CON[con, "rhs"]))) if (length(RHS.labels) > 0L) { # par id RHS.freeid <- ID[match(RHS.labels, LABEL)] # substitute tmp <- CON[con, "rhs"] for (pat in 1:length(RHS.labels)) { tmp <- sub(RHS.labels[pat], RHS.freeid[pat], tmp) } CON[con, "rhs"] <- tmp } } # con # add this info at the top # GLIST <- c(constraints = list(CON), GLIST) # no, not a good idea, it does not work with list.by.group # add it as a 'header' attribute? attr(CON, "header") <- "Note: model contains equality constraints:" con.flag <- TRUE } # should we group them per group? if (list.by.group) { lavmodel <- object@Model nmat <- lavmodel@nmat OUT <- vector("list", length = object@Data@ngroups) for (g in 1:object@Data@ngroups) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] mm.names <- names(GLIST[mm.in.group]) OUT[[g]] <- GLIST[mm.in.group] } if (object@Data@ngroups == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { if (length(object@Data@group.label) > 0L) { names(OUT) <- unlist(object@Data@group.label) } } } else { OUT <- GLIST } # header if (con.flag) { attr(OUT, "header") <- CON } # lavaan.list if (add.class) { class(OUT) <- c("lavaan.list", "list") } OUT } lav_lavaanList_inspect_th_idx <- function( object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # thresholds idx -- usually, we get it from SampleStats # but fortunately, there is a copy in Model, but no names... OUT <- object@Model@th.idx # nblocks nblocks <- length(OUT) # labels + class for (b in seq_len(nblocks)) { # if(add.labels && length(OUT[[b]]) > 0L) { # names(OUT[[b]]) <- object@SampleStats@th.names[[b]] # } if (add.class && !is.null(OUT[[b]])) { class(OUT[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { if (object@Data@nlevels == 1L && length(object@Data@group.label) > 0L) { names(OUT) <- unlist(object@Data@group.label) } else if (object@Data@nlevels > 1L && length(object@Data@group.label) == 0L) { names(OUT) <- object@Data@level.label } } OUT } lavaan/R/lav_model_gradient.R0000644000176200001440000012773614627656441015716 0ustar liggesusers# model gradient lav_model_gradient <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, type = "free", group.weight = TRUE, Delta = NULL, m.el.idx = NULL, x.el.idx = NULL, ceq.simple = FALSE) { nmat <- lavmodel@nmat estimator <- lavmodel@estimator representation <- lavmodel@representation meanstructure <- lavmodel@meanstructure categorical <- lavmodel@categorical group.w.free <- lavmodel@group.w.free fixed.x <- lavmodel@fixed.x conditional.x <- lavmodel@conditional.x num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx nx.free <- lavmodel@nx.free if (.hasSlot(lavmodel, "estimator.args")) { estimator.args <- lavmodel@estimator.args } else { estimator.args <- list() } # state or final? 
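# NOTE: GLIST defaults to the model's current matrices; during estimation
# the optimizer typically passes a candidate GLIST (built via
# lav_model_x2GLIST()), so the gradient can be evaluated at arbitrary
# parameter values.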
if (is.null(GLIST)) GLIST <- lavmodel@GLIST if (estimator == "REML") lav_msg_warn(gettext( "analytical gradient not implement; use numerical approximation")) # group.weight # FIXME --> block.weight if (group.weight) { if (estimator %in% c("ML", "PML", "FML", "MML", "REML", "NTRLS", "catML")) { group.w <- (unlist(lavsamplestats@nobs) / lavsamplestats@ntotal) } else if (estimator == "DLS") { if (estimator.args$dls.FtimesNminus1) { group.w <- ((unlist(lavsamplestats@nobs) - 1) / lavsamplestats@ntotal) } else { group.w <- (unlist(lavsamplestats@nobs) / lavsamplestats@ntotal) } } else { # FIXME: double check! group.w <- ((unlist(lavsamplestats@nobs) - 1) / lavsamplestats@ntotal) } } else { group.w <- rep(1.0, lavmodel@nblocks) } # do we need WLS.est? if (estimator %in% c("WLS", "DWLS", "ULS", "GLS", "NTRLS", "DLS")) { # always compute WLS.est WLS.est <- lav_model_wls_est(lavmodel = lavmodel, GLIST = GLIST) # , # cov.x = lavsamplestats@cov.x) } if (estimator %in% c("ML", "PML", "FML", "REML", "NTRLS", "catML")) { # compute moments for all groups # if(conditional.x) { # Sigma.hat <- computeSigmaHatJoint(lavmodel = lavmodel, # GLIST = GLIST, # extra = (estimator %in% c("ML", "REML","NTRLS"))) # } else { Sigma.hat <- computeSigmaHat( lavmodel = lavmodel, GLIST = GLIST, extra = (estimator %in% c( "ML", "REML", "NTRLS", "catML" )) ) # } if (meanstructure) { # if(conditional.x) { # Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST) # } else { Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST) # } } if (categorical) { TH <- computeTH(lavmodel = lavmodel, GLIST = GLIST) } if (conditional.x) { PI <- computePI(lavmodel = lavmodel, GLIST = GLIST) } else if (estimator == "PML") { PI <- vector("list", length = lavmodel@nblocks) } if (group.w.free) { GW <- computeGW(lavmodel = lavmodel, GLIST = GLIST) } } else if (estimator == "DLS" && estimator.args$dls.GammaNT == "model") { Sigma.hat <- computeSigmaHat( lavmodel = lavmodel, GLIST = GLIST, extra = FALSE ) Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST) } else if (estimator == "MML") { TH <- computeTH(lavmodel = lavmodel, GLIST = GLIST) THETA <- computeTHETA(lavmodel = lavmodel, GLIST = GLIST) GW <- computeGW(lavmodel = lavmodel, GLIST = GLIST) } # four approaches (FIXME!!!! merge this!) # - ML approach: using Omega (and Omega.mu) # Omega = 'POST' = Sigma.inv %*% (S - Sigma) %*% t(Sigma.inv) # (still 2x faster than Delta method) # - WLS/DWLS/GLS: using Delta + WLS.V; support for fixed.x, conditional.x # - (ML)/NTRLS: using Delta, no support for fixed.x, conditional.x # - PML/FML/MML: custom # 1. ML approach if ((estimator == "ML" || estimator == "REML" || estimator == "catML") && lavdata@nlevels == 1L && !lavmodel@conditional.x) { correlation <- FALSE if (.hasSlot(lavmodel, "correlation") && lavmodel@correlation) { correlation <- TRUE } if (meanstructure) { Omega <- computeOmega( Sigma.hat = Sigma.hat, Mu.hat = Mu.hat, lavsamplestats = lavsamplestats, estimator = estimator, meanstructure = TRUE, conditional.x = conditional.x, correlation = correlation ) Omega.mu <- attr(Omega, "mu") } else { Omega <- computeOmega( Sigma.hat = Sigma.hat, Mu.hat = NULL, lavsamplestats = lavsamplestats, estimator = estimator, meanstructure = FALSE, conditional.x = conditional.x, correlation = correlation ) Omega.mu <- vector("list", length = lavmodel@nblocks) } # compute DX (for all elements in every model matrix) DX <- vector("list", length = length(GLIST)) for (g in 1:lavmodel@nblocks) { # which mm belong to group g? 
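# NOTE (illustration): nmat gives the number of model matrices per block,
# so the offset trick below selects the GLIST slots of block g; e.g. with
# nmat = c(3, 3) and g = 2:
#   1:3 + cumsum(c(0, c(3, 3)))[2]   # -> 4 5 6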
mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] mm.names <- names(GLIST[mm.in.group]) if (representation == "LISREL") { DX.group <- derivative.F.LISREL( GLIST[mm.in.group], Omega[[g]], Omega.mu[[g]] ) # FIXME!!! # add empty gamma if (lavmodel@conditional.x) { DX.group$gamma <- lavmodel@GLIST$gamma } # only save what we need DX[mm.in.group] <- DX.group[mm.names] } else if (representation == "RAM") { DX.group <- lav_ram_df( GLIST[mm.in.group], Omega[[g]], Omega.mu[[g]] ) # only save what we need DX[mm.in.group] <- DX.group[mm.names] } else { lav_msg_stop(gettext( "only LISREL and RAM representation has been implemented for now")) } # weight by group if (lavmodel@nblocks > 1L) { for (mm in mm.in.group) { DX[[mm]] <- group.w[g] * DX[[mm]] } } } # extract free parameters if (type == "free") { if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { # new in 0.6-11 dx <- numeric(lavmodel@nx.unco) for (g in 1:lavmodel@nblocks) { mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] for (mm in mm.in.group) { m.free.idx <- lavmodel@m.free.idx[[mm]] x.unco.idx <- lavmodel@x.unco.idx[[mm]] dx[x.unco.idx] <- DX[[mm]][m.free.idx] } } if (ceq.simple) { dx <- drop(crossprod(lavmodel@ceq.simple.K, dx)) } } else { dx <- numeric(nx.free) for (g in 1:lavmodel@nblocks) { mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] for (mm in mm.in.group) { m.free.idx <- lavmodel@m.free.idx[[mm]] x.free.idx <- lavmodel@x.free.idx[[mm]] dx[x.free.idx] <- DX[[mm]][m.free.idx] } } } } else { dx <- DX # handle equality constraints ### FIXME!!!! TODO!!!! } } else # ML # 2. using Delta - *LS family if (estimator %in% c("WLS", "DWLS", "ULS", "GLS", "NTGLS", "DLS")) { if (type != "free") { if (is.null(Delta)) { lav_msg_fixme("Delta should be given if type != free") } # stop("FIXME: WLS gradient with type != free needs fixing!") } else { Delta <- computeDelta( lavmodel = lavmodel, GLIST. 
= GLIST, ceq.simple = ceq.simple ) } for (g in 1:lavmodel@nblocks) { # diff <- as.matrix(lavsamplestats@WLS.obs[[g]] - WLS.est[[g]]) # group.dx <- -1 * ( t(Delta[[g]]) %*% lavsamplestats@WLS.V[[g]] %*% diff) # 0.5-17: use crossprod twice; treat DWLS/ULS special if (estimator == "WLS" || estimator == "GLS" || estimator == "DLS" || estimator == "NTRLS") { # full weight matrix diff <- lavsamplestats@WLS.obs[[g]] - WLS.est[[g]] # full weight matrix if (estimator == "GLS" || estimator == "WLS") { WLS.V <- lavsamplestats@WLS.V[[g]] group.dx <- -1 * crossprod( Delta[[g]], crossprod(WLS.V, diff) ) } else if (estimator == "DLS") { if (estimator.args$dls.GammaNT == "sample") { WLS.V <- lavsamplestats@WLS.V[[g]] # for now } else { dls.a <- estimator.args$dls.a GammaNT <- lav_samplestats_Gamma_NT( COV = Sigma.hat[[g]], MEAN = Mu.hat[[g]], rescale = FALSE, x.idx = lavsamplestats@x.idx[[g]], fixed.x = lavmodel@fixed.x, conditional.x = lavmodel@conditional.x, meanstructure = lavmodel@meanstructure, slopestructure = lavmodel@conditional.x ) W.DLS <- (1 - dls.a) * lavsamplestats@NACOV[[g]] + dls.a * GammaNT WLS.V <- lav_matrix_symmetric_inverse(W.DLS) } group.dx <- -1 * crossprod( Delta[[g]], crossprod(WLS.V, diff) ) } else if (estimator == "NTRLS") { stopifnot(!conditional.x) # WLS.V <- lav_samplestats_Gamma_inverse_NT( # ICOV = attr(Sigma.hat[[g]],"inv")[,,drop=FALSE], # COV = Sigma.hat[[g]][,,drop=FALSE], # MEAN = Mu.hat[[g]], # x.idx = lavsamplestats@x.idx[[g]], # fixed.x = fixed.x, # conditional.x = conditional.x, # meanstructure = meanstructure, # slopestructure = conditional.x) S <- lavsamplestats@cov[[g]] Sigma <- Sigma.hat[[g]] Sigma.inv <- attr(Sigma, "inv") nvar <- NROW(Sigma) if (meanstructure) { MEAN <- lavsamplestats@mean[[g]] Mu <- Mu.hat[[g]] POST.Sigma <- lav_matrix_duplication_pre( matrix( (Sigma.inv %*% (S - Sigma) %*% t(Sigma.inv)) %*% (diag(nvar) + (S - Sigma) %*% Sigma.inv) + (Sigma.inv %*% tcrossprod(MEAN - Mu) %*% Sigma.inv), ncol = 1 ) ) POST.Mu <- as.numeric(2 * Sigma.inv %*% (MEAN - Mu)) POST <- c(POST.Mu, POST.Sigma) } else { POST <- lav_matrix_duplication_pre( matrix((Sigma.inv %*% (S - Sigma) %*% t(Sigma.inv)) %*% (diag(nvar) + (S - Sigma) %*% Sigma.inv), ncol = 1) ) } group.dx <- as.numeric(-1 * crossprod(Delta[[g]], POST)) } } else if (estimator == "DWLS" || estimator == "ULS") { # diagonal weight matrix diff <- lavsamplestats@WLS.obs[[g]] - WLS.est[[g]] group.dx <- -1 * crossprod( Delta[[g]], lavsamplestats@WLS.VD[[g]] * diff ) } group.dx <- group.w[g] * group.dx if (g == 1) { dx <- group.dx } else { dx <- dx + group.dx } } # g if (type == "free") { # nothing to do } else { # make a GLIST dx <- lav_model_x2GLIST( lavmodel = lavmodel, x = dx, type = "custom", setDelta = FALSE, m.el.idx = m.el.idx, x.el.idx = x.el.idx ) } } # WLS # ML + conditional.x else if (estimator %in% c("ML", "catML") && lavmodel@conditional.x && lavdata@nlevels == 1L) { if (type != "free") { if (is.null(Delta)) { lav_msg_fixme("Delta should be given if type != free") } # stop("FIXME: WLS gradient with type != free needs fixing!") } else { Delta <- computeDelta( lavmodel = lavmodel, GLIST. 
= GLIST, ceq.simple = ceq.simple ) } for (g in 1:lavmodel@nblocks) { # augmented mean.x + cov.x matrix mean.x <- lavsamplestats@mean.x[[g]] cov.x <- lavsamplestats@cov.x[[g]] C3 <- rbind( c(1, mean.x), cbind(mean.x, cov.x + tcrossprod(mean.x)) ) Sigma <- Sigma.hat[[g]] Mu.g <- Mu.hat[[g]] PI.g <- PI[[g]] Sigma.inv <- attr(Sigma, "inv") nvar <- NROW(Sigma) S <- lavsamplestats@res.cov[[g]] # beta OBS <- t(cbind( lavsamplestats@res.int[[g]], lavsamplestats@res.slopes[[g]] )) EST <- t(cbind(Mu.g, PI.g)) # obs.beta <- c(lavsamplestats@res.int[[g]], # lav_matrix_vec(lavsamplestats@res.slopes[[g]])) # est.beta <- c(Mu.g, lav_matrix_vec(PI.g)) # beta.COV <- C3 %x% Sigma.inv # a <- t(obs.beta - est.beta) # b <- as.matrix(obs.beta - est.beta) # K <- lav_matrix_commutation(m = nvar, n = nvar) # AB <- (K %x% diag(NROW(C3)*NROW(C3))) %*% # (diag(nvar) %x% lav_matrix_vec(C3) %x% diag(nvar)) # K <- lav_matrix_commutation(m = nvar, n = NROW(C3)) # AB <- ( diag(NROW(C3)) %x% K %x% diag(nvar) ) %*% # (lav_matrix_vec(C3) %x% diag( nvar * nvar) ) # POST.beta <- 2 * beta.COV %*% (obs.beta - est.beta) d.BETA <- C3 %*% (OBS - EST) %*% Sigma.inv # NOTE: the vecr here, unlike lav_mvreg_dlogl_beta # this is because DELTA has used vec(t(BETA)), # instead of vec(BETA) # POST.beta <- 2 * lav_matrix_vecr(d.BETA) # NOT any longer, since 0.6-1!!! POST.beta <- 2 * lav_matrix_vec(d.BETA) # POST.sigma1 <- lav_matrix_duplication_pre( # (Sigma.inv %x% Sigma.inv) %*% t(AB) %*% (t(a) %x% b) ) # Sigma # POST.sigma2 <- lav_matrix_duplication_pre( # matrix( lav_matrix_vec( # Sigma.inv %*% (S - Sigma) %*% t(Sigma.inv)), ncol = 1L)) W.tilde <- S + t(OBS - EST) %*% C3 %*% (OBS - EST) d.SIGMA <- (Sigma.inv - Sigma.inv %*% W.tilde %*% Sigma.inv) d.vechSigma <- as.numeric(lav_matrix_duplication_pre( as.matrix(lav_matrix_vec(d.SIGMA)) )) POST.sigma <- -1 * d.vechSigma # POST <- c(POST.beta, POST.sigma1 + POST.sigma2) POST <- c(POST.beta, POST.sigma) group.dx <- as.numeric(-1 * crossprod(Delta[[g]], POST)) # because we still use obj/2, we need to divide by 2! group.dx <- group.dx / 2 # fixed in 0.6-1 group.dx <- group.w[g] * group.dx if (g == 1) { dx <- group.dx } else { dx <- dx + group.dx } } # g if (type == "free") { # nothing to do } else { # make a GLIST dx <- lav_model_x2GLIST( lavmodel = lavmodel, x = dx, type = "custom", setDelta = FALSE, m.el.idx = m.el.idx, x.el.idx = x.el.idx ) } } # ML + conditional.x else if (estimator == "ML" && lavdata@nlevels > 1L) { if (type != "free") { lav_msg_fixme("type != free in lav_model_gradient for estimator ML for nlevels > 1") } else { Delta <- computeDelta( lavmodel = lavmodel, GLIST. = GLIST, ceq.simple = ceq.simple ) } # for each upper-level group.... 
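# NOTE (sketch): per group, the analytic moment derivatives DX are mapped
# to the free parameters by the chain rule, group.dx = DX %*% Delta[[g]],
# where Delta[[g]] stacks the within- and between-level Jacobians; the
# final division by 2 * N further below puts the result on the scale of
# lavaan's objective function.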
for (g in 1:lavmodel@ngroups) { if (!lavsamplestats@missing.flag) { # complete data if (lavmodel@conditional.x) { DX <- lav_mvreg_cluster_dlogl_2l_samplestats( YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], Res.Sigma.W = Sigma.hat[[(g - 1) * 2 + 1]], Res.Int.W = Mu.hat[[(g - 1) * 2 + 1]], Res.Pi.W = PI[[(g - 1) * 2 + 1]], Res.Sigma.B = Sigma.hat[[(g - 1) * 2 + 2]], Res.Int.B = Mu.hat[[(g - 1) * 2 + 2]], Res.Pi.B = PI[[(g - 1) * 2 + 2]], Sinv.method = "eigen" ) } else { DX <- lav_mvnorm_cluster_dlogl_2l_samplestats( YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], Mu.W = Mu.hat[[(g - 1) * 2 + 1]], Sigma.W = Sigma.hat[[(g - 1) * 2 + 1]], Mu.B = Mu.hat[[(g - 1) * 2 + 2]], Sigma.B = Sigma.hat[[(g - 1) * 2 + 2]], Sinv.method = "eigen" ) } } else { # missing data if (lavmodel@conditional.x) { lav_msg_stop(gettext("gradient for twolevel + conditional.x + fiml is not ready; use optim.gradient = \"numerical\"")) } else { DX <- lav_mvnorm_cluster_missing_dlogl_2l_samplestats( Y1 = lavdata@X[[g]], Y2 = lavsamplestats@YLp[[g]][[2]]$Y2, Lp = lavdata@Lp[[g]], Mp = lavdata@Mp[[g]], Mu.W = Mu.hat[[(g - 1) * 2 + 1]], Sigma.W = Sigma.hat[[(g - 1) * 2 + 1]], Mu.B = Mu.hat[[(g - 1) * 2 + 2]], Sigma.B = Sigma.hat[[(g - 1) * 2 + 2]], Sinv.method = "eigen" ) } } group.dx <- as.numeric(DX %*% Delta[[g]]) # group weights (if any) group.dx <- group.w[g] * group.dx if (g == 1) { dx <- group.dx } else { dx <- dx + group.dx } } # g # divide by 2 * N dx <- dx / (2 * lavsamplestats@ntotal) # cat("dx1 (numerical) = \n"); print( zapsmall(dx1) ) # cat("dx (analytic) = \n"); print( zapsmall(dx ) ) } # ML + two-level else if (estimator == "PML" || estimator == "FML" || estimator == "MML") { if (type != "free") { lav_msg_fixme("type != free in lav_model_gradient for estimator PML") } else { Delta <- computeDelta( lavmodel = lavmodel, GLIST. 
= GLIST, ceq.simple = ceq.simple ) } for (g in 1:lavmodel@nblocks) { # print(GLIST) # print(lav_model_get_parameters(lavmodel = lavmodel, GLIST = GLIST)) # print(Sigma.hat[[g]]) # print(TH[[g]]) # cat("*****\n") # compute partial derivative of logLik with respect to # thresholds/means, slopes, variances, correlations if (estimator == "PML") { if (lavdata@nlevels > 1L) { lav_msg_stop(gettext( "PL gradient + multilevel not implemented; try optim.gradient = \"numerical\"")) } else if (conditional.x) { d1 <- pml_deriv1( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], TH = TH[[g]], th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = lavdata@X[[g]], lavcache = lavcache[[g]], eXo = lavdata@eXo[[g]], wt = lavdata@weights[[g]], PI = PI[[g]], missing = lavdata@missing ) } else { d1 <- pml_deriv1( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], TH = TH[[g]], th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = lavdata@X[[g]], lavcache = lavcache[[g]], eXo = NULL, wt = lavdata@weights[[g]], PI = NULL, missing = lavdata@missing ) } # not conditional.x # chain rule (fmin) group.dx <- as.numeric(t(d1) %*% Delta[[g]]) } # PML else if (estimator == "FML") { d1 <- fml_deriv1( Sigma.hat = Sigma.hat[[g]], TH = TH[[g]], th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = lavdata@X[[g]], lavcache = lavcache[[g]] ) # chain rule (fmin) group.dx <- as.numeric(t(d1) %*% Delta[[g]]) / lavsamplestats@nobs[[g]] } else if (estimator == "MML") { group.dx <- lav_model_gradient_mml( lavmodel = lavmodel, GLIST = GLIST, THETA = THETA[[g]], TH = TH[[g]], group = g, lavdata = lavdata, sample.mean = lavsamplestats@mean[[g]], sample.mean.x = lavsamplestats@mean.x[[g]], lavcache = lavcache ) } # group weights (if any) group.dx <- group.w[g] * group.dx if (g == 1) { dx <- group.dx } else { dx <- dx + group.dx } } # g } else { lav_msg_stop(gettext( "no analytical gradient available for estimator"), estimator) } # group.w.free for ML if (lavmodel@group.w.free && estimator %in% c("ML", "MML", "FML", "PML", "REML", "catML")) { # est.prop <- unlist( computeGW(lavmodel = lavmodel, GLIST = GLIST) ) # obs.prop <- unlist(lavsamplestats@group.w) # FIXME: G2 based -- ML and friends only!! # dx.GW <- - (obs.prop - est.prop) # poisson version est.freq <- exp(unlist(computeGW(lavmodel = lavmodel, GLIST = GLIST))) obs.freq <- unlist(lavsamplestats@group.w) * lavsamplestats@ntotal dx.GW <- -(obs.freq - est.freq) # divide by N (to be consistent with the rest of lavaan) dx.GW <- dx.GW / lavsamplestats@ntotal # remove last element (fixed LAST group to zero) # dx.GW <- dx.GW[-length(dx.GW)] # fill in in dx gw.mat.idx <- which(names(lavmodel@GLIST) == "gw") gw.x.idx <- unlist(lavmodel@x.free.idx[gw.mat.idx]) dx[gw.x.idx] <- dx.GW } # dx is 1xnpar matrix of LIST (type != "free") if (is.matrix(dx)) { dx <- as.numeric(dx) } dx } # for testing purposes only # computeDeltaNumerical <- function(lavmodel = NULL, GLIST = NULL, g = 1L) { # # # state or final? 
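# NOTE (sketch): this numerical version builds the Delta columns by
# complex-step differentiation of the implied moments; if reactivated,
# it offers a handy cross-check of the analytic computeDelta() below:
#   Delta.num <- computeDeltaNumerical(lavmodel = fit@Model, g = 1L)
#   Delta.ana <- computeDelta(lavmodel = fit@Model)[[1]]
#   max(abs(Delta.num - Delta.ana))   # ~ 0, for a hypothetical 'fit'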
# if(is.null(GLIST)) GLIST <- lavmodel@GLIST # # compute.moments <- function(x) { # GLIST <- lav_model_x2GLIST(lavmodel = lavmodel, x=x, type="free") # Sigma.hat <- computeSigmaHat(lavmodel = lavmodel, GLIST = GLIST) # S.vec <- lav_matrix_vech(Sigma.hat[[g]]) # if(lavmodel@meanstructure) { # Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST=GLIST) # out <- c(Mu.hat[[g]], S.vec) # } else { # out <- S.vec # } # out # } # # x <- lav_model_get_parameters(lavmodel = lavmodel, GLIST=GLIST, type="free") # Delta <- lav_func_jacobian_complex(func=compute.moments, x = x) # # Delta # } ### FIXME: should we here also: ### - weight for groups? (no, for now) ### - handle equality constraints? (yes, for now) computeDelta <- function(lavmodel = NULL, GLIST. = NULL, m.el.idx. = NULL, x.el.idx. = NULL, ceq.simple = FALSE, force.conditional.x.false = FALSE) { representation <- lavmodel@representation categorical <- lavmodel@categorical if (.hasSlot(lavmodel, "correlation")) { correlation <- lavmodel@correlation } else { correlation <- FALSE } conditional.x <- lavmodel@conditional.x group.w.free <- lavmodel@group.w.free nmat <- lavmodel@nmat nblocks <- lavmodel@nblocks nvar <- lavmodel@nvar num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx nexo <- lavmodel@nexo parameterization <- lavmodel@parameterization # number of thresholds per group (if any) nth <- sapply(th.idx, function(x) sum(x > 0L)) # state or final? if (is.null(GLIST.)) { GLIST <- lavmodel@GLIST } else { GLIST <- GLIST. } # type = "free" or something else? type <- "nonfree" m.el.idx <- m.el.idx. x.el.idx <- x.el.idx. if (is.null(m.el.idx) && is.null(x.el.idx)) { type <- "free" } # number of rows in DELTA.group pstar <- integer(nblocks) for (g in 1:nblocks) { pstar[g] <- as.integer(nvar[g] * (nvar[g] + 1) / 2) if (lavmodel@meanstructure) { pstar[g] <- nvar[g] + pstar[g] # first the means, then sigma } if (categorical) { pstar[g] <- pstar[g] - nvar[g] # remove variances pstar[g] <- pstar[g] - nvar[g] # remove means pstar[g] <- pstar[g] + nth[g] # add thresholds pstar[g] <- pstar[g] + length(num.idx[[g]]) # add num means pstar[g] <- pstar[g] + length(num.idx[[g]]) # add num vars } else if (correlation) { pstar[g] <- pstar[g] - nvar[g] # remove variances } if (conditional.x && nexo[g] > 0L) { pstar[g] <- pstar[g] + (nvar[g] * nexo[g]) # add slopes } if (group.w.free) { pstar[g] <- pstar[g] + 1L # add group weight } } # number of columns in DELTA + m.el.idx/x.el.idx if (type == "free") { if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { NCOL <- lavmodel@nx.unco } else { NCOL <- lavmodel@nx.free } m.el.idx <- x.el.idx <- vector("list", length = length(GLIST)) for (mm in 1:length(GLIST)) { m.el.idx[[mm]] <- lavmodel@m.free.idx[[mm]] if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { x.el.idx[[mm]] <- lavmodel@x.unco.idx[[mm]] } else { x.el.idx[[mm]] <- lavmodel@x.free.idx[[mm]] } # handle symmetric matrices if (lavmodel@isSymmetric[mm]) { # since we use 'x.free.idx', only symmetric elements # are duplicated (not the equal ones, only in x.free.free) dix <- duplicated(x.el.idx[[mm]]) if (any(dix)) { m.el.idx[[mm]] <- m.el.idx[[mm]][!dix] x.el.idx[[mm]] <- x.el.idx[[mm]][!dix] } } } } else { ## FIXME: this does *not* take into account symmetric ## matrices; hence NCOL will be too large, and empty ## columns will be added ## this is ugly, but it doesn't hurt ## alternative could be: ## NCOL <- sum(unlist(lapply(x.el.idx, function(x) length(unique(x))))) # NCOL <- sum(unlist(lapply(m.el.idx, length))) NCOL <- 
sum(unlist(lapply(x.el.idx, function(x) length(unique(x))))) # sanity check # nx <- sum(unlist(lapply(x.el.idx, length))) # stopifnot(NCOL == nx) } # compute Delta Delta <- vector("list", length = nblocks) for (g in 1:nblocks) { Delta.group <- matrix(0, nrow = pstar[g], ncol = NCOL) # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] # label rows of Delta.group --- FIXME!!! # if(categorical) { # # 1. th (means interleaved?) # # 2. pi # # 3. var num + cor # } else { # if(meanstructure) { # } # } # if(group.w.free) { # } # if theta, do some preparation if (representation == "LISREL" && parameterization == "theta") { sigma.hat <- computeSigmaHat.LISREL( MLIST = GLIST[mm.in.group], delta = FALSE ) dsigma <- diag(sigma.hat) # dcor/dcov for sigma R <- lav_deriv_cov2cor(sigma.hat, num.idx = lavmodel@num.idx[[g]]) theta.var.idx <- lav_matrix_diagh_idx(nvar[g]) } for (mm in mm.in.group) { mname <- names(lavmodel@GLIST)[mm] # skip empty ones if (!length(m.el.idx[[mm]])) next # get Delta columns for this model matrix if (representation == "LISREL") { # Sigma DELTA <- dxSigma <- derivative.sigma.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group], delta = parameterization == "delta" ) if (categorical && parameterization == "theta") { DELTA <- R %*% DELTA } if (categorical) { # reorder: first variances (of numeric), then covariances cov.idx <- lav_matrix_vech_idx(nvar[g]) covd.idx <- lav_matrix_vech_idx(nvar[g], diagonal = FALSE) var.idx <- which(is.na(match( cov.idx, covd.idx )))[num.idx[[g]]] cor.idx <- match(covd.idx, cov.idx) DELTA <- rbind( DELTA[var.idx, , drop = FALSE], DELTA[cor.idx, , drop = FALSE] ) } # correlation structure? if (!categorical && correlation) { rm.idx <- lav_matrix_diagh_idx(nvar[g]) DELTA <- DELTA[-rm.idx, , drop = FALSE] } if (!categorical) { if (conditional.x) { # means/intercepts DELTA.mu <- derivative.mu.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) # slopes if (lavmodel@nexo[g] > 0L) { DELTA.pi <- derivative.pi.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) if (lavmodel@multilevel) { DELTA <- rbind(DELTA.mu, DELTA.pi, DELTA) } else { # ATTENTION: we need to change the order here # lav_mvreg_scores_* uses 'Beta' where the # the intercepts are just the first row # using the col-major approach, we need to # interweave the intercepts with the slopes! 
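# NOTE (illustration): the reordering below maps the stacked
# (intercepts, slopes) rows to vec(t(BETA)) order; e.g. with
# nexo = 2 and nvar = 3:
#   lav_matrix_vec(matrix(1:9, nrow = 3, ncol = 3, byrow = TRUE))
#   # -> 1 4 7 2 5 8 3 6 9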
nEls <- NROW(DELTA.mu) + NROW(DELTA.pi) # = (nexo + 1 int) * nvar # intercepts on top tmp <- rbind(DELTA.mu, DELTA.pi) # change row index row.idx <- lav_matrix_vec(matrix(seq.int(nEls), nrow = lavmodel@nexo[g] + 1L, ncol = lavmodel@nvar[g], byrow = TRUE )) DELTA.beta <- tmp[row.idx, , drop = FALSE] DELTA <- rbind(DELTA.beta, DELTA) } } else { DELTA <- rbind(DELTA.mu, DELTA) } } else if (!conditional.x && lavmodel@meanstructure) { DELTA.mu <- derivative.mu.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) DELTA <- rbind(DELTA.mu, DELTA) } } else if (categorical) { DELTA.th <- derivative.th.LISREL( m = mname, idx = m.el.idx[[mm]], th.idx = th.idx[[g]], MLIST = GLIST[mm.in.group], delta = TRUE ) if (parameterization == "theta") { # dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma)) dDelta.dx <- (dxSigma[theta.var.idx, , drop = FALSE] * -0.5 / (dsigma * sqrt(dsigma))) dth.dDelta <- derivative.th.LISREL( m = "delta", idx = 1:nvar[g], MLIST = GLIST[mm.in.group], th.idx = th.idx[[g]] ) # add dth.dDelta %*% dDelta.dx no.num.idx <- which(th.idx[[g]] > 0) DELTA.th[no.num.idx, ] <- DELTA.th[no.num.idx, , drop = FALSE] + (dth.dDelta %*% dDelta.dx)[no.num.idx, , drop = FALSE] } if (conditional.x && lavmodel@nexo[g] > 0L) { DELTA.pi <- derivative.pi.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) if (parameterization == "theta") { dpi.dDelta <- derivative.pi.LISREL( m = "delta", idx = 1:nvar[g], MLIST = GLIST[mm.in.group] ) # add dpi.dDelta %*% dDelta.dx no.num.idx <- which(!seq.int(1L, nvar[g]) %in% num.idx[[g]]) no.num.idx <- rep(seq.int(0, nexo[g] - 1) * nvar[g], each = length(no.num.idx) ) + no.num.idx DELTA.pi[no.num.idx, ] <- DELTA.pi[no.num.idx, , drop = FALSE] + (dpi.dDelta %*% dDelta.dx)[no.num.idx, , drop = FALSE] } DELTA <- rbind(DELTA.th, DELTA.pi, DELTA) } else { DELTA <- rbind(DELTA.th, DELTA) } } if (group.w.free) { DELTA.gw <- derivative.gw.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) DELTA <- rbind(DELTA.gw, DELTA) } } else if (representation == "RAM") { DELTA <- dxSigma <- lav_ram_dsigma( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) if (lavmodel@meanstructure) { DELTA.mu <- lav_ram_dmu( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) DELTA <- rbind(DELTA.mu, DELTA) } } else { lav_msg_stop(gettextf("representation %s not implemented yet", representation)) } Delta.group[, x.el.idx[[mm]]] <- DELTA } # mm # if type == "free" take care of equality constraints if (type == "free" && ceq.simple && .hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { Delta.group <- Delta.group %*% lavmodel@ceq.simple.K } Delta[[g]] <- Delta.group } # g # if multilevel, rbind levels within group if (.hasSlot(lavmodel, "multilevel") && lavmodel@multilevel) { DELTA <- vector("list", length = lavmodel@ngroups) for (g in 1:lavmodel@ngroups) { DELTA[[g]] <- rbind( Delta[[(g - 1) * 2 + 1]], Delta[[(g - 1) * 2 + 2]] ) } Delta <- DELTA } Delta } computeDeltaDx <- function(lavmodel = NULL, GLIST = NULL, target = "lambda", ceq.simple = FALSE) { # state or final? 
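# usage sketch (for a hypothetical fitted object 'fit'): the Jacobian of
# a single model matrix with respect to the free parameters, e.g.
#   dLambda <- computeDeltaDx(fit@Model, target = "lambda")[[1]]
#   # rows follow vec(Lambda); columns follow the free parameters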
if (is.null(GLIST)) GLIST <- lavmodel@GLIST representation <- lavmodel@representation nmat <- lavmodel@nmat nblocks <- lavmodel@nblocks th.idx <- lavmodel@th.idx # number of columns in DELTA + m.el.idx/x.el.idx type <- "free" # if(type == "free") { if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { NCOL <- lavmodel@nx.unco } else { NCOL <- lavmodel@nx.free } m.el.idx <- x.el.idx <- vector("list", length = length(GLIST)) for (mm in 1:length(GLIST)) { m.el.idx[[mm]] <- lavmodel@m.free.idx[[mm]] if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { x.el.idx[[mm]] <- lavmodel@x.unco.idx[[mm]] } else { x.el.idx[[mm]] <- lavmodel@x.free.idx[[mm]] } # handle symmetric matrices if (lavmodel@isSymmetric[mm]) { # since we use 'x.free.idx', only symmetric elements # are duplicated (not the equal ones, only in x.free.free) dix <- duplicated(x.el.idx[[mm]]) if (any(dix)) { m.el.idx[[mm]] <- m.el.idx[[mm]][!dix] x.el.idx[[mm]] <- x.el.idx[[mm]][!dix] } } } # } else { # NCOL <- sum(unlist(lapply(x.el.idx, function(x) length(unique(x))))) # } # compute Delta per group Delta <- vector("list", length = nblocks) for (g in 1:nblocks) { mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] Delta.group <- NULL for (mm in mm.in.group) { mname <- names(lavmodel@GLIST)[mm] # skip empty ones if (!length(m.el.idx[[mm]])) next # get Delta columns for this model matrix if (representation == "LISREL") { if (target == "lambda") { DELTA <- derivative.lambda.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "th") { DELTA <- derivative.th.LISREL( m = mname, th.idx = th.idx[[g]], idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group], delta = TRUE ) } else if (target == "mu") { DELTA <- derivative.mu.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "nu") { DELTA <- derivative.nu.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "tau") { DELTA <- derivative.tau.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "theta") { DELTA <- derivative.theta.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "gamma") { DELTA <- derivative.gamma.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "beta") { DELTA <- derivative.beta.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "alpha") { DELTA <- derivative.alpha.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "psi") { DELTA <- derivative.psi.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group] ) } else if (target == "sigma") { DELTA <- derivative.sigma.LISREL( m = mname, idx = m.el.idx[[mm]], MLIST = GLIST[mm.in.group], delta = TRUE ) } else { lav_msg_stop(gettextf("target %s not implemented yet", target)) } # initialize? 
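# NOTE: Delta.group is allocated lazily because its number of rows (the
# number of elements of the target matrix) is only known once the first
# DELTA block has been computed.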
if (is.null(Delta.group)) { Delta.group <- matrix(0, nrow = nrow(DELTA), ncol = NCOL) } Delta.group[, x.el.idx[[mm]]] <- DELTA } } # mm if (type == "free" && ceq.simple && .hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { Delta.group <- Delta.group %*% lavmodel@ceq.simple.K } Delta[[g]] <- Delta.group } # g Delta } computeOmega <- function(Sigma.hat = NULL, Mu.hat = NULL, lavsamplestats = NULL, estimator = "ML", meanstructure = FALSE, conditional.x = FALSE, correlation = FALSE) { # nblocks nblocks <- length(Sigma.hat) Omega <- vector("list", length = nblocks) Omega.mu <- vector("list", length = nblocks) for (g in 1:nblocks) { # ML if (estimator %in% c("ML", "REML", "catML")) { if (attr(Sigma.hat[[g]], "po") == FALSE) { # FIXME: WHAT IS THE BEST THING TO DO HERE?? # CURRENTLY: stop lav_msg_warn(gettext( "lav_model_gradient: Sigma.hat is not positive definite\n")) Sigma.hat.inv <- MASS::ginv(Sigma.hat[[g]]) } else { Sigma.hat.inv <- attr(Sigma.hat[[g]], "inv") } if (!lavsamplestats@missing.flag) { # complete data if (meanstructure) { if (conditional.x) { diff <- lavsamplestats@res.int[[g]] - Mu.hat[[g]] W.tilde <- lavsamplestats@res.cov[[g]] + tcrossprod(diff) } else { diff <- lavsamplestats@mean[[g]] - Mu.hat[[g]] W.tilde <- lavsamplestats@cov[[g]] + tcrossprod(diff) } # Browne 1995 eq 4.55 Omega.mu[[g]] <- t(t(diff) %*% Sigma.hat.inv) Omega[[g]] <- (Sigma.hat.inv %*% (W.tilde - Sigma.hat[[g]]) %*% Sigma.hat.inv) } else { if (conditional.x) { W.tilde <- lavsamplestats@res.cov[[g]] } else { W.tilde <- lavsamplestats@cov[[g]] } Omega[[g]] <- (Sigma.hat.inv %*% (W.tilde - Sigma.hat[[g]]) %*% Sigma.hat.inv) } } else { # missing data M <- lavsamplestats@missing[[g]] nvar <- ncol(lavsamplestats@cov[[g]]) OMEGA <- matrix(0, nvar, nvar) OMEGA.MU <- matrix(0, nvar, 1) for (p in 1:length(M)) { SX <- M[[p]][["SY"]] MX <- M[[p]][["MY"]] nobs <- M[[p]][["freq"]] var.idx <- M[[p]][["var.idx"]] Sigma.inv <- inv.chol(Sigma.hat[[g]][var.idx, var.idx], logdet = FALSE ) Mu <- Mu.hat[[g]][var.idx] W.tilde <- SX + tcrossprod(MX - Mu) OMEGA.MU[var.idx, 1] <- (OMEGA.MU[var.idx, 1] + nobs / lavsamplestats@ntotal * t(t(MX - Mu) %*% Sigma.inv)) OMEGA[var.idx, var.idx] <- (OMEGA[var.idx, var.idx] + nobs / lavsamplestats@ntotal * (Sigma.inv %*% (W.tilde - Sigma.hat[[g]][var.idx, var.idx]) %*% Sigma.inv)) } Omega.mu[[g]] <- OMEGA.MU Omega[[g]] <- OMEGA } # missing # GLS } else if (estimator == "GLS") { W.inv <- lavsamplestats@icov[[g]] W <- lavsamplestats@cov[[g]] Omega[[g]] <- (lavsamplestats@nobs[[g]] - 1) / lavsamplestats@nobs[[g]] * (W.inv %*% (W - Sigma.hat[[g]]) %*% W.inv) if (meanstructure) { diff <- as.matrix(lavsamplestats@mean[[g]] - Mu.hat[[g]]) Omega.mu[[g]] <- t(t(diff) %*% W.inv) } } # new in 0.6-18 if(correlation) { diag(Omega[[g]]) <- 0 } } # g if (meanstructure) attr(Omega, "mu") <- Omega.mu Omega } lav_model_gradient_DD <- function(lavmodel, GLIST = NULL, group = 1L) { if (is.null(GLIST)) GLIST <- lavmodel@GLIST #### FIX th + mu!!!!! 
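# NOTE (sketch): DD collects the Jacobians of the individual LISREL
# matrices (lambda, tau, nu, theta, beta, psi, alpha, gamma) for one
# group and re-maps the rows belonging to the dummy ov.y/ov.x variables;
# these are per-matrix building blocks for downstream gradient
# computations (e.g., for estimator = "MML").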
Delta.lambda <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "lambda")[[group]] Delta.tau <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "tau")[[group]] Delta.nu <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "nu")[[group]] Delta.theta <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "theta")[[group]] Delta.beta <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "beta")[[group]] Delta.psi <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "psi")[[group]] Delta.alpha <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "alpha")[[group]] Delta.gamma <- computeDeltaDx(lavmodel, GLIST = GLIST, target = "gamma")[[group]] ov.y.dummy.ov.idx <- lavmodel@ov.y.dummy.ov.idx[[group]] ov.x.dummy.ov.idx <- lavmodel@ov.x.dummy.ov.idx[[group]] ov.y.dummy.lv.idx <- lavmodel@ov.y.dummy.lv.idx[[group]] ov.x.dummy.lv.idx <- lavmodel@ov.x.dummy.lv.idx[[group]] ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx) lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) th.idx <- lavmodel@th.idx[[group]] num.idx <- lavmodel@num.idx[[group]] ord.idx <- unique(th.idx[th.idx > 0L]) # fix Delta's... mm.in.group <- 1:lavmodel@nmat[group] + cumsum(c(0, lavmodel@nmat))[group] MLIST <- GLIST[mm.in.group] DD <- list() nvar <- lavmodel@nvar nfac <- ncol(MLIST$lambda) - length(lv.dummy.idx) # DD$theta theta.idx <- lav_matrix_diagh_idx(nvar) DD$theta <- Delta.theta[theta.idx, , drop = FALSE] if (length(ov.dummy.idx) > 0L) { psi.idx <- lav_matrix_diagh_idx(ncol(MLIST$psi))[lv.dummy.idx] DD$theta[ov.dummy.idx, ] <- Delta.psi[psi.idx, , drop = FALSE] } # num only? FIXME or just all of them? DD$theta <- DD$theta[num.idx, , drop = FALSE] # DD$nu DD$nu <- Delta.nu if (length(ov.dummy.idx) > 0L) { DD$nu[ov.dummy.idx, ] <- Delta.alpha[lv.dummy.idx, ] } DD$nu <- DD$nu[num.idx, , drop = FALSE] # needed? # DD$lambda nr <- nvar nc <- nfac lambda.idx <- nr * ((1:nc) - 1L) + rep(1:nvar, each = nc) DD$lambda <- Delta.lambda[lambda.idx, , drop = FALSE] if (length(ov.dummy.idx) > 0L) { nr <- nrow(MLIST$beta) nc <- nfac # only the first 1:nfac columns # beta.idx <- rep(nr*((1:nc) - 1L), each=length(lv.dummy.idx)) + rep(lv.dummy.idx, times=nc) ## FIXME beta.idx <- rep(nr * ((1:nc) - 1L), times = length(lv.dummy.idx)) + rep(lv.dummy.idx, each = nc) # l.idx <- inr*((1:nc) - 1L) + rep(ov.dummy.idx, each=nc) ## FIXME # l.idx <- rep(nr*((1:nc) - 1L), each=length(ov.dummy.idx)) + rep(ov.dummy.idx, times=nc) l.idx <- rep(nr * ((1:nc) - 1L), times = length(ov.dummy.idx)) + rep(ov.dummy.idx, each = nc) DD$lambda[match(l.idx, lambda.idx), ] <- Delta.beta[beta.idx, , drop = FALSE] } # DD$KAPPA DD$kappa <- Delta.gamma if (length(ov.dummy.idx) > 0L) { nr <- nrow(MLIST$gamma) nc <- ncol(MLIST$gamma) kappa.idx <- nr * ((1:nc) - 1L) + rep(lv.dummy.idx, each = nc) DD$kappa <- DD$kappa[kappa.idx, , drop = FALSE] } # DD$GAMMA if (!is.null(MLIST$gamma)) { nr <- nrow(MLIST$gamma) nc <- ncol(MLIST$gamma) lv.idx <- 1:nfac # MUST BE ROWWISE! gamma.idx <- rep(nr * ((1:nc) - 1L), times = length(lv.idx)) + rep(lv.idx, each = nc) DD$gamma <- Delta.gamma[gamma.idx, , drop = FALSE] } # DD$BETA if (!is.null(MLIST$beta)) { nr <- nc <- nrow(MLIST$beta) lv.idx <- 1:nfac # MUST BE ROWWISE! beta.idx <- rep(nr * ((1:nfac) - 1L), times = nfac) + rep(lv.idx, each = nfac) DD$beta <- Delta.beta[beta.idx, , drop = FALSE] } ## DD$psi DD$psi <- Delta.psi if (length(lv.dummy.idx) > 0L) { nr <- nc <- nrow(MLIST$psi) lv.idx <- 1:nfac # MUST BE ROWWISE! 
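# NOTE (illustration): row-wise linear indices into an nr x nr matrix
# stored column-major; e.g. nr = 3, nfac = 2:
#   rep(3 * ((1:2) - 1L), times = 2) + rep(1:2, each = 2)   # -> 1 4 2 5
#   # i.e. elements (1,1), (1,2), (2,1), (2,2)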
psi.idx <- rep(nr * ((1:nfac) - 1L), times = nfac) + rep(lv.idx, each = nfac) DD$psi <- DD$psi[psi.idx, , drop = FALSE] } ## DD$tau if (!is.null(MLIST$tau)) { DD$tau <- Delta.tau } DD } lavaan/R/lav_mvnorm_cluster.R0000644000176200001440000016445514627656441016017 0ustar liggesusers# loglikelihood clustered/twolevel data # YR: first version around Feb 2017 # take model-implied mean+variance matrices, and reorder/augment them # to facilitate computing of (log)likelihood in the two-level case # when conditional.x = FALSE: # - sigma.w and sigma.b: same dimensions, level-1 variables only # - sigma.zz: level-2 variables only # - sigma.yz: cov(level-1, level-2) # - mu.y: level-1 variables only (mu.w + mu.b) # - mu.w: y within part # - mu.b: y between part # - mu.z: level-2 variables only lav_mvnorm_cluster_implied22l <- function(Lp = NULL, implied = NULL, Mu.W = NULL, Mu.B = NULL, Sigma.W = NULL, Sigma.B = NULL) { if (!is.null(implied)) { # FIXME: only for single-group analysis! Sigma.W <- implied$cov[[1]] Mu.W <- implied$mean[[1]] Sigma.B <- implied$cov[[2]] Mu.B <- implied$mean[[2]] } # within/between.idx between.idx <- Lp$between.idx[[2]] within.idx <- Lp$within.idx[[2]] both.idx <- Lp$both.idx[[2]] # ov.idx per level ov.idx <- Lp$ov.idx # 'tilde' matrices: ALL variables within and between p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # Sigma.W.tilde Sigma.W.tilde <- matrix(0, p.tilde, p.tilde) Sigma.W.tilde[ov.idx[[1]], ov.idx[[1]]] <- Sigma.W # Sigma.B.tilde Sigma.B.tilde <- matrix(0, p.tilde, p.tilde) Sigma.B.tilde[ov.idx[[2]], ov.idx[[2]]] <- Sigma.B # Mu.W.tilde Mu.W.tilde <- numeric(p.tilde) Mu.W.tilde[ov.idx[[1]]] <- Mu.W # Mu.B.tilde Mu.B.tilde <- numeric(p.tilde) Mu.B.tilde[ov.idx[[2]]] <- Mu.B # add Mu.W[within.idx] to Mu.B Mu.WB.tilde <- numeric(p.tilde) Mu.WB.tilde[within.idx] <- Mu.W.tilde[within.idx] Mu.WB.tilde[both.idx] <- (Mu.B.tilde[both.idx] + Mu.W.tilde[both.idx]) # set Mu.W[both.idx] to zero (after we added to WB) Mu.W.tilde[both.idx] <- 0 # get Mu.B[both.idx[ from WB Mu.B.tilde[both.idx] <- Mu.WB.tilde[both.idx] # map to matrices needed for loglik if (length(within.idx) > 0L) { Mu.B.tilde[within.idx] <- 0 } if (length(between.idx) > 0L) { mu.z <- Mu.B.tilde[between.idx] mu.y <- Mu.WB.tilde[-between.idx] mu.w <- Mu.W.tilde[-between.idx] mu.b <- Mu.B.tilde[-between.idx] sigma.zz <- Sigma.B.tilde[between.idx, between.idx, drop = FALSE] sigma.yz <- Sigma.B.tilde[-between.idx, between.idx, drop = FALSE] sigma.b <- Sigma.B.tilde[-between.idx, -between.idx, drop = FALSE] sigma.w <- Sigma.W.tilde[-between.idx, -between.idx, drop = FALSE] } else { mu.z <- numeric(0L) mu.y <- Mu.WB.tilde mu.w <- Mu.W.tilde mu.b <- Mu.B.tilde sigma.zz <- matrix(0, 0L, 0L) sigma.yz <- matrix(0, nrow(Sigma.B.tilde), 0L) sigma.b <- Sigma.B.tilde sigma.w <- Sigma.W.tilde } list( sigma.w = sigma.w, sigma.b = sigma.b, sigma.zz = sigma.zz, sigma.yz = sigma.yz, mu.z = mu.z, mu.y = mu.y, mu.w = mu.w, mu.b = mu.b ) } lav_mvnorm_cluster_2l2implied <- function(Lp, sigma.w = NULL, sigma.b = NULL, sigma.zz = NULL, sigma.yz = NULL, mu.z = NULL, mu.y = NULL, mu.w = NULL, mu.b = NULL) { # between.idx between.idx <- Lp$between.idx[[2]] within.idx <- Lp$within.idx[[2]] both.idx <- Lp$both.idx[[2]] # ov.idx per level ov.idx <- Lp$ov.idx # 'tilde' matrices: ALL variables within and between p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # if we have mu.y, convert to mu.w and mu.b if (!is.null(mu.y)) { mu.b <- mu.y mu.w.tilde <- numeric(p.tilde) mu.w.tilde[ov.idx[[1]]] <- mu.y # NO NEED TO SET THIS TO 
ZERO! # otherwise, we get non-symmetric Hessian!! 0.6-5 # if(length(within.idx) > 0L) { # mu.w.tilde[ -within.idx ] <- 0 # } else { # mu.w.tilde[] <- 0 # } mu.w <- mu.w.tilde[ov.idx[[1]]] } # new in 0.6-18: ensure mu.w[both.idx] is zero? # NO: we get Hessian is not fully symmetric again!! # only do this at the very end (post-estimation) # Mu.W.tilde <- numeric(p.tilde) # Mu.B.tilde <- numeric(p.tilde) # Mu.W.tilde[ov.idx[[1]]] <- mu.w # Mu.B.tilde[ov.idx[[2]]] <- mu.b # Mu.B.tilde[between.idx] <- mu.z # if (length(within.idx) > 0) { # Mu.B.tilde[within.idx] <- 0 # } # Mu.B.tilde[both.idx] <- Mu.W.tilde[both.idx] + Mu.B.tilde[both.idx] # Mu.W.tilde[both.idx] <- 0 # Mu.W <- Mu.W.tilde[ov.idx[[1]]] # Mu.B <- Mu.B.tilde[ov.idx[[2]]] Mu.W.tilde <- numeric( p.tilde ) ###### DEBUG ############## #if(length(within.idx) > 0) { Mu.W.tilde[ ov.idx[[1]] ] <- mu.w #} ########################### Mu.W <- Mu.W.tilde[ ov.idx[[1]] ] # Mu.B Mu.B.tilde <- numeric(p.tilde) Mu.B.tilde[ ov.idx[[1]] ] <- mu.b Mu.B.tilde[ between.idx ] <- mu.z if(length(within.idx) > 0) { Mu.B.tilde[within.idx] <- 0 } Mu.B <- Mu.B.tilde[ ov.idx[[2]] ] # Sigma.W Sigma.W <- sigma.w # Sigma.B Sigma.B.tilde <- matrix(0, p.tilde, p.tilde) Sigma.B.tilde[ov.idx[[1]], ov.idx[[1]]] <- sigma.b Sigma.B.tilde[ov.idx[[1]], between.idx] <- sigma.yz Sigma.B.tilde[between.idx, ov.idx[[1]]] <- t(sigma.yz) Sigma.B.tilde[between.idx, between.idx] <- sigma.zz Sigma.B <- Sigma.B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE] list(Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B) } # Mu.W, Mu.B, Sigma.W, Sigma.B are the model-implied statistics # (not yet reordered) lav_mvnorm_cluster_loglik_samplestats_2l <- function(YLp = NULL, Lp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, Sinv.method = "eigen", log2pi = FALSE, minus.two = TRUE) { # map implied to 2l matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # Lp nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] between.idx <- Lp$between.idx[[2]] cluster.sizes <- Lp$cluster.sizes[[2]] ncluster.sizes <- Lp$ncluster.sizes[[2]] cluster.size.ns <- Lp$cluster.size.ns[[2]] # Y1 samplestats if (length(between.idx) > 0L) { S.PW <- YLp[[2]]$Sigma.W[-between.idx, -between.idx, drop = FALSE] } else { S.PW <- YLp[[2]]$Sigma.W } # Y2 samplestats cov.d <- YLp[[2]]$cov.d mean.d <- YLp[[2]]$mean.d # common parts: sigma.w.inv <- lav_matrix_symmetric_inverse( S = sigma.w, logdet = TRUE, Sinv.method = Sinv.method ) sigma.w.logdet <- attr(sigma.w.inv, "logdet") attr(sigma.w.inv, "logdet") <- NULL if (length(between.idx) > 0L) { sigma.zz.inv <- lav_matrix_symmetric_inverse( S = sigma.zz, logdet = TRUE, Sinv.method = Sinv.method ) sigma.zz.logdet <- attr(sigma.zz.inv, "logdet") attr(sigma.zz.inv, "logdet") <- NULL sigma.yz.zi <- sigma.yz %*% sigma.zz.inv sigma.zi.zy <- t(sigma.yz.zi) sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy } else { sigma.zz.logdet <- 0 sigma.b.z <- sigma.b } # minus 2 * loglikelihood L <- numeric(ncluster.sizes) # logdet B <- numeric(ncluster.sizes) # between qf for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] # data between Y2Yc <- (cov.d[[clz]] + tcrossprod(mean.d[[clz]] - c(mu.z, mu.y))) # FIXME: avoid reorder/b.idx, so we can use between.idx if (length(between.idx) > 0L) { b.idx <- seq_len(length(Lp$between.idx[[2]])) Y2Yc.zz
<- Y2Yc[b.idx, b.idx, drop = FALSE] Y2Yc.yz <- Y2Yc[-b.idx, b.idx, drop = FALSE] Y2Yc.yy <- Y2Yc[-b.idx, -b.idx, drop = FALSE] } else { Y2Yc.yy <- Y2Yc } # construct sigma.j sigma.j <- (nj * sigma.b.z) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse( S = sigma.j, logdet = TRUE, Sinv.method = Sinv.method ) sigma.j.logdet <- attr(sigma.j.inv, "logdet") attr(sigma.j.inv, "logdet") <- NULL # check: what if sigma.j is non-pd? should not happen if (is.na(sigma.j.logdet)) { # stop, and return NA right away # return(as.numeric(NA)) # FORCE? # sigma.j <- lav_matrix_symmetric_force_pd(sigma.j) # sigma.j.inv <- lav_matrix_symmetric_inverse(S = sigma.j, # logdet = TRUE, Sinv.method = Sinv.method) # sigma.j.logdet <- attr(sigma.j.inv, "logdet") # attr(sigma.j.inv, "logdet") <- NULL } # logdet -- between only L[clz] <- (sigma.zz.logdet + sigma.j.logdet) if (length(between.idx) > 0L) { # part 1 -- zz sigma.ji.yz.zi <- sigma.j.inv %*% sigma.yz.zi Vinv.11 <- sigma.zz.inv + nj * (sigma.zi.zy %*% sigma.ji.yz.zi) q.zz <- sum(Vinv.11 * Y2Yc.zz) # part 2 -- yz q.yz <- -nj * sum(sigma.ji.yz.zi * Y2Yc.yz) } else { q.zz <- q.yz <- 0 } # part 5 -- yyc q.yyc <- -nj * sum(sigma.j.inv * Y2Yc.yy) # qf -- between only B[clz] <- q.zz + 2 * q.yz - q.yyc } # q.yya + q.yyb # the reason why we multiply the trace by 'N - nclusters' is # S.PW has been divided by 'N - nclusters' q.W <- sum(cluster.size - 1) * sum(sigma.w.inv * S.PW) # logdet within part L.W <- sum(cluster.size - 1) * sigma.w.logdet # -2*times logl (without the constant) loglik <- sum(L * cluster.size.ns) + sum(B * cluster.size.ns) + q.W + L.W # functions below compute -2 * logl if (!minus.two) { loglik <- loglik / (-2) } # constant # Note: total 'N' = (nobs * #within vars) + (nclusters * #between vars) if (log2pi) { LOG.2PI <- log(2 * pi) nWithin <- length(c(Lp$both.idx[[2]], Lp$within.idx[[2]])) nBetween <- length(Lp$between.idx[[2]]) P <- Lp$nclusters[[1]] * nWithin + Lp$nclusters[[2]] * nBetween constant <- -(P * LOG.2PI) / 2 loglik <- loglik + constant } # loglik.x (only if loglik is requested) if (length(unlist(Lp$ov.x.idx)) > 0L && log2pi && !minus.two) { loglik <- loglik - YLp[[2]]$loglik.x } loglik } # first derivative -2*logl wrt Mu.W, Mu.B, Sigma.W, Sigma.B lav_mvnorm_cluster_dlogl_2l_samplestats <- function(YLp = NULL, Lp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, return.list = FALSE, Sinv.method = "eigen") { # map implied to 2l matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # Lp nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.sizes <- Lp$cluster.sizes[[2]] cluster.idx <- Lp$cluster.idx[[2]] between.idx <- Lp$between.idx[[2]] ncluster.sizes <- Lp$ncluster.sizes[[2]] cluster.size.ns <- Lp$cluster.size.ns[[2]] # Y1 if (length(between.idx) > 0L) { S.PW <- YLp[[2]]$Sigma.W[-between.idx, -between.idx, drop = FALSE] } else { S.PW <- YLp[[2]]$Sigma.W } # Y2 cov.d <- YLp[[2]]$cov.d mean.d <- YLp[[2]]$mean.d # common parts: sigma.w.inv <- lav_matrix_symmetric_inverse( S = sigma.w, logdet = FALSE, Sinv.method = Sinv.method ) # both level-1 and level-2 G.muy <- matrix(0, ncluster.sizes, length(mu.y)) G.Sigma.w <- matrix(0, ncluster.sizes, length(lav_matrix_vech(sigma.w))) G.Sigma.b <- matrix(0, ncluster.sizes, length(lav_matrix_vech(sigma.b))) if (length(between.idx) > 0L) { G.muz <- matrix(0, 
ncluster.sizes, length(mu.z)) G.Sigma.zz <- matrix( 0, ncluster.sizes, length(lav_matrix_vech(sigma.zz)) ) G.Sigma.yz <- matrix(0, ncluster.sizes, length(lav_matrix_vec(sigma.yz))) sigma.zz.inv <- lav_matrix_symmetric_inverse( S = sigma.zz, logdet = FALSE, Sinv.method = Sinv.method ) sigma.yz.zi <- sigma.yz %*% sigma.zz.inv sigma.zi.zy <- t(sigma.yz.zi) sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] # level-2 vectors b.idx <- seq_len(length(Lp$between.idx[[2]])) zyc <- mean.d[[clz]] - c(mu.z, mu.y) yc <- zyc[-b.idx] zc <- zyc[b.idx] # level-2 crossproducts Y2Yc <- (cov.d[[clz]] + tcrossprod(mean.d[[clz]] - c(mu.z, mu.y))) b.idx <- seq_len(length(Lp$between.idx[[2]])) Y2Yc.zz <- Y2Yc[b.idx, b.idx, drop = FALSE] Y2Yc.yz <- Y2Yc[-b.idx, b.idx, drop = FALSE] Y2Yc.yy <- Y2Yc[-b.idx, -b.idx, drop = FALSE] # construct sigma.j sigma.j <- (nj * sigma.b.z) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse( S = sigma.j, logdet = FALSE, Sinv.method = Sinv.method ) sigma.ji.yz.zi <- sigma.j.inv %*% sigma.yz.zi sigma.zi.zy.ji <- t(sigma.ji.yz.zi) # common parts jYZj <- nj * (sigma.j.inv %*% (sigma.yz.zi %*% Y2Yc.zz %*% t(sigma.yz.zi) - Y2Yc.yz %*% t(sigma.yz.zi) - t(Y2Yc.yz %*% t(sigma.yz.zi)) + Y2Yc.yy) %*% sigma.j.inv) Z1 <- Y2Yc.zz %*% t(sigma.ji.yz.zi) %*% sigma.yz YZ1 <- t(Y2Yc.yz) %*% sigma.j.inv %*% sigma.yz # Mu.Z G.muz[clz, ] <- -2 * as.numeric( (sigma.zz.inv + nj * (sigma.zi.zy.ji %*% sigma.yz.zi)) %*% zc - nj * sigma.zi.zy.ji %*% yc ) # MU.Y G.muy[clz, ] <- 2 * nj * as.numeric(zc %*% sigma.zi.zy.ji - yc %*% sigma.j.inv) # SIGMA.W (between part) g.sigma.w <- sigma.j.inv - jYZj tmp <- g.sigma.w * 2 diag(tmp) <- diag(g.sigma.w) G.Sigma.w[clz, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * (sigma.j.inv - jYZj) tmp <- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.Sigma.b[clz, ] <- lav_matrix_vech(tmp) # SIGMA.ZZ g.sigma.zz <- (sigma.zz.inv + nj * sigma.zz.inv %*% ( t(sigma.yz) %*% (sigma.j.inv - jYZj) %*% sigma.yz - (1 / nj * Y2Yc.zz + t(Z1) + Z1 - t(YZ1) - YZ1)) %*% sigma.zz.inv) tmp <- g.sigma.zz * 2 diag(tmp) <- diag(g.sigma.zz) G.Sigma.zz[clz, ] <- lav_matrix_vech(tmp) # SIGMA.ZY g.sigma.yz <- 2 * nj * ( (sigma.j.inv %*% (sigma.yz.zi %*% Y2Yc.zz - sigma.yz - Y2Yc.yz) + jYZj %*% sigma.yz) %*% sigma.zz.inv) G.Sigma.yz[clz, ] <- lav_matrix_vec(g.sigma.yz) } # level-1 d.mu.y <- colSums(G.muy * cluster.size.ns) d.sigma.w1 <- lav_matrix_vech_reverse(colSums(G.Sigma.w * cluster.size.ns)) d.sigma.b <- lav_matrix_vech_reverse(colSums(G.Sigma.b * cluster.size.ns)) # level-2 d.mu.z <- colSums(G.muz * cluster.size.ns) d.sigma.zz <- lav_matrix_vech_reverse(colSums(G.Sigma.zz * cluster.size.ns)) d.sigma.yz <- matrix( colSums(G.Sigma.yz * cluster.size.ns), nrow(sigma.yz), ncol(sigma.yz) ) } # between.idx else { # no level-2 variables for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] # level-2 vectors yc <- mean.d[[clz]] - mu.y # level-2 crossproducts Y2Yc.yy <- (cov.d[[clz]] + tcrossprod(mean.d[[clz]] - mu.y)) # construct sigma.j sigma.j <- (nj * sigma.b) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse( S = sigma.j, logdet = FALSE, Sinv.method = Sinv.method ) # common part jYYj <- nj * sigma.j.inv %*% Y2Yc.yy %*% sigma.j.inv # MU.Y G.muy[clz, ] <- -2 * nj * as.numeric(yc %*% sigma.j.inv) # SIGMA.W (between part) g.sigma.w <- sigma.j.inv - jYYj tmp <- g.sigma.w * 2 diag(tmp) <- diag(g.sigma.w) G.Sigma.w[clz, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * (sigma.j.inv - jYYj) tmp 
<- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.Sigma.b[clz, ] <- lav_matrix_vech(tmp) } # level-1 d.mu.y <- colSums(G.muy * cluster.size.ns) d.sigma.w1 <- lav_matrix_vech_reverse(colSums(G.Sigma.w * cluster.size.ns)) d.sigma.b <- lav_matrix_vech_reverse(colSums(G.Sigma.b * cluster.size.ns)) # level-2 d.mu.z <- numeric(0L) d.sigma.zz <- matrix(0, 0L, 0L) d.sigma.yz <- matrix(0, 0L, 0L) } # Sigma.W (bis) d.sigma.w2 <- (Lp$nclusters[[1]] - nclusters) * (sigma.w.inv - sigma.w.inv %*% S.PW %*% sigma.w.inv) tmp <- d.sigma.w2 * 2 diag(tmp) <- diag(d.sigma.w2) d.sigma.w2 <- tmp d.sigma.w <- d.sigma.w1 + d.sigma.w2 # rearrange dout <- lav_mvnorm_cluster_2l2implied( Lp = Lp, sigma.w = d.sigma.w, sigma.b = d.sigma.b, sigma.yz = d.sigma.yz, sigma.zz = d.sigma.zz, mu.y = d.mu.y, mu.z = d.mu.z ) if (return.list) { out <- dout } else { out <- c( dout$Mu.W, lav_matrix_vech(dout$Sigma.W), dout$Mu.B, lav_matrix_vech(dout$Sigma.B) ) } out } # cluster-wise scores -2*logl wrt Mu.W, Mu.B, Sigma.W, Sigma.B lav_mvnorm_cluster_scores_2l <- function(Y1 = NULL, YLp = NULL, Lp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, Sinv.method = "eigen") { # map implied to 2l matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # Lp nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.idx <- Lp$cluster.idx[[2]] between.idx <- Lp$between.idx[[2]] # Y1 if (length(between.idx) > 0L) { Y1w <- Y1[, -Lp$between.idx[[2]], drop = FALSE] } else { Y1w <- Y1 } Y1w.cm <- t(t(Y1w) - mu.y) # Y2 Y2 <- YLp[[2]]$Y2 # NOTE: ORDER mu.b must match Y2 mu.b <- numeric(ncol(Y2)) if (length(between.idx) > 0L) { mu.b[-Lp$between.idx[[2]]] <- mu.y mu.b[Lp$between.idx[[2]]] <- mu.z } else { mu.b <- mu.y } Y2.cm <- t(t(Y2) - mu.b) # common parts: sigma.w.inv <- lav_matrix_symmetric_inverse( S = sigma.w, logdet = FALSE, Sinv.method = Sinv.method ) # both level-1 and level-2 G.muy <- matrix(0, nclusters, length(mu.y)) G.Sigma.w <- matrix(0, nclusters, length(lav_matrix_vech(sigma.w))) G.Sigma.b <- matrix(0, nclusters, length(lav_matrix_vech(sigma.b))) G.muz <- matrix(0, nclusters, length(mu.z)) G.Sigma.zz <- matrix(0, nclusters, length(lav_matrix_vech(sigma.zz))) G.Sigma.yz <- matrix(0, nclusters, length(lav_matrix_vec(sigma.yz))) if (length(between.idx) > 0L) { sigma.zz.inv <- lav_matrix_symmetric_inverse( S = sigma.zz, logdet = FALSE, Sinv.method = Sinv.method ) sigma.yz.zi <- sigma.yz %*% sigma.zz.inv sigma.zi.zy <- t(sigma.yz.zi) sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy for (cl in seq_len(nclusters)) { # cluster size nj <- cluster.size[cl] # data within for the cluster (centered by mu.y) Y1m <- Y1w.cm[cluster.idx == cl, , drop = FALSE] yc <- Y2.cm[cl, -Lp$between.idx[[2]]] zc <- Y2.cm[cl, Lp$between.idx[[2]]] # data between Y2Yc <- tcrossprod(Y2.cm[cl, ]) Y2Yc.zz <- Y2Yc[Lp$between.idx[[2]], Lp$between.idx[[2]], drop = FALSE ] Y2Yc.yz <- Y2Yc[-Lp$between.idx[[2]], Lp$between.idx[[2]], drop = FALSE ] Y2Yc.yy <- Y2Yc[-Lp$between.idx[[2]], -Lp$between.idx[[2]], drop = FALSE ] # construct sigma.j sigma.j <- (nj * sigma.b.z) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse( S = sigma.j, logdet = FALSE, Sinv.method = Sinv.method ) sigma.ji.yz.zi <- sigma.j.inv %*% sigma.yz.zi sigma.zi.zy.ji <- t(sigma.ji.yz.zi) # common parts jYZj <- nj * (sigma.j.inv %*% (sigma.yz.zi %*% Y2Yc.zz %*% t(sigma.yz.zi) - Y2Yc.yz 
%*% t(sigma.yz.zi) - t(Y2Yc.yz %*% t(sigma.yz.zi)) + Y2Yc.yy) %*% sigma.j.inv) Z1 <- Y2Yc.zz %*% t(sigma.ji.yz.zi) %*% sigma.yz YZ1 <- t(Y2Yc.yz) %*% sigma.j.inv %*% sigma.yz # Mu.Z G.muz[cl, ] <- -2 * as.numeric( (sigma.zz.inv + nj * (sigma.zi.zy.ji %*% sigma.yz.zi)) %*% zc - nj * sigma.zi.zy.ji %*% yc ) # MU.Y G.muy[cl, ] <- 2 * nj * as.numeric(zc %*% sigma.zi.zy.ji - yc %*% sigma.j.inv) # SIGMA.W g.sigma.w <- ((nj - 1) * sigma.w.inv - sigma.w.inv %*% (crossprod(Y1m) - nj * Y2Yc.yy) %*% sigma.w.inv + sigma.j.inv - jYZj) tmp <- g.sigma.w * 2 diag(tmp) <- diag(g.sigma.w) G.Sigma.w[cl, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * (sigma.j.inv - jYZj) tmp <- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.Sigma.b[cl, ] <- lav_matrix_vech(tmp) # SIGMA.ZZ g.sigma.zz <- (sigma.zz.inv + nj * sigma.zz.inv %*% ( t(sigma.yz) %*% (sigma.j.inv - jYZj) %*% sigma.yz - (1 / nj * Y2Yc.zz + t(Z1) + Z1 - t(YZ1) - YZ1)) %*% sigma.zz.inv) tmp <- g.sigma.zz * 2 diag(tmp) <- diag(g.sigma.zz) G.Sigma.zz[cl, ] <- lav_matrix_vech(tmp) # SIGMA.ZY g.sigma.yz <- 2 * nj * ( (sigma.j.inv %*% (sigma.yz.zi %*% Y2Yc.zz - sigma.yz - Y2Yc.yz) + jYZj %*% sigma.yz) %*% sigma.zz.inv) G.Sigma.yz[cl, ] <- lav_matrix_vec(g.sigma.yz) } } # between.idx else { # no level-2 variables for (cl in seq_len(nclusters)) { # cluster size nj <- cluster.size[cl] # data within for the cluster (centered by mu.y) Y1m <- Y1w.cm[cluster.idx == cl, , drop = FALSE] yc <- Y2.cm[cl, ] # data between Y2Yc.yy <- tcrossprod(Y2.cm[cl, ]) # construct sigma.j sigma.j <- (nj * sigma.b) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse( S = sigma.j, logdet = FALSE, Sinv.method = Sinv.method ) # common part jYYj <- nj * sigma.j.inv %*% Y2Yc.yy %*% sigma.j.inv # MU.Y G.muy[cl, ] <- -2 * nj * as.numeric(yc %*% sigma.j.inv) # SIGMA.W g.sigma.w <- ((nj - 1) * sigma.w.inv - sigma.w.inv %*% (crossprod(Y1m) - nj * Y2Yc.yy) %*% sigma.w.inv + sigma.j.inv - jYYj) tmp <- g.sigma.w * 2 diag(tmp) <- diag(g.sigma.w) G.Sigma.w[cl, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * (sigma.j.inv - jYYj) tmp <- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.Sigma.b[cl, ] <- lav_matrix_vech(tmp) } } # rearrange columns to Mu.W, Mu.B, Sigma.W, Sigma.B ov.idx <- Lp$ov.idx p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # Mu.W (for within-only) Mu.W.tilde <- matrix(0, nclusters, p.tilde) Mu.W.tilde[, ov.idx[[1]]] <- G.muy Mu.W.tilde[, Lp$both.idx[[2]]] <- 0 # ZERO!!! 
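# (the within-part means of variables observed at both levels were
#  absorbed into the between-part means by
#  lav_mvnorm_cluster_implied22l(), so their within-level score
#  columns are structurally zero here)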
Mu.W <- Mu.W.tilde[, ov.idx[[1]], drop = FALSE] # Mu.B Mu.B.tilde <- matrix(0, nclusters, p.tilde) Mu.B.tilde[, ov.idx[[1]]] <- G.muy if (length(between.idx) > 0L) { Mu.B.tilde[, between.idx] <- G.muz } Mu.B <- Mu.B.tilde[, ov.idx[[2]], drop = FALSE] # Sigma.W Sigma.W <- G.Sigma.w # Sigma.B if (length(between.idx) > 0L) { p.tilde.star <- p.tilde * (p.tilde + 1) / 2 B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) Sigma.B.tilde <- matrix(0, nclusters, p.tilde.star) col.idx <- lav_matrix_vech(B.tilde[ov.idx[[1]], ov.idx[[1]], drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.b col.idx <- lav_matrix_vec(B.tilde[ov.idx[[1]], between.idx, drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.yz col.idx <- lav_matrix_vech(B.tilde[between.idx, between.idx, drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.zz col.idx <- lav_matrix_vech(B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE ]) Sigma.B <- Sigma.B.tilde[, col.idx, drop = FALSE] } else { p.tilde.star <- p.tilde * (p.tilde + 1) / 2 B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) Sigma.B.tilde <- matrix(0, nclusters, p.tilde.star) col.idx <- lav_matrix_vech(B.tilde[ov.idx[[1]], ov.idx[[1]], drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.b col.idx <- lav_matrix_vech(B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE ]) Sigma.B <- Sigma.B.tilde[, col.idx, drop = FALSE] # Sigma.B <- G.Sigma.b } SCORES <- cbind(Mu.W, Sigma.W, Mu.B, Sigma.B) SCORES } # first-order information: outer crossprod of scores per cluster lav_mvnorm_cluster_information_firstorder <- function(Y1 = NULL, YLp = NULL, Lp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, x.idx = NULL, divide.by.two = FALSE, Sinv.method = "eigen") { N <- NROW(Y1) SCORES <- lav_mvnorm_cluster_scores_2l( Y1 = Y1, YLp = YLp, Lp = Lp, Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, Sinv.method = Sinv.method ) # divide by 2 (if we want scores wrt objective function) if (divide.by.two) { SCORES <- SCORES / 2 } # unit information information <- crossprod(SCORES) / Lp$nclusters[[2]] # if x.idx, set rows/cols to zero if (length(x.idx) > 0L) { nw <- length(as.vector(Mu.W)) nw.star <- nw * (nw + 1) / 2 nb <- length(as.vector(Mu.B)) ov.idx <- Lp$ov.idx x.idx.w <- which(ov.idx[[1]] %in% x.idx) if (length(x.idx.w) > 0L) { xw.idx <- c( x.idx.w, nw + lav_matrix_vech_which_idx(n = nw, idx = x.idx.w) ) } else { xw.idx <- integer(0L) } x.idx.b <- which(ov.idx[[2]] %in% x.idx) if (length(x.idx.b) > 0L) { xb.idx <- c( x.idx.b, nb + lav_matrix_vech_which_idx(n = nb, idx = x.idx.b) ) } else { xb.idx <- integer(0L) } all.idx <- c(xw.idx, nw + nw.star + xb.idx) information[all.idx, ] <- 0 information[, all.idx] <- 0 } information } # expected information 'h1' model # order: mu.w within, vech(sigma.w) within, mu.b between, vech(sigma.b) between # mu.w rows/cols that are split within/between are forced to zero lav_mvnorm_cluster_information_expected <- function(Lp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, x.idx = integer(0L), Sinv.method = "eigen") { # translate to internal matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # create Delta.W.tilde, Delta.B.tilde ov.idx <- Lp$ov.idx nw <- length(ov.idx[[1]]) nb <- length(ov.idx[[2]]) p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) p.tilde.star <- p.tilde * (p.tilde + 1) / 2 npar <- p.tilde +
p.tilde.star B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) w.idx <- lav_matrix_vech(B.tilde[ov.idx[[1]], ov.idx[[1]], drop = FALSE]) b.idx <- lav_matrix_vech(B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE]) Delta.W.tilde <- matrix(0, npar, npar) Delta.B.tilde <- matrix(0, npar, npar) Delta.W.tilde[ c(ov.idx[[1]], w.idx + p.tilde), c(ov.idx[[1]], w.idx + p.tilde) ] <- diag(nw + nw * (nw + 1) / 2) Delta.B.tilde[ c(ov.idx[[2]], b.idx + p.tilde), c(ov.idx[[2]], b.idx + p.tilde) ] <- diag(nb + nb * (nb + 1) / 2) Delta.W.tilde <- cbind(Delta.W.tilde, matrix(0, npar, npar)) Delta.B.tilde <- cbind(matrix(0, npar, npar), Delta.B.tilde) nobs <- Lp$nclusters[[1]] nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.sizes <- Lp$cluster.sizes[[2]] ncluster.sizes <- Lp$ncluster.sizes[[2]] n.s <- Lp$cluster.size.ns[[2]] between.idx <- Lp$between.idx[[2]] information.j <- matrix(0, npar * 2, npar * 2) for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] # Delta.j -- changes per cluster(size) # this is why we can not write info = t(delta) info.sat delta Delta.j <- Delta.B.tilde + 1 / nj * Delta.W.tilde # compute Sigma.j sigma.j <- sigma.w + nj * sigma.b if (length(between.idx) > 0L) { omega.j <- matrix(0, p.tilde, p.tilde) omega.j[-between.idx, -between.idx] <- 1 / nj * sigma.j omega.j[-between.idx, between.idx] <- sigma.yz omega.j[between.idx, -between.idx] <- t(sigma.yz) omega.j[between.idx, between.idx] <- sigma.zz # omega.j <- rbind( cbind(sigma.zz, t(sigma.yz)), # cbind(sigma.yz, 1/nj * sigma.j) ) } else { omega.j <- 1 / nj * sigma.j } omega.j.inv <- solve(omega.j) I11.j <- omega.j.inv I22.j <- 0.5 * lav_matrix_duplication_pre_post(omega.j.inv %x% omega.j.inv) I.j <- lav_matrix_bdiag(I11.j, I22.j) info.j <- t(Delta.j) %*% I.j %*% Delta.j information.j <- information.j + n.s[clz] * info.j } Sigma.W.inv <- lav_matrix_symmetric_inverse( S = Sigma.W, logdet = FALSE, Sinv.method = Sinv.method ) # create Sigma.W.inv.tilde Sigma.W.inv.tilde <- matrix(0, p.tilde, p.tilde) Sigma.W.inv.tilde[ov.idx[[1]], ov.idx[[1]]] <- Sigma.W.inv I11.w <- Sigma.W.inv.tilde I22.w <- 0.5 * lav_matrix_duplication_pre_post(Sigma.W.inv.tilde %x% Sigma.W.inv.tilde) I.w <- lav_matrix_bdiag(I11.w, I22.w) information.w <- (nobs - nclusters) * (t(Delta.W.tilde) %*% I.w %*% Delta.W.tilde) # unit information information.tilde <- 1 / Lp$nclusters[[2]] * (information.w + information.j) # force zero for means both.idx in within part information.tilde[Lp$both.idx[[2]], ] <- 0 information.tilde[, Lp$both.idx[[2]]] <- 0 # if x.idx, set rows/cols to zero if (length(x.idx) > 0L) { xw.idx <- c( x.idx, p.tilde + lav_matrix_vech_which_idx(n = p.tilde, idx = x.idx) ) xb.idx <- npar + xw.idx all.idx <- c(xw.idx, xb.idx) information.tilde[all.idx, ] <- 0 information.tilde[, all.idx] <- 0 } # remove redundant rows/cols ok.idx <- c( ov.idx[[1]], w.idx + p.tilde, npar + ov.idx[[2]], npar + b.idx + p.tilde ) information <- information.tilde[ok.idx, ok.idx] information } # expected information -- delta # for non-saturated models only lav_mvnorm_cluster_information_expected_delta <- function(Lp = NULL, Delta = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, Sinv.method = "eigen") { # translate to internal matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # Delta -- this group npar <- 
NCOL(Delta) # create Delta.W.tilde, Delta.B.tilde ov.idx <- Lp$ov.idx nw <- length(ov.idx[[1]]) nw.star <- nw * (nw + 1) / 2 nb <- length(ov.idx[[2]]) Delta.W <- Delta[1:(nw + nw.star), , drop = FALSE] Delta.B <- Delta[-(1:(nw + nw.star)), , drop = FALSE] p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) p.tilde.star <- p.tilde * (p.tilde + 1) / 2 Delta.W.tilde.Mu <- matrix(0, p.tilde, npar) Delta.W.tilde.Sigma <- matrix(0, p.tilde.star, npar) Delta.B.tilde.Mu <- matrix(0, p.tilde, npar) Delta.B.tilde.Sigma <- matrix(0, p.tilde.star, npar) Delta.W.tilde.Mu[ov.idx[[1]], ] <- Delta.W[1:nw, ] Delta.B.tilde.Mu[ov.idx[[2]], ] <- Delta.B[1:nb, ] # correct Delta to reflect Mu.W[ both.idx ] is added to Mu.B[ both.idx ] # changed in 0.6-5 Delta.B.tilde.Mu[Lp$both.idx[[2]], ] <- (Delta.B.tilde.Mu[Lp$both.idx[[2]], ] + Delta.W.tilde.Mu[Lp$both.idx[[2]], ]) Delta.W.tilde.Mu[Lp$both.idx[[2]], ] <- 0 B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) w.idx <- lav_matrix_vech(B.tilde[ov.idx[[1]], ov.idx[[1]], drop = FALSE]) b.idx <- lav_matrix_vech(B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE]) Delta.W.tilde.Sigma[w.idx, ] <- Delta.W[-(1:nw), ] Delta.B.tilde.Sigma[b.idx, ] <- Delta.B[-(1:nb), ] Delta.W.tilde <- rbind(Delta.W.tilde.Mu, Delta.W.tilde.Sigma) Delta.B.tilde <- rbind(Delta.B.tilde.Mu, Delta.B.tilde.Sigma) nobs <- Lp$nclusters[[1]] nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.sizes <- Lp$cluster.sizes[[2]] ncluster.sizes <- Lp$ncluster.sizes[[2]] n.s <- Lp$cluster.size.ns[[2]] between.idx <- Lp$between.idx[[2]] information.j <- matrix(0, npar, npar) for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] # Delta.j -- changes per cluster(size) # this is why we can not write info = t(delta) info.sat delta Delta.j <- Delta.B.tilde + 1 / nj * Delta.W.tilde # compute Sigma.j sigma.j <- sigma.w + nj * sigma.b if (length(between.idx) > 0L) { omega.j <- matrix(0, p.tilde, p.tilde) omega.j[-between.idx, -between.idx] <- 1 / nj * sigma.j omega.j[-between.idx, between.idx] <- sigma.yz omega.j[between.idx, -between.idx] <- t(sigma.yz) omega.j[between.idx, between.idx] <- sigma.zz # omega.j <- rbind( cbind(sigma.zz, t(sigma.yz)), # cbind(sigma.yz, 1/nj * sigma.j) ) } else { omega.j <- 1 / nj * sigma.j } omega.j.inv <- solve(omega.j) I11.j <- omega.j.inv I22.j <- 0.5 * lav_matrix_duplication_pre_post(omega.j.inv %x% omega.j.inv) I.j <- lav_matrix_bdiag(I11.j, I22.j) info.j <- t(Delta.j) %*% I.j %*% Delta.j information.j <- information.j + n.s[clz] * info.j } Sigma.W.inv <- lav_matrix_symmetric_inverse( S = sigma.w, logdet = FALSE, Sinv.method = Sinv.method ) I11.w <- Sigma.W.inv I22.w <- 0.5 * lav_matrix_duplication_pre_post(Sigma.W.inv %x% Sigma.W.inv) I.w <- lav_matrix_bdiag(I11.w, I22.w) # force zero for means both.idx in within part # changed in 0.6-5 I.w[Lp$both.idx[[2]], ] <- 0 I.w[, Lp$both.idx[[2]]] <- 0 information.w <- (nobs - nclusters) * (t(Delta.W) %*% I.w %*% Delta.W) # unit information information <- 1 / Lp$nclusters[[2]] * (information.w + information.j) information } # observed information # order: mu.w within, vech(sigma.w) within, mu.b between, vech(sigma.b) between # mu.w rows/cols that are split within/between are forced to zero # # numerical approximation (for now) lav_mvnorm_cluster_information_observed <- function(Lp = NULL, YLp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, x.idx = integer(0L), Sinv.method = "eigen") { nobs <- Lp$nclusters[[1]] nw <- length(as.vector(Mu.W)) nw.star <- nw * (nw +
1) / 2 nb <- length(as.vector(Mu.B)) nb.star <- nb * (nb + 1) / 2 ov.idx <- Lp$ov.idx p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # Mu.W (for within-only) Mu.W.tilde <- numeric(p.tilde) Mu.W.tilde[ov.idx[[1]]] <- Mu.W # local function -- gradient GRAD <- function(x) { # Mu.W (for within-only) Mu.W.tilde2 <- numeric(p.tilde) Mu.W.tilde2[ov.idx[[1]]] <- x[1:nw] Mu.W.tilde2[Lp$both.idx[[2]]] <- Mu.W.tilde[Lp$both.idx[[2]]] Mu.W2 <- Mu.W.tilde2[ov.idx[[1]]] Sigma.W2 <- lav_matrix_vech_reverse(x[nw + 1:nw.star]) Mu.B2 <- x[nw + nw.star + 1:nb] Sigma.B2 <- lav_matrix_vech_reverse(x[nw + nw.star + nb + 1:nb.star]) dx <- lav_mvnorm_cluster_dlogl_2l_samplestats( YLp = YLp, Lp = Lp, Mu.W = Mu.W2, Sigma.W = Sigma.W2, Mu.B = Mu.B2, Sigma.B = Sigma.B2, return.list = FALSE, Sinv.method = Sinv.method ) # dx is for -2*logl -1 / 2 * dx } # start.x start.x <- c( as.vector(Mu.W), lav_matrix_vech(Sigma.W), as.vector(Mu.B), lav_matrix_vech(Sigma.B) ) # total information information <- -1 * numDeriv::jacobian(func = GRAD, x = start.x) # unit information information <- information / Lp$nclusters[[2]] # if x.idx, set rows/cols to zero if (length(x.idx) > 0L) { x.idx.w <- which(ov.idx[[1]] %in% x.idx) if (length(x.idx.w) > 0L) { xw.idx <- c( x.idx.w, nw + lav_matrix_vech_which_idx(n = nw, idx = x.idx.w) ) } else { xw.idx <- integer(0L) } x.idx.b <- which(ov.idx[[2]] %in% x.idx) if (length(x.idx.b) > 0L) { xb.idx <- c( x.idx.b, nb + lav_matrix_vech_which_idx(n = nb, idx = x.idx.b) ) } else { xb.idx <- integer(0L) } all.idx <- c(xw.idx, nw + nw.star + xb.idx) information[all.idx, ] <- 0 information[, all.idx] <- 0 } information } # estimate ML estimates of Mu.W, Mu.B, Sigma.W, Sigma.B # using the EM algorithm # # per cluster-SIZE # lav_mvnorm_cluster_em_sat <- function(YLp = NULL, Lp = NULL, tol = 1e-04, max.iter = 5000, min.variance = 1e-05) { # lavdata between.idx <- Lp$between.idx[[2]] within.idx <- Lp$within.idx[[2]] Y2 <- YLp[[2]]$Y2 # starting values for Sigma ov.idx <- Lp$ov.idx # COVT <- lavsamplestats@cov[[1]] # Sigma.W <- diag( diag(COVT)[ov.idx[[1]]] ) # Sigma.B <- diag( diag(COVT)[ov.idx[[2]]] ) Sigma.W <- diag(length(ov.idx[[1]])) Sigma.B <- diag(length(ov.idx[[2]])) Mu.W <- numeric(length(ov.idx[[1]])) Mu.B <- numeric(length(ov.idx[[2]])) # Mu.W.tilde <- YLp[[2]]$Mu.W # Mu.B.tilde <- YLp[[2]]$Mu.B # if(length(between.idx) > 0) { # Mu.W <- Mu.W.tilde[-between.idx] # } else { # Mu.W <- Mu.W.tilde # } # if(length(within.idx) > 0) { # Mu.B <- Mu.B.tilde[-within.idx] # } else { # Mu.B <- Mu.B.tilde # } # report initial fx fx <- lav_mvnorm_cluster_loglik_samplestats_2l( YLp = YLp, Lp = Lp, Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE ) # if verbose, report if (lav_verbose()) { cat( "EM iter:", sprintf("%3d", 0), " fx =", sprintf("%17.10f", fx), "\n" ) } # translate to internal matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z mu.w <- out$mu.w mu.b <- out$mu.b sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # mu.z and sigma.zz can be computed beforehand if (length(between.idx) > 0L) { Z <- Y2[, between.idx, drop = FALSE] mu.z <- colMeans(Z, na.rm = TRUE) sigma.zz <- cov(Z, use = "pairwise.complete.obs") * (Lp$nclusters[[2]] - 1L) / Lp$nclusters[[2]] # sigma.zz <- 1/Lp$nclusters[[2]] * crossprod(Z) - tcrossprod(mu.z) # Y1Y1 <- Y1Y1[-between.idx, -between.idx, drop=FALSE] } # 
EM iterations fx.old <- fx for (i in 1:max.iter) { # E-step estep <- lav_mvnorm_cluster_em_estepb( # Y1 = Y1, YLp = YLp, Lp = Lp, sigma.w = sigma.w, sigma.b = sigma.b, mu.w = mu.w, mu.b = mu.b, sigma.yz = sigma.yz, sigma.zz = sigma.zz, mu.z = mu.z ) # mstep sigma.w <- estep$sigma.w sigma.b <- estep$sigma.b sigma.yz <- estep$sigma.yz mu.w <- estep$mu.w mu.b <- estep$mu.b implied2 <- lav_mvnorm_cluster_2l2implied( Lp = Lp, sigma.w = estep$sigma.w, sigma.b = estep$sigma.b, sigma.zz = sigma.zz, sigma.yz = estep$sigma.yz, mu.z = mu.z, mu.y = NULL, mu.w = estep$mu.w, mu.b = estep$mu.b ) # check for (near-zero) variances at the within level, and set # them to min.variance Sigma.W <- implied2$Sigma.W zero.var <- which(diag(Sigma.W) < min.variance) if (length(zero.var) > 0L) { Sigma.W[, zero.var] <- sigma.w[, zero.var] <- 0 Sigma.W[zero.var, ] <- sigma.w[zero.var, ] <- 0 diag(Sigma.W)[zero.var] <- diag(sigma.w)[zero.var] <- min.variance } fx <- lav_mvnorm_cluster_loglik_samplestats_2l( YLp = YLp, Lp = Lp, Mu.W = implied2$Mu.W, Sigma.W = Sigma.W, Mu.B = implied2$Mu.B, Sigma.B = implied2$Sigma.B, Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE ) # fx.delta fx.delta <- fx - fx.old # what if fx.delta is negative? if (fx.delta < 0) { lav_msg_warn(gettext( "logl decreased during EM steps of the saturated (H1) model")) } if (lav_verbose()) { cat( "EM iter:", sprintf("%3d", i), " fx =", sprintf("%17.10f", fx), " fx.delta =", sprintf("%9.8f", fx.delta), "\n" ) } # convergence check if (fx.delta < tol) { break } else { fx.old <- fx } } # EM iterations list( Sigma.W = implied2$Sigma.W, Sigma.B = implied2$Sigma.B, Mu.W = implied2$Mu.W, Mu.B = implied2$Mu.B, logl = fx ) } # based on lav_mvnorm_cluster_em_estep lav_mvnorm_cluster_em_h0 <- function(lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavpartable = NULL, lavmodel = NULL, lavoptions = NULL, verbose.x = FALSE, fx.tol = 1e-08, dx.tol = 1e-05, max.iter = 5000, mstep.iter.max = 10000L, mstep.rel.tol = 1e-10) { # single group only for now stopifnot(lavdata@ngroups == 1L) # lavdata Lp <- lavdata@Lp[[1]] # first group only (for now) ov.names.l <- lavdata@ov.names.l[[1]] # first group only (for now) Y1 <- lavdata@X[[1]] # first group only YLp <- lavsamplestats@YLp[[1]] # first group only between.idx <- Lp$between.idx[[2]] Y2 <- YLp[[2]]$Y2 # initial values x.current <- lav_model_get_parameters(lavmodel) # implied if (is.null(lavimplied)) { lavimplied <- lav_model_implied(lavmodel) } # TODO: what if current 'starting' parameters imply a non-pd sigma.b? 
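# Outline of the EM scheme implemented below (reading aid only):
#  - E-step: lav_mvnorm_cluster_em_estepb() returns expected
#    within/between moments, given the current model-implied
#    mu.w/mu.b/sigma.w/sigma.b/sigma.yz/sigma.zz
#  - M-step: refit the two-level model as a *two-group* model
#    (groups = within/between, sample.nobs = Lp$nclusters) on those
#    expected moments, yielding updated parameter values
# Iteration stops when the logl increase (fx.delta) drops below
# fx.tol, or when the largest gradient element (max.dx) drops below
# dx.tol.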
# report initial fx fx <- lav_mvnorm_cluster_loglik_samplestats_2l( YLp = YLp, Lp = Lp, Mu.W = lavimplied$mean[[1]], Sigma.W = lavimplied$cov[[1]], Mu.B = lavimplied$mean[[2]], Sigma.B = lavimplied$cov[[2]], Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE ) # if verbose, report if (lav_verbose()) { cat( "EM iter:", sprintf("%3d", 0), " fx =", sprintf("%17.10f", fx), "\n" ) } # translate to internal matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = lavimplied$mean[[1]], Sigma.W = lavimplied$cov[[1]], Mu.B = lavimplied$mean[[2]], Sigma.B = lavimplied$cov[[2]] ) mu.y <- out$mu.y mu.z <- out$mu.z mu.w <- out$mu.w mu.b <- out$mu.b sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # mu.z and sigma.zz can be computed beforehand if (length(between.idx) > 0L) { Z <- Y2[, between.idx, drop = FALSE] mu.z <- colMeans(Y2)[between.idx] sigma.zz <- cov(Z) * (Lp$nclusters[[2]] - 1L) / Lp$nclusters[[2]] # sigma.zz <- 1/Lp$nclusters[[2]] * crossprod(Z) - tcrossprod(mu.z) # Y1Y1 <- Y1Y1[-between.idx, -between.idx, drop=FALSE] } # EM iterations fx.old <- fx fx2.old <- 0 REL <- numeric(max.iter) for (i in 1:max.iter) { # E-step estep <- lav_mvnorm_cluster_em_estepb( YLp = YLp, Lp = Lp, sigma.w = sigma.w, sigma.b = sigma.b, mu.w = mu.w, mu.b = mu.b, sigma.yz = sigma.yz, sigma.zz = sigma.zz, mu.z = mu.z ) # back to model-implied dimensions implied <- lav_mvnorm_cluster_2l2implied( Lp = Lp, sigma.w = estep$sigma.w, sigma.b = estep$sigma.b, sigma.zz = sigma.zz, sigma.yz = estep$sigma.yz, mu.z = mu.z, mu.y = NULL, mu.w = estep$mu.w, mu.b = estep$mu.b ) rownames(implied$Sigma.W) <- ov.names.l[[1]] rownames(implied$Sigma.B) <- ov.names.l[[2]] # M-step # fit two-group model local.partable <- lavpartable # if a group column exists, delete it (it will be overridden anyway) local.partable$group <- NULL level.idx <- which(names(local.partable) == "level") names(local.partable)[level.idx] <- "group" local.partable$est <- NULL local.partable$se <- NULL # give current values as starting values free.idx <- which(lavpartable$free > 0L) local.partable$ustart[free.idx] <- x.current local.fit <- lavaan(local.partable, sample.cov = list( within = implied$Sigma.W, between = implied$Sigma.B ), sample.mean = list( within = implied$Mu.W, between = implied$Mu.B ), sample.nobs = Lp$nclusters, sample.cov.rescale = FALSE, control = list( iter.max = mstep.iter.max, rel.tol = mstep.rel.tol ), fixed.x = any(lavpartable$exo == 1L), estimator = "ML", warn = FALSE, # no warnings check.start = FALSE, check.post = FALSE, check.gradient = FALSE, check.vcov = FALSE, baseline = FALSE, h1 = FALSE, se = "none", test = "none" ) # end of M-step implied2 <- local.fit@implied fx <- lav_mvnorm_cluster_loglik_samplestats_2l( YLp = YLp, Lp = Lp, Mu.W = implied2$mean[[1]], Sigma.W = implied2$cov[[1]], Mu.B = implied2$mean[[2]], Sigma.B = implied2$cov[[2]], Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE ) # fx.delta fx.delta <- fx - fx.old # derivatives lavmodel <- lav_model_set_parameters(lavmodel, x = local.fit@optim$x) dx <- lav_model_gradient(lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats ) max.dx <- max(abs(dx)) if (lav_verbose()) { cat( "EM iter:", sprintf("%3d", i), " fx =", sprintf("%17.10f", fx), " fx.delta =", sprintf("%9.8f", fx.delta), " mstep.iter =", sprintf( "%3d", lavInspect(local.fit, "iterations") ), " max.dx = ", sprintf("%9.8f", max.dx), "\n" ) } # stopping rule check if (fx.delta < fx.tol) { if (lav_verbose()) { cat("EM stopping rule reached: fx.delta
< ", fx.tol, "\n") } break } else { fx.old <- fx x.current <- local.fit@optim$x if (verbose.x) { print(round(x.current, 3)) } } # second stopping rule check -- derivatives if (max.dx < dx.tol) { if (lav_verbose()) { cat("EM stopping rule reached: max.dx < ", dx.tol, "\n") } break } # translate to internal matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = implied2$mean[[1]], Sigma.W = implied2$cov[[1]], Mu.B = implied2$mean[[2]], Sigma.B = implied2$cov[[2]] ) mu.y <- out$mu.y mu.z <- out$mu.z mu.w <- out$mu.w mu.b <- out$mu.b sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz } # EM iterations x <- local.fit@optim$x # add attributes if (i < max.iter) { attr(x, "converged") <- TRUE attr(x, "warn.txt") <- "" } else { attr(x, "converged") <- FALSE attr(x, "warn.txt") <- paste("maxmimum number of iterations (", max.iter, ") ", "was reached without convergence.\n", sep = "" ) } attr(x, "iterations") <- i attr(x, "control") <- list( em.iter.max = max.iter, em.fx.tol = fx.tol, em.dx.tol = dx.tol ) attr(fx, "fx.group") <- fx # single group for now attr(x, "fx") <- fx x } # get the random effects (here: expected values for cluster means) # and optionally a standard error lav_mvnorm_cluster_em_estep_ranef <- function(YLp = NULL, Lp = NULL, sigma.w = NULL, sigma.b = NULL, sigma.yz = NULL, sigma.zz = NULL, mu.z = NULL, mu.w = NULL, mu.b = NULL, se = FALSE) { # sample stats nobs <- Lp$nclusters[[1]] nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] between.idx <- Lp$between.idx[[2]] Y2 <- YLp[[2]]$Y2 nvar.y <- ncol(sigma.w) nvar.z <- ncol(sigma.zz) MB.j <- matrix(0, nrow = nclusters, ncol = nvar.y) SE.j <- matrix(0, nrow = nclusters, ncol = nvar.y) mu.y <- mu.w + mu.b if (length(between.idx) > 0L) { sigma.1 <- cbind(sigma.yz, sigma.b) mu <- c(mu.z, mu.y) } else { sigma.1 <- sigma.b mu <- mu.y } # E-step for (cl in seq_len(nclusters)) { nj <- cluster.size[cl] # data if (length(between.idx) > 0L) { # z comes first! 
b.j <- c( Y2[cl, between.idx], Y2[cl, -between.idx] ) ybar.j <- Y2[cl, -between.idx] } else { ybar.j <- b.j <- Y2[cl, ] } sigma.j <- sigma.w + nj * sigma.b if (length(between.idx) > 0L) { omega.j <- rbind( cbind(sigma.zz, t(sigma.yz)), cbind(sigma.yz, 1 / nj * sigma.j) ) } else { omega.j <- 1 / nj * sigma.j } omega.j.inv <- solve(omega.j) # E(v|y) Ev <- as.numeric(mu.b + (sigma.1 %*% omega.j.inv %*% (b.j - mu))) MB.j[cl, ] <- Ev if (se) { # Cov(v|y) Covv <- sigma.b - (sigma.1 %*% omega.j.inv %*% t(sigma.1)) # force symmetry Covv <- (Covv + t(Covv)) / 2 Covv.diag <- diag(Covv) nonzero.idx <- which(Covv.diag > 0) SE.j[cl, ] <- numeric(length(Covv.diag)) SE.j[cl, nonzero.idx] <- sqrt(Covv.diag[nonzero.idx]) } } if (se) { attr(MB.j, "se") <- SE.j } MB.j } # per cluster lav_mvnorm_cluster_em_estep <- function( # Y1 = NULL, YLp = NULL, Lp = NULL, sigma.w = NULL, sigma.b = NULL, sigma.yz = NULL, sigma.zz = NULL, mu.z = NULL, mu.w = NULL, mu.b = NULL) { # sample stats nobs <- Lp$nclusters[[1]] nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.idx <- Lp$cluster.idx[[2]] within.idx <- Lp$within.idx[[2]] between.idx <- Lp$between.idx[[2]] both.idx <- Lp$both.idx[[2]] Y2 <- YLp[[2]]$Y2 Y1Y1 <- YLp[[2]]$Y1Y1 nvar.y <- ncol(sigma.w) nvar.z <- ncol(sigma.zz) CW2.j <- matrix(0, nrow = nvar.y, ncol = nvar.y) CB.j <- matrix(0, nrow = nvar.y, ncol = nvar.y) MW.j <- matrix(0, nrow = nclusters, ncol = nvar.y) MB.j <- matrix(0, nrow = nclusters, ncol = nvar.y) ZY.j <- matrix(0, nrow = nvar.z, ncol = nvar.y) mu.y <- mu.w + mu.b if (length(between.idx) > 0L) { sigma.1 <- cbind(sigma.yz, sigma.b) mu <- c(mu.z, mu.y) Y1Y1 <- Y1Y1[-between.idx, -between.idx, drop = FALSE] } else { sigma.1 <- sigma.b mu <- mu.y } # E-step for (cl in seq_len(nclusters)) { nj <- cluster.size[cl] # data if (length(between.idx) > 0L) { # z comes first! b.j <- c( Y2[cl, between.idx], Y2[cl, -between.idx] ) ybar.j <- Y2[cl, -between.idx] } else { ybar.j <- b.j <- Y2[cl, ] } sigma.j <- sigma.w + nj * sigma.b if (length(between.idx) > 0L) { omega.j <- rbind( cbind(sigma.zz, t(sigma.yz)), cbind(sigma.yz, 1 / nj * sigma.j) ) } else { omega.j <- 1 / nj * sigma.j } omega.j.inv <- solve(omega.j) # E(v|y) Ev <- as.numeric(mu.b + (sigma.1 %*% omega.j.inv %*% (b.j - mu))) # Cov(v|y) Covv <- sigma.b - (sigma.1 %*% omega.j.inv %*% t(sigma.1)) # force symmetry Covv <- (Covv + t(Covv)) / 2 # E(vv|y) = Cov(v|y) + E(v|y)E(v|y)^T Evv <- Covv + tcrossprod(Ev) # store for this cluster MW.j[cl, ] <- ybar.j - Ev MB.j[cl, ] <- Ev CW2.j <- CW2.j + nj * (Evv - tcrossprod(ybar.j, Ev) - tcrossprod(Ev, ybar.j)) CB.j <- CB.j + Evv # between only if (length(between.idx) > 0L) { ZY.j <- ZY.j + tcrossprod(Y2[cl, between.idx], Ev) } } M.w <- 1 / nobs * colSums(MW.j * cluster.size) M.b <- 1 / nclusters * colSums(MB.j) C.b <- 1 / nclusters * CB.j C.w <- 1 / nobs * (Y1Y1 + CW2.j) # end of E-step # make symmetric (not needed here?) # C.b <- (C.b + t(C.b))/2 # C.w <- (C.w + t(C.w))/2 # between only if (length(between.idx) > 0L) { A <- 1 / nclusters * ZY.j - tcrossprod(mu.z, M.b) } sigma.w <- C.w - tcrossprod(M.w) sigma.b <- C.b - tcrossprod(M.b) mu.w <- M.w mu.b <- M.b if (length(between.idx) > 0L) { sigma.yz <- t(A) } list( sigma.w = sigma.w, sigma.b = sigma.b, mu.w = mu.w, mu.b = mu.b, sigma.yz = sigma.yz, sigma.zz = sigma.zz, mu.z = mu.z ) } # per cluster SIZE lav_mvnorm_cluster_em_estepb <- function( # Y1 = NULL, # not used! 
YLp = NULL, Lp = NULL, sigma.w = NULL, sigma.b = NULL, sigma.yz = NULL, sigma.zz = NULL, mu.z = NULL, mu.w = NULL, mu.b = NULL) { # sample stats nobs <- Lp$nclusters[[1]] nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.idx <- Lp$cluster.idx[[2]] between.idx <- Lp$between.idx[[2]] cluster.sizes <- Lp$cluster.sizes[[2]] ncluster.sizes <- Lp$ncluster.sizes[[2]] n.s <- Lp$cluster.size.ns[[2]] Y2 <- YLp[[2]]$Y2 Y1Y1 <- YLp[[2]]$Y1Y1 nvar.y <- ncol(sigma.w) nvar.z <- ncol(sigma.zz) mu.y <- mu.w + mu.b if (length(between.idx) > 0L) { sigma.1 <- cbind(sigma.yz, sigma.b) mu <- c(mu.z, mu.y) Y1Y1 <- Y1Y1[-between.idx, -between.idx, drop = FALSE] } else { sigma.1 <- sigma.b mu <- mu.y } # per cluster SIZE CW2.s <- matrix(0, nrow = nvar.y, ncol = nvar.y) CB.s <- matrix(0, nrow = nvar.y, ncol = nvar.y) MW.s <- matrix(0, nrow = ncluster.sizes, ncol = nvar.y) MB.s <- matrix(0, nrow = ncluster.sizes, ncol = nvar.y) ZY.s <- matrix(0, nvar.z, nvar.y) # E-step for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] # data if (length(between.idx) > 0L) { # z comes first! b.j <- cbind( Y2[cluster.size == nj, between.idx, drop = FALSE], Y2[cluster.size == nj, -between.idx, drop = FALSE] ) ybar.j <- Y2[cluster.size == nj, -between.idx, drop = FALSE] } else { ybar.j <- b.j <- Y2[cluster.size == nj, , drop = FALSE] } sigma.j <- sigma.w + nj * sigma.b if (length(between.idx) > 0L) { omega.j <- rbind( cbind(sigma.zz, t(sigma.yz)), cbind(sigma.yz, 1 / nj * sigma.j) ) } else { omega.j <- 1 / nj * sigma.j } omega.j.inv <- solve(omega.j) sigma.1.j.inv <- sigma.1 %*% omega.j.inv # E(v|y) b.jc <- t(t(b.j) - mu) tmp <- b.jc %*% t(sigma.1.j.inv) Ev <- t(t(tmp) + mu.b) # Cov(v|y) Covv <- n.s[clz] * (sigma.b - (sigma.1.j.inv %*% t(sigma.1))) # force symmetry Covv <- (Covv + t(Covv)) / 2 # E(vv|y) = Cov(v|y) + E(v|y)E(v|y)^T Evv <- Covv + crossprod(Ev) # store for this cluster SIZE MW.s[clz, ] <- nj * colSums(ybar.j - Ev) MB.s[clz, ] <- colSums(Ev) CW2.s <- CW2.s + nj * (Evv - crossprod(ybar.j, Ev) - crossprod(Ev, ybar.j)) CB.s <- CB.s + Evv # between only if (length(between.idx) > 0L) { ZY.s <- ZY.s + crossprod(Y2[cluster.size == nj, between.idx, drop = FALSE ], Ev) } } # cluster-sizes M.ws <- 1 / nobs * colSums(MW.s) M.bs <- 1 / nclusters * colSums(MB.s) C.bs <- 1 / nclusters * CB.s C.ws <- 1 / nobs * (Y1Y1 + CW2.s) # between only if (length(between.idx) > 0L) { As <- 1 / nclusters * ZY.s - tcrossprod(mu.z, M.bs) } sigma.w <- C.ws - tcrossprod(M.ws) sigma.b <- C.bs - tcrossprod(M.bs) mu.w <- M.ws mu.b <- M.bs if (length(between.idx) > 0L) { sigma.yz <- t(As) } list( sigma.w = sigma.w, sigma.b = sigma.b, mu.w = mu.w, mu.b = mu.b, sigma.yz = sigma.yz, sigma.zz = sigma.zz, mu.z = mu.z ) } lavaan/R/lav_partable_labels.R0000644000176200001440000001402214627656440016033 0ustar liggesusers# generate labels for each parameter lav_partable_labels <- function(partable, blocks = c("group", "level"), group.equal = "", group.partial = "", type = "user") { # catch empty partable if (length(partable$lhs) == 0L) { return(character(0L)) } # default labels label <- paste(partable$lhs, partable$op, partable$rhs, sep = "") # handle multiple groups if ("group" %in% blocks) { if (is.character(partable$group)) { group.label <- unique(partable$group) group.label <- group.label[nchar(group.label) > 0L] ngroups <- length(group.label) } else { ngroups <- lav_partable_ngroups(partable) group.label <- 1:ngroups } if (ngroups > 1L) { for (g in 2:ngroups) { label[partable$group == group.label[g]] <- 
paste(label[partable$group == group.label[g]], ".g", g, sep = "" ) } } } else { ngroups <- 1L } # cat("DEBUG: label start:\n"); print(label); cat("\n") # cat("group.equal = ", group.equal, "\n") # cat("group.partial = ", group.partial, "\n") # use group.equal so that equal sets of parameters get the same label if (ngroups > 1L && length(group.equal) > 0L) { if ("intercepts" %in% group.equal || "residuals" %in% group.equal || "residual.covariances" %in% group.equal) { ov.names.nox <- vector("list", length = ngroups) for (g in 1:ngroups) { ov.names.nox[[g]] <- unique(unlist(lav_partable_vnames(partable, "ov.nox", group = g))) } } if ("thresholds" %in% group.equal) { ov.names.ord <- vector("list", length = ngroups) for (g in 1:ngroups) { ov.names.ord[[g]] <- unique(unlist(lav_partable_vnames(partable, "ov.ord", group = g))) } } if ("means" %in% group.equal || "lv.variances" %in% group.equal || "lv.covariances" %in% group.equal) { lv.names <- vector("list", length = ngroups) for (g in 1:ngroups) { lv.names[[g]] <- unique(unlist(lav_partable_vnames(partable, "lv", group = g))) } } # g1.flag: TRUE if included, FALSE if not g1.flag <- logical(length(partable$lhs)) # LOADINGS if ("loadings" %in% group.equal) { g1.flag[partable$op == "=~" & partable$group == 1L] <- TRUE } # COMPOSITE LOADINGS (new in 0.6-4) if ("composite.loadings" %in% group.equal) { g1.flag[partable$op == "<~" & partable$group == 1L] <- TRUE } # INTERCEPTS (OV) if ("intercepts" %in% group.equal) { g1.flag[partable$op == "~1" & partable$group == 1L & partable$lhs %in% ov.names.nox[[1L]]] <- TRUE } # THRESHOLDS (OV-ORD) if ("thresholds" %in% group.equal) { g1.flag[partable$op == "|" & partable$group == 1L & partable$lhs %in% ov.names.ord[[1L]]] <- TRUE } # MEANS (LV) if ("means" %in% group.equal) { g1.flag[partable$op == "~1" & partable$group == 1L & partable$lhs %in% lv.names[[1L]]] <- TRUE } # REGRESSIONS if ("regressions" %in% group.equal) { g1.flag[partable$op == "~" & partable$group == 1L] <- TRUE } # RESIDUAL variances (FIXME: OV ONLY!) if ("residuals" %in% group.equal) { g1.flag[partable$op == "~~" & partable$group == 1L & partable$lhs %in% ov.names.nox[[1L]] & partable$lhs == partable$rhs] <- TRUE } # RESIDUAL covariances (FIXME: OV ONLY!) if ("residual.covariances" %in% group.equal) { g1.flag[partable$op == "~~" & partable$group == 1L & partable$lhs %in% ov.names.nox[[1L]] & partable$lhs != partable$rhs] <- TRUE } # LV VARIANCES if ("lv.variances" %in% group.equal) { g1.flag[partable$op == "~~" & partable$group == 1L & partable$lhs %in% lv.names[[1L]] & partable$lhs == partable$rhs] <- TRUE } # LV COVARIANCES if ("lv.covariances" %in% group.equal) { g1.flag[partable$op == "~~" & partable$group == 1L & partable$lhs %in% lv.names[[1L]] & partable$lhs != partable$rhs] <- TRUE } # if group.partial, set corresponding flag to FALSE if (length(group.partial) > 0L) { g1.flag[label %in% group.partial & partable$group == 1L] <- FALSE } # for each (constrained) parameter in 'group 1', find a similar one # in the other groups (we assume here that the models need # NOT be the same across groups!)
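# (for every flagged group-1 parameter, the loop below looks up the
#  parameters with the same lhs/op/rhs in the later groups and copies
#  the group-1 label onto them; this is how, e.g.,
#  group.equal = "loadings" makes a loading labeled "f=~x2" (the
#  default lhs-op-rhs label format) carry the same label -- and hence
#  an equality constraint -- in every group)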
g1.idx <- which(g1.flag) for (i in 1:length(g1.idx)) { ref.idx <- g1.idx[i] idx <- which(partable$lhs == partable$lhs[ref.idx] & partable$op == partable$op[ref.idx] & partable$rhs == partable$rhs[ref.idx] & partable$group > 1L) label[idx] <- label[ref.idx] } } # cat("DEBUG: g1.idx = ", g1.idx, "\n") # cat("DEBUG: label after group.equal:\n"); print(label); cat("\n") # handle other block identifier (not 'group') for (block in blocks) { if (block == "group") { next } else if (block == "level" && !is.null(partable[[block]])) { # all but first level lev_vals <- lav_partable_level_values(partable) idx <- which(partable[[block]] != lev_vals[1]) label[idx] <- paste(label[idx], ".", "l", partable[[block]][idx], sep = "" ) } else if (!is.null(partable[[block]])) { label <- paste(label, ".", block, partable[[block]], sep = "") } } # user-specified labels -- override everything!! user.idx <- which(nchar(partable$label) > 0L) label[user.idx] <- partable$label[user.idx] # cat("DEBUG: user.idx = ", user.idx, "\n") # cat("DEBUG: label after user.idx:\n"); print(label); cat("\n") # which labels do we need? if (type == "user") { idx <- 1:length(label) } else if (type == "free") { # idx <- which(partable$free > 0L & !duplicated(partable$free)) idx <- which(partable$free > 0L) # } else if(type == "unco") { # idx <- which(partable$unco > 0L & !duplicated(partable$unco)) } else { lav_msg_stop(gettext("argument `type' must be one of free or user")) } label[idx] } lavaan/R/lav_lavaan_step05_samplestats.R0000644000176200001440000000561414627656441020011 0ustar liggesuserslav_lavaan_step05_samplestats <- function(slotSampleStats = NULL, # nolint lavdata = NULL, lavoptions = NULL, WLS.V = NULL, # nolint NACOV = NULL, # nolint sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, ov.names = NULL, ov.names.x = NULL, lavpartable = NULL) { # # # # # # # # # # # # # # # # 5. 
lavsamplestats # # # # # # # # # # # # # # # # # if slotSampleStats not NULL # copy to lavsamplestats # else # if lavdata@data.type == "full" # compute lavsamplestats via lav_samplestats_from_data # else # if lavdata@data.type == "moment" # if lavoptions$meanstructure TRUE but sample.mean is NULL: # ** warning ** # compute lavsamplestats via lav_samplestats_from_moments # else # create lavsamplestats object (type lavSampleStats) with data from # lavdata and lavpta if (!is.null(slotSampleStats)) { lavsamplestats <- slotSampleStats } else if (lavdata@data.type == "full") { if (lav_verbose()) { cat("lavsamplestats ...") } lavsamplestats <- lav_samplestats_from_data( lavdata = lavdata, lavoptions = lavoptions, WLS.V = WLS.V, NACOV = NACOV ) if (lav_verbose()) { cat(" done.\n") } } else if (lavdata@data.type == "moment") { if (lav_verbose()) { cat("lavsamplestats ...") } # check if we have sample.mean and meanstructure = TRUE if (lavoptions$meanstructure && is.null(sample.mean)) { lav_msg_warn( gettext("sample.mean= argument is missing, but model contains mean/intercept parameters.")) } lavsamplestats <- lav_samplestats_from_moments( sample.cov = sample.cov, sample.mean = sample.mean, sample.th = sample.th, sample.nobs = sample.nobs, ov.names = ov.names, ov.names.x = ov.names.x, WLS.V = WLS.V, NACOV = NACOV, lavoptions = lavoptions ) if (lav_verbose()) { cat(" done.\n") } } else { # no data lavsamplestats <- new("lavSampleStats", ngroups = lavdata@ngroups, nobs = as.list(rep(0L, lavdata@ngroups)), cov.x = vector("list", length = lavdata@ngroups), mean.x = vector("list", length = lavdata@ngroups), th.idx = attr(lavpartable, "th.idx"), missing.flag = FALSE ) } if (lav_debug()) { print(str(lavsamplestats)) } lavsamplestats } lavaan/R/lav_norm.R0000644000176200001440000000462314627656441013701 0ustar liggesusers# simple derivatives of the normal distribution # dnorm dnorm_dummy <- function(y, mu = 0, sigma2 = 1) { sigma <- sqrt(sigma2) 1 / (sigma * sqrt(2 * pi)) * exp(-0.5 * ((y - mu) / sigma * (y - mu) / sigma)) } # dnorm_dmu_x <- function(x, y, sigma2 = 1) { # dnorm_dummy(y = y, mu = x, sigma2 = sigma2) # } # numDeriv:::grad(func=dnorm_dmu_x, x=0.3, y=2.3, sigma2=16) # partial derivative - mu dnorm_dmu <- function(y, mu = 0, sigma2 = 1) { dy <- dnorm(x = y, mean = mu, sd = sqrt(sigma2)) (y - mu) / sigma2 * dy } # dnorm_dsigma2_x <- function(x, y, mu = 0) { # dnorm_dummy(y = y, mu = mu, sigma2 = x) # } # numDeriv:::grad(func=dnorm_dsigma2_x, x=16, y=2.3, mu=0.3) # partial derivative - sigma2 dnorm_dsigma2 <- function(y, mu = 0, sigma2 = 1) { dy <- dnorm(x = y, mean = mu, sd = sqrt(sigma2)) (1 / (2 * sigma2 * sigma2) * (y - mu) * (y - mu) - 1 / (2 * sigma2)) * dy } # dnorm_dy_x <- function(x, mu = 0, sigma2 = 1) { # dnorm_dummy(y = x, mu = mu, sigma2 = sigma2) # } # numDeriv:::grad(func=dnorm_dy_x, x=2.3, mu=0.3, sigma2=16) # partial derivative - y dnorm_dy <- function(y, mu = 0, sigma2 = 1) { dy <- dnorm(x = y, mean = mu, sd = sqrt(sigma2)) -(y - mu) / sigma2 * dy } #### d log dnorm #### # # d log dnorm() / d theta = 1/dy d dnorm() / d theta dlogdnorm <- function(y, mu = 0, sigma2 = 1) { sigma <- sqrt(sigma2) -log(sigma * sqrt(2 * pi)) + (-0.5 * ((y - mu) / sigma * (y - mu) / sigma)) } # dlogdnorm_dmu_x <- function(x, y, sigma2 = 1) { # dlogdnorm(y = y, mu = x, sigma2 = sigma2) # } # numDeriv:::grad(func=dlogdnorm_dmu_x, x=0.3, y=2.3, sigma2=16) # partial derivative - mu dlogdnorm_dmu <- function(y, mu = 0, sigma2 = 1) { (y - mu) / sigma2 } # dlogdnorm_dmu(y = 2.3, mu = 0.3, sigma2 = 16) # 
# dlogdnorm_dsigma2_x <- function(x, y, mu = 0) {
#   dlogdnorm(y = y, mu = mu, sigma2 = x)
# }
# numDeriv:::grad(func=dlogdnorm_dsigma2_x, x=16, y=2.3, mu=0.3)

# partial derivative - sigma2
dlogdnorm_dsigma2 <- function(y, mu = 0, sigma2 = 1) {
  1 / (2 * sigma2 * sigma2) * (y - mu) * (y - mu) - 1 / (2 * sigma2)
}
# dlogdnorm_dsigma2(y = 2.3, mu = 0.3, sigma2 = 16)

# dlogdnorm_dy_x <- function(x, mu = 0, sigma2 = 1) {
#   dlogdnorm(y = x, mu = mu, sigma2 = sigma2)
# }
# numDeriv:::grad(func=dlogdnorm_dy_x, x=2.3, mu=0.3, sigma2=16)

# partial derivative - y
dlogdnorm_dy <- function(y, mu = 0, sigma2 = 1) {
  -(y - mu) / sigma2
}
# dlogdnorm_dy(y = 2.3, mu = 0.3, sigma2 = 16)
lavaan/R/ldw_trace.R0000644000176200001440000000617114627656441014030 0ustar liggesusers# create (if not already created) an environment to put cached objects in
# this is executed when the package is 'compiled' !
if (!exists("lavaan_cache_env")) lavaan_cache_env <- new.env(parent = emptyenv())

# tracing possibility in functions defined below, an example of use:
#
# in the function where you want to trace, add a line
#   ldw_trace(x)
# where x is a character string you want to show in the trace
#
# thereafter execute a script like this:
# library(lavaan)
# lavaan:::set_trace(TRUE)
# model <- '
#   # latent variable definitions
#   ind60 =~ x1 + x2 + x3
#   dem60 =~ y1 + a*y2 + b*y3 + c*y4
#   dem65 =~ y5 + a*y6 + b*y7 + c*y8
#
#   # regressions
#   dem60 ~ ind60
#   dem65 ~ ind60 + dem60
#
#   # residual correlations
#   y1 ~~ y5
#   y2 ~~ y4 + y6
#   y3 ~~ y7
#   y4 ~~ y8
#   y6 ~~ y8
# '
# fit <- sem(model, data = PoliticalDemocracy)
# summary(fit)
# lavaan:::set_trace(FALSE)
# lavaan:::print_trace("PolDem_trace.txt")
#
ldw_trace <- function(content = "") {
  ignore.in.stack <- c(
    "eval", "try", "tryCatch", "tryCatchList", "tryCatchOne", "doTryCatch",
    "which", "unique", "as.list", "as.character", "unlist", "ldw_trace",
    "source", "withVisible", "tryCatch.W.E", "withCallingHandlers", "do.call"
  )
  if (!exists("TRACE", lavaan_cache_env)) {
    return(invisible(NULL))
  }
  if (!exists("TRACENR", lavaan_cache_env)) assign("TRACENR", 1L, lavaan_cache_env)
  tracenr <- get("TRACENR", lavaan_cache_env)
  x <- sub("[() ].*$", "", as.character(sys.calls()))
  if (length(x) == 0) {
    return(invisible(NULL))
  }
  a <- paste0("trc", formatC(tracenr, format = "d", width = 5, flag = "0"))
  x <- x[!(x %in% ignore.in.stack)]
  if (length(x) > 0) {
    assign(a, list(stack = x, content = content, time = Sys.time()),
      lavaan_cache_env)
    assign("TRACENR", tracenr + 1L, lavaan_cache_env)
  }
  invisible(NULL)
}

set_trace <- function(state = NULL, silent = FALSE) {
  traceon <- exists("TRACE", lavaan_cache_env)
  msg <- ""
  if (is.null(state)) {
    rm(list = ls(lavaan_cache_env, pattern = "^trc"), envir = lavaan_cache_env)
    if (exists("TRACENR", lavaan_cache_env)) rm("TRACENR", envir = lavaan_cache_env)
    msg <- "Traces removed."
  } else if (state) {
    if (traceon) {
      msg <- "Trace already active!"
    } else {
      assign("TRACE", TRUE, lavaan_cache_env)
      msg <- "Trace on."
    }
  } else {
    if (traceon) {
      rm("TRACE", envir = lavaan_cache_env)
      msg <- "Trace off."
    } else {
      msg <- "Trace not active!"
} } if (!silent) cat(msg, "\n", sep = "") invisible(NULL) } get_trace <- function() { traceobjects <- ls(lavaan_cache_env, pattern = "^trc") if (length(traceobjects) == 0) { return(list()) } x <- mget(traceobjects, envir = lavaan_cache_env) x <- x[order(names(x))] x } print_trace <- function(file = "", clean_after = (file != "")) { cat("Trace print on ", format(Sys.time(), format = "%F"), "\n\n", file = file) x <- get_trace() for (x1 in x) { cat(format(x1$time, format = "%T"), paste(x1$stack, collapse = ">"), ":", x1$content, "\n", sep = " ", file = file, append = TRUE ) } if (clean_after) set_trace(NULL, TRUE) } lavaan/R/lav_model_hessian.R0000644000176200001440000001077414627656441015544 0ustar liggesusers# numeric approximation of the Hessian # using an analytic gradient lav_model_hessian <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavcache = NULL, group.weight = TRUE, ceq.simple = FALSE, h = 1e-06) { estimator <- lavmodel@estimator # catch numerical gradient if (lavoptions$optim.gradient == "numerical") { obj.f <- function(x) { lavmodel2 <- lav_model_set_parameters(lavmodel, x = x) lav_model_objective( lavmodel = lavmodel2, lavsamplestats = lavsamplestats, lavdata = lavdata )[1] } x <- lav_model_get_parameters(lavmodel = lavmodel) Hessian <- numDeriv::hessian(func = obj.f, x = x) return(Hessian) } # computing the Richardson extrapolation if (!ceq.simple && .hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { npar <- lavmodel@nx.unco type.glist <- "unco" } else { npar <- lavmodel@nx.free type.glist <- "free" } Hessian <- matrix(0, npar, npar) x <- lav_model_get_parameters(lavmodel = lavmodel) if (!ceq.simple && .hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { # unpack x <- drop(x %*% t(lavmodel@ceq.simple.K)) } for (j in seq_len(npar)) { # FIXME: the number below should vary as a function of 'x[j]' h.j <- h x.left <- x.left2 <- x.right <- x.right2 <- x x.left[j] <- x[j] - h.j x.left2[j] <- x[j] - 2 * h.j x.right[j] <- x[j] + h.j x.right2[j] <- x[j] + 2 * h.j g.left <- lav_model_gradient( lavmodel = lavmodel, GLIST = lav_model_x2GLIST( lavmodel = lavmodel, type = type.glist, x.left ), lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, type = "free", group.weight = group.weight, ceq.simple = ceq.simple ) g.left2 <- lav_model_gradient( lavmodel = lavmodel, GLIST = lav_model_x2GLIST( lavmodel = lavmodel, type = type.glist, x.left2 ), lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, type = "free", group.weight = group.weight, ceq.simple = ceq.simple ) g.right <- lav_model_gradient( lavmodel = lavmodel, GLIST = lav_model_x2GLIST( lavmodel = lavmodel, type = type.glist, x.right ), lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, type = "free", group.weight = group.weight, ceq.simple = ceq.simple ) g.right2 <- lav_model_gradient( lavmodel = lavmodel, GLIST = lav_model_x2GLIST( lavmodel = lavmodel, type = type.glist, x.right2 ), lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, type = "free", group.weight = group.weight, ceq.simple = ceq.simple ) Hessian[, j] <- (g.left2 - 8 * g.left + 8 * g.right - g.right2) / (12 * h.j) } # check if Hessian is (almost) symmetric, as it should be max.diff <- max(abs(Hessian - t(Hessian))) if (max.diff > 1e-05 * max(diag(Hessian))) { # hm, Hessian is not symmetric -> WARNING! lav_msg_warn(gettextf( "Hessian is not fully symmetric. 
Max diff = %1$s (Max diag Hessian = %2$s)", max.diff, max(diag(Hessian)))) # FIXME: use numDeriv::hessian instead? } Hessian <- (Hessian + t(Hessian)) / 2.0 Hessian } # if only chol would accept a complex matrix... lav_model_hessian_complex <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, group.weight = TRUE) { gradf <- function(x) { GLIST <- lav_model_x2GLIST(lavmodel = lavmodel, x = x) dx <- lav_model_gradient( lavmodel = lavmodel, GLIST = GLIST, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, type = "free", group.weight = group.weight ) dx } x <- lav_model_get_parameters(lavmodel = lavmodel) Hessian <- lav_func_jacobian_complex(func = gradf, x = x) Hessian } lavaan/R/lav_model_vcov.R0000644000176200001440000005464414627656441015073 0ustar liggesusers# bootstrap based NVCOV lav_model_nvcov_bootstrap <- function(lavmodel = NULL, lavsamplestats = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, lavdata = NULL, lavcache = NULL, lavpartable = NULL) { # number of bootstrap draws if (!is.null(lavoptions$bootstrap)) { R <- lavoptions$bootstrap } else { R <- 1000L } boot.type <- "ordinary" if ("bollen.stine" %in% lavoptions$test) { boot.type <- "bollen.stine" } TEST <- NULL COEF <- lav_bootstrap_internal( object = NULL, lavmodel. = lavmodel, lavsamplestats. = lavsamplestats, lavpartable. = lavpartable, lavoptions. = lavoptions, lavdata. = lavdata, R = R, check.post = lavoptions$check.post, type = boot.type, FUN = ifelse(boot.type == "bollen.stine", "coeftest", "coef" ) ) # warn = -1L) COEF.orig <- COEF # new in 0.6-12: always warn for failed and nonadmissible error.idx <- attr(COEF, "error.idx") nfailed <- length(error.idx) # zero if NULL if (nfailed > 0L) { lav_msg_warn(gettextf( "%s bootstrap runs failed or did not converge.", nfailed)) } notok <- length(attr(COEF, "nonadmissible")) # zero if NULL if (notok > 0L) { lav_msg_warn(gettextf( "%s bootstrap runs resulted in nonadmissible solutions.", notok)) } if (length(error.idx) > 0L) { # new in 0.6-13: we must still remove them! COEF <- COEF[-error.idx, , drop = FALSE] # this also drops the attributes } if (boot.type == "bollen.stine") { nc <- ncol(COEF) TEST <- COEF[, nc] COEF <- COEF[, -nc, drop = FALSE] } # FIXME: cov rescale? Yes for now nboot <- nrow(COEF) NVarCov <- lavsamplestats@ntotal * (cov(COEF) * (nboot - 1) / nboot) # save COEF and TEST (if any) attr(NVarCov, "BOOT.COEF") <- COEF.orig # including attributes attr(NVarCov, "BOOT.TEST") <- TEST NVarCov } # robust `sem' NVCOV (see Browne, 1984, bentler & dijkstra 1985) lav_model_nvcov_robust_sem <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, lavimplied = NULL, lavh1 = NULL, lavoptions = NULL, use.ginv = FALSE) { # compute inverse of the expected(!) information matrix if (lavmodel@estimator == "ML" && lavoptions$mimic == "Mplus") { # YR - 11 aug 2010 - what Mplus seems to do is (see Muthen apx 4 eq102) # - A1 is not based on Sigma.hat and Mu.hat, # but on lavsamplestats@cov and lavsamplestats@mean... ('unstructured') # - Gamma is not identical to what is used for WLS; closer to EQS # - N/N-1 bug in G11 for NVarCov (but not test statistic) # - we divide by N-1! 
(just like EQS) E.inv <- lav_model_information_expected_MLM( lavmodel = lavmodel, lavsamplestats = lavsamplestats, extra = TRUE, augmented = TRUE, inverted = TRUE, use.ginv = use.ginv ) } else { E.inv <- lav_model_information( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, extra = TRUE, augmented = TRUE, inverted = TRUE, use.ginv = use.ginv ) } # check if E.inv is ok if (inherits(E.inv, "try-error")) { return(E.inv) } Delta <- attr(E.inv, "Delta") WLS.V <- attr(E.inv, "WLS.V") # Gamma Gamma <- lavsamplestats@NACOV if (lavmodel@estimator == "ML" && lavoptions$mimic == "Mplus" && !lavsamplestats@NACOV.user) { # 'fix' G11 part of Gamma (NOTE: this is NOT needed for SB test # statistic for (g in 1:lavsamplestats@ngroups) { gg1 <- (lavsamplestats@nobs[[g]] - 1) / lavsamplestats@nobs[[g]] if (lavmodel@conditional.x) { nvar <- NCOL(lavsamplestats@res.cov[[g]]) } else { nvar <- NCOL(lavsamplestats@cov[[g]]) } G11 <- Gamma[[g]][1:nvar, 1:nvar, drop = FALSE] Gamma[[g]][1:nvar, 1:nvar] <- G11 * gg1 } # g } tDVGVD <- matrix(0, ncol = ncol(E.inv), nrow = nrow(E.inv)) for (g in 1:lavsamplestats@ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal if (lavoptions$mimic == "Mplus") { fg1 <- (lavsamplestats@nobs[[g]] - 1) / lavsamplestats@ntotal } else { # from 0.6 onwards, we use fg1 == fg, to be more consistent with # lav_test() fg1 <- fg } # fg twice for WLS.V, 1/fg1 once for GaMMA # if fg==fg1, there would be only one fg, as in Satorra 1999 p.8 # t(Delta) * WLS.V %*% Gamma %*% WLS.V %*% Delta if (lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { # diagonal weight matrix WD <- WLS.V[[g]] * Delta[[g]] } else { # full weight matrix WD <- WLS.V[[g]] %*% Delta[[g]] } tDVGVD <- tDVGVD + fg * fg / fg1 * crossprod(WD, Gamma[[g]] %*% WD) } # g NVarCov <- (E.inv %*% tDVGVD %*% E.inv) # to be reused by lav_test() attr(NVarCov, "Delta") <- Delta if ((lavoptions$information[1] == lavoptions$information[2]) && (lavoptions$h1.information[1] == lavoptions$h1.information[2]) && (lavoptions$information[2] == "expected" || lavoptions$observed.information[1] == lavoptions$observed.information[2])) { # only when same type of information is used # new in 0.6-6 attr(NVarCov, "E.inv") <- E.inv attr(NVarCov, "WLS.V") <- WLS.V } NVarCov } lav_model_nvcov_robust_sandwich <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, use.ginv = FALSE) { # sandwich estimator: A.inv %*% B %*% t(A.inv) # where A.inv == E.inv # B == outer product of case-wise scores # inverse observed/expected information matrix E.inv <- lav_model_information( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, extra = FALSE, augmented = TRUE, inverted = TRUE, use.ginv = use.ginv ) # check if E.inv is ok if (inherits(E.inv, "try-error")) { return(E.inv) } # new in 0.6-6, check for h1.information.meat lavoptions2 <- lavoptions if (!is.null(lavoptions$information.meat)) { lavoptions2$information <- lavoptions$information.meat } if (!is.null(lavoptions$h1.information.meat)) { lavoptions2$h1.information <- lavoptions$h1.information.meat } # outer product of case-wise scores B0 <- lav_model_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = 
lavoptions2, extra = TRUE, check.pd = FALSE, augmented = FALSE, inverted = FALSE, use.ginv = use.ginv ) # compute sandwich estimator NVarCov <- E.inv %*% B0 %*% E.inv attr(NVarCov, "B0.group") <- attr(B0, "B0.group") if ((lavoptions$information[1] == lavoptions$information[2]) && (lavoptions$h1.information[1] == lavoptions$h1.information[2]) && (lavoptions$information[2] == "expected" || lavoptions$observed.information[1] == lavoptions$observed.information[2])) { # only when same type of information is used # new in 0.6-6 attr(NVarCov, "E.inv") <- E.inv } NVarCov } # two stage # - two.stage: Gamma = I_1^{-1} # - robust.two.stage: Gamma = incomplete Gamma (I_1^{-1} J_1 I_1^{-1}) # where I_1 and J_1 are based on the (saturated) model h1 # (either unstructured, or structured) # # references: # # - Savalei \& Bentler (2009) eq (6) for se = "two.stage" # - Savalei \& Falk (2014) eq (3) for se = "robust.two.stage" # - Yuan \& Bentler (2000) lav_model_nvcov_two_stage <- function(lavmodel = NULL, lavsamplestats = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, lavdata = NULL, use.ginv = FALSE) { # expected OR observed, depending on lavoptions$information if (is.null(lavoptions) && is.null(lavoptions$information[1])) { lavoptions <- list( information = "observed", observed.information = "h1", h1.information = "structured" ) } # restrictions: # only works if: # - information is expected, # - or information is observed but with observed.information == "h1" if (lavoptions$information[1] == "observed" && lavoptions$observed.information[1] != "h1") { lav_msg_stop( gettext("two.stage + observed information currently only works with observed.information = 'h1'")) } # no weights (yet) if (!is.null(lavdata@weights[[1]])) { lav_msg_stop(gettext("two.stage + sampling.weights is not supported yet")) } # no fixed.x (yet) # if(!is.null(lavsamplestats@x.idx) && # length(lavsamplestats@x.idx[[1]]) > 0L) { # lav_msg_stop(gettext("two.stage + fixed.x = TRUE is not supported yet")) # } # information matrix E.inv <- lav_model_information( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavimplied = lavimplied, lavh1 = lavh1, extra = TRUE, augmented = TRUE, inverted = TRUE, use.ginv = use.ginv ) Delta <- attr(E.inv, "Delta") WLS.V <- attr(E.inv, "WLS.V") # this is 'H' or 'A1' in the literature attr(E.inv, "Delta") <- NULL attr(E.inv, "WLS.V") <- NULL # check if E.inv is ok if (inherits(E.inv, "try-error")) { return(E.inv) } # check WLS.V = A1 if (is.null(WLS.V)) { lav_msg_stop(gettext("WLS.V/H/A1 is NULL, observed.information = hessian?")) } # Gamma Gamma <- vector("list", length = lavsamplestats@ngroups) # handle multiple groups tDVGVD <- matrix(0, ncol = ncol(E.inv), nrow = nrow(E.inv)) for (g in 1:lavsamplestats@ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal # fg1 <- (lavsamplestats@nobs[[g]]-1)/lavsamplestats@ntotal fg1 <- fg # fg twice for WLS.V, 1/fg1 once for GaMMA # if fg==fg1, there would be only one fg, as in Satorra 1999 p.8 # t(Delta) * WLS.V %*% Gamma %*% WLS.V %*% Delta WD <- WLS.V[[g]] %*% Delta[[g]] # to compute (incomplete) GAMMA, should we use # structured or unstructured mean/sigma? 
# # we use the same setting as to compute 'H' (the h1 information matrix) # so that at Omega = H if data is complete if (lavoptions$h1.information[1] == "unstructured") { MU <- lavsamplestats@missing.h1[[g]]$mu SIGMA <- lavsamplestats@missing.h1[[g]]$sigma } else { MU <- lavimplied$mean[[g]] SIGMA <- lavimplied$cov[[g]] } # compute 'Gamma' (or Omega.beta) if (lavoptions$se == "two.stage") { # this is Savalei & Bentler (2009) if (lavoptions$information[1] == "expected") { Info <- lav_mvnorm_missing_information_expected( Y = lavdata@X[[g]], Mp = lavdata@Mp[[g]], wt = lavdata@weights[[g]], Mu = MU, Sigma = SIGMA, x.idx = lavsamplestats@x.idx[[g]] ) } else { Info <- lav_mvnorm_missing_information_observed_samplestats( Yp = lavsamplestats@missing[[g]], # wt not needed Mu = MU, Sigma = SIGMA, x.idx = lavsamplestats@x.idx[[g]] ) } Gamma[[g]] <- lav_matrix_symmetric_inverse(Info) } else { # we assume "robust.two.stage" # NACOV is here incomplete Gamma # Savalei & Falk (2014) # if (length(lavdata@cluster) > 0L) { cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]] } else { cluster.idx <- NULL } Gamma[[g]] <- lav_mvnorm_missing_h1_omega_sw( Y = lavdata@X[[g]], Mp = lavdata@Mp[[g]], Yp = lavsamplestats@missing[[g]], wt = lavdata@weights[[g]], cluster.idx = cluster.idx, Mu = MU, Sigma = SIGMA, x.idx = lavsamplestats@x.idx[[g]], information = lavoptions$information[1] ) } # compute tDVGVD <- tDVGVD + fg * fg / fg1 * crossprod(WD, Gamma[[g]] %*% WD) } # g NVarCov <- (E.inv %*% tDVGVD %*% E.inv) # to be reused by lavaanTest attr(NVarCov, "Delta") <- Delta attr(NVarCov, "Gamma") <- Gamma if ((lavoptions$information[1] == lavoptions$information[2]) && (lavoptions$h1.information[1] == lavoptions$h1.information[2]) && (lavoptions$information[2] == "expected" || lavoptions$observed.information[1] == lavoptions$observed.information[2])) { # only when same type of information is used # new in 0.6-6 attr(NVarCov, "E.inv") <- E.inv attr(NVarCov, "WLS.V") <- WLS.V } NVarCov } lav_model_vcov <- function(lavmodel = NULL, lavsamplestats = NULL, lavoptions = NULL, lavdata = NULL, lavpartable = NULL, lavcache = NULL, lavimplied = NULL, lavh1 = NULL, use.ginv = FALSE) { likelihood <- lavoptions$likelihood information <- lavoptions$information[1] # first one is for vcov se <- lavoptions$se mimic <- lavoptions$mimic # special cases if (se == "none" || se == "external" || se == "twostep") { return(matrix(0, 0, 0)) } if (se == "standard") { NVarCov <- lav_model_information( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, extra = FALSE, augmented = TRUE, inverted = TRUE, use.ginv = use.ginv ) } else if (se == "first.order") { NVarCov <- lav_model_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, extra = TRUE, check.pd = FALSE, augmented = TRUE, inverted = TRUE, use.ginv = use.ginv ) } else if (se == "robust.sem" || se == "robust.cluster.sem") { NVarCov <- lav_model_nvcov_robust_sem( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavcache = lavcache, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, use.ginv = use.ginv ) } else if (se == "robust.huber.white" || se == "robust.cluster") { NVarCov <- lav_model_nvcov_robust_sandwich( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavimplied = lavimplied, 
lavh1 = lavh1, lavoptions = lavoptions, use.ginv = use.ginv ) } else if (se %in% c("two.stage", "robust.two.stage")) { NVarCov <- lav_model_nvcov_two_stage( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, use.ginv = use.ginv ) } else if (se == "bootstrap") { NVarCov <- try( lav_model_nvcov_bootstrap( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavpartable = lavpartable ), silent = TRUE ) } else { lav_msg_warn(gettextf("unknown se type: %s", se)) } if (!inherits(NVarCov, "try-error")) { # denominator! if (lavmodel@estimator %in% c("ML", "PML", "FML") && likelihood == "normal") { if (lavdata@nlevels == 1L) { N <- lavsamplestats@ntotal # new in 0.6-9 (to mimic method="lm" in effectLite) # special case: univariate regression in each group if (lavoptions$mimic == "lm" && .hasSlot(lavmodel, "modprop") && all(lavmodel@modprop$uvreg)) { N <- sum(unlist(lavsamplestats@nobs) - (unlist(lavmodel@modprop$nexo) + 1L)) # always adding the intercept (for now) } } else { # total number of clusters (over groups) N <- 0 for (g in 1:lavsamplestats@ngroups) { N <- N + lavdata@Lp[[g]]$nclusters[[2]] } } } else { N <- lavsamplestats@ntotal - lavsamplestats@ngroups } VarCov <- 1 / N * NVarCov # check if VarCov is pd -- new in 0.6-2 # mostly important if we have (in)equality constraints (MASS::ginv!) if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { # do nothing } else if (!is.null(lavoptions$check.vcov) && lavoptions$check.vcov) { eigvals <- eigen(VarCov, symmetric = TRUE, only.values = TRUE )$values # correct for (in)equality constraints neq <- 0L niq <- 0L if (nrow(lavmodel@con.jac) > 0L) { ceq.idx <- attr(lavmodel@con.jac, "ceq.idx") cin.idx <- attr(lavmodel@con.jac, "cin.idx") ina.idx <- attr(lavmodel@con.jac, "inactive.idx") if (length(ceq.idx) > 0L) { neq <- qr(lavmodel@con.jac[ceq.idx, , drop = FALSE])$rank } if (length(cin.idx) > 0L) { niq <- length(cin.idx) - length(ina.idx) # only active } # total number of relevant constraints neiq <- neq + niq if (neiq > 0L) { eigvals <- rev(eigvals)[-seq_len(neiq)] } } min.val <- min(eigvals) # if(any(eigvals < -1 * sqrt(.Machine$double.eps)) && if (min.val < .Machine$double.eps^(3 / 4)) { # VarCov.chol <- suppressWarnings(try(chol(VarCov, # pivot = TRUE), silent = TRUE)) # VarCov.rank <- attr(VarCov.chol, "rank") # VarCov.pivot <- attr(VarCov.chol, "pivot") # VarCov.badidx <- VarCov.pivot[ VarCov.rank + 1L ] # pt.idx <- which(lavpartable$free == VarCov.badidx) # par.string <- paste(lavpartable$lhs[pt.idx], # lavpartable$op[ pt.idx], # lavpartable$rhs[pt.idx]) # if(lavdata@ngroups > 1L) { # par.string <- paste0(par.string, " in group ", # lavpartable$group[pt.idx]) # } # if(lavdata@nlevels > 1L) { # par.string <- paste0(par.string, " in level ", # lavpartable$level[pt.idx]) # } if (min.val > 0) { lav_msg_warn( gettextf("The variance-covariance matrix of the estimated parameters (vcov) does not appear to be positive definite! The smallest eigenvalue (= %e) is close to zero. This may be a symptom that the model is not identified.", min(min.val))) } else { lav_msg_warn( gettextf("The variance-covariance matrix of the estimated parameters (vcov) does not appear to be positive definite! The smallest eigenvalue (= %e) is smaller than zero. 
This may be a symptom that the model is not identified.", min(min.val))) } } } } else { lav_msg_warn( gettext("Could not compute standard errors! The information matrix could not be inverted. This may be a symptom that the model is not identified.") ) VarCov <- NULL } # could not invert VarCov } lav_model_vcov_se <- function(lavmodel, lavpartable, VCOV = NULL, BOOT = NULL) { # 0. special case if (is.null(VCOV)) { se <- rep(as.numeric(NA), lavmodel@nx.user) se[lavpartable$free == 0L] <- 0.0 return(se) } # 1. free parameters only x.var <- diag(VCOV) # check for negative values (what to do: NA or 0.0?) x.var[x.var < 0] <- as.numeric(NA) x.se <- sqrt(x.var) if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { GLIST <- lav_model_x2GLIST( lavmodel = lavmodel, x = x.se, type = "unco" ) } else { GLIST <- lav_model_x2GLIST( lavmodel = lavmodel, x = x.se, type = "free" ) } # se for full parameter table, but with 0.0 entries for def/ceq/cin # elements se <- lav_model_get_parameters( lavmodel = lavmodel, GLIST = GLIST, type = "user", extra = FALSE ) # 2. fixed parameters -> se = 0.0 se[which(lavpartable$free == 0L)] <- 0.0 # 3. defined parameters: def.idx <- which(lavpartable$op == ":=") if (length(def.idx) > 0L) { if (!is.null(BOOT)) { # we must remove the NA rows (and hope we have something left) error.idx <- attr(BOOT, "error.idx") if (length(error.idx) > 0L) { BOOT <- BOOT[-error.idx, , drop = FALSE] # drops attributes } BOOT.def <- apply(BOOT, 1L, lavmodel@def.function) if (length(def.idx) == 1L) { BOOT.def <- as.matrix(BOOT.def) } else { BOOT.def <- t(BOOT.def) } def.cov <- cov(BOOT.def) } else { # regular delta method x <- lav_model_get_parameters(lavmodel = lavmodel, type = "free") JAC <- try(lav_func_jacobian_complex(func = lavmodel@def.function, x = x), silent = TRUE ) if (inherits(JAC, "try-error")) { # eg. pnorm() JAC <- lav_func_jacobian_simple(func = lavmodel@def.function, x = x) } if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { JAC <- JAC %*% t(lavmodel@ceq.simple.K) } def.cov <- JAC %*% VCOV %*% t(JAC) } # check for negative se's diag.def.cov <- diag(def.cov) diag.def.cov[diag.def.cov < 0] <- as.numeric(NA) se[def.idx] <- sqrt(diag.def.cov) } se } lavaan/R/lav_partable_attributes.R0000644000176200001440000000364414627656441016770 0ustar liggesusers# return 'attributes' of a lavaan partable -- generate a new set if necessary lav_partable_attributes <- function(partable, pta = NULL) { if (is.null(pta)) { # attached to partable? 
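# (a precomputed pta can be cached in the attributes of the partable
# itself, cf. lav_partable_set_cache(); if those attributes contain
# 'vnames' and 'nvar', they are reused below as-is. a minimal sketch,
# assuming a toy one-factor model -- not part of the original source:)
# PT <- lavaanify("f =~ x1 + x2 + x3")
# pta <- lav_partable_attributes(PT)       # computed from scratch
# attributes(PT) <- c(attributes(PT), pta)
# lav_partable_attributes(PT)              # returned as-is (minus 'ovda')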
pta <- attributes(partable) if (!is.null(pta$vnames) && !is.null(pta$nvar)) { # looks like a pta pta$ovda <- NULL return(pta) } else { pta <- list() } } # vnames pta$vnames <- lav_partable_vnames(partable, type = "*") # vidx tmp.ov <- pta$vnames$ov tmp.lv <- pta$vnames$lv nblocks <- length(pta$vnames$ov) pta$vidx <- lapply(names(pta$vnames), function(v) { lapply(seq_len(nblocks), function(b) { if (v == "lv.marker") { match(pta$vnames[[v]][[b]], tmp.ov[[b]]) } else if (grepl("lv", v)) { match(pta$vnames[[v]][[b]], tmp.lv[[b]]) } else if (grepl("th", v)) { # thresholds have '|t' pattern tmp.th <- sapply(strsplit(pta$vnames[[v]][[b]], "|t", fixed = TRUE ), "[[", 1L) match(tmp.th, tmp.ov[[b]]) } else if (grepl("eqs", v)) { # mixture of tmp.ov/tmp.lv integer(0L) } else { match(pta$vnames[[v]][[b]], tmp.ov[[b]]) } }) }) names(pta$vidx) <- names(pta$vnames) # meanstructure pta$meanstructure <- any(partable$op == "~1") # nblocks pta$nblocks <- nblocks # ngroups pta$ngroups <- lav_partable_ngroups(partable) # nlevels pta$nlevels <- lav_partable_nlevels(partable) # nvar pta$nvar <- lapply(pta$vnames$ov, length) # nfac pta$nfac <- lapply(pta$vnames$lv, length) # nfac.nonnormal - for numerical integration pta$nfac.nonnormal <- lapply(pta$vnames$lv.nonnormal, length) # th.idx (new in 0.6-1) pta$th.idx <- lapply(seq_len(pta$nblocks), function(b) { out <- numeric(length(pta$vnames$th.mean[[b]])) idx <- (pta$vnames$th.mean[[b]] %in% pta$vnames$th[[b]]) out[idx] <- pta$vidx$th[[b]] out }) pta } lavaan/R/lav_samplestats_igamma.R0000644000176200001440000000761214627656441016602 0ustar liggesusers# YR 18 Dec 2015 # - functions to (directly) compute the inverse of 'Gamma' (the asymptotic # variance matrix of the sample statistics) # - often used as 'WLS.V' (the weight matrix in WLS estimation) # and when computing the expected information matrix # NOTE: # - three types: # 1) plain (conditional.x = FALSE, fixed.x = FALSE) # 2) fixed.x (conditional.x = FALSE, fixed.x = TRUE) # 3) conditional.x (conditional.x = TRUE) # - if conditional.x = TRUE, we ignore fixed.x (can be TRUE or FALSE) # NORMAL-THEORY lav_samplestats_Gamma_inverse_NT <- function(Y = NULL, COV = NULL, ICOV = NULL, MEAN = NULL, rescale = TRUE, x.idx = integer(0L), fixed.x = FALSE, conditional.x = FALSE, meanstructure = FALSE, slopestructure = FALSE) { # check arguments if (length(x.idx) == 0L) { conditional.x <- FALSE fixed.x <- FALSE } if (is.null(ICOV)) { if (is.null(COV)) { stopifnot(!is.null(Y)) # coerce to matrix Y <- unname(as.matrix(Y)) N <- nrow(Y) COV <- cov(Y) if (rescale) { COV <- COV * (N - 1) / N # ML version } } ICOV <- solve(COV) } # if conditional.x, we may also need COV and MEAN if (conditional.x && length(x.idx) > 0L && (meanstructure || slopestructure)) { if (is.null(COV)) { stopifnot(!is.null(Y)) # coerce to matrix Y <- unname(as.matrix(Y)) N <- nrow(Y) COV <- cov(Y) if (rescale) { COV <- COV * (N - 1) / N # ML version } } if (is.null(MEAN)) { stopifnot(!is.null(Y)) MEAN <- unname(colMeans(Y)) } } # rename S.inv <- ICOV S <- COV M <- MEAN # unconditional if (!conditional.x) { # unconditional - stochastic x if (!fixed.x) { Gamma.inv <- 0.5 * lav_matrix_duplication_pre_post(S.inv %x% S.inv) if (meanstructure) { Gamma.inv <- lav_matrix_bdiag(S.inv, Gamma.inv) } # unconditional - fixed x } else { # handle fixed.x = TRUE Gamma.inv <- 0.5 * lav_matrix_duplication_pre_post(S.inv %x% S.inv) # zero rows/cols corresponding with x/x combinations nvar <- NROW(ICOV) pstar <- nvar * (nvar + 1) / 2 M <- matrix(0, nvar, nvar) 
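# (the next two statements use M as an index map: each lower-triangular
# cell of M gets its own running position 1..pstar within vech(S), so
# vech(M[x.idx, x.idx]) returns exactly the vech-positions of the
# x-with-x elements that must be zeroed out. a small illustration,
# assuming nvar = 3 and x.idx = 3:)
# M <- matrix(0, 3, 3); M[lav_matrix_vech_idx(3)] <- 1:6
# lav_matrix_vech(M[3, 3, drop = FALSE]) # 6 = position of S[3,3] in vech(S)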
M[lav_matrix_vech_idx(nvar)] <- seq_len(pstar) zero.idx <- lav_matrix_vech(M[x.idx, x.idx, drop = FALSE]) Gamma.inv[zero.idx, ] <- 0 Gamma.inv[, zero.idx] <- 0 if (meanstructure) { S.inv.nox <- S.inv S.inv.nox[x.idx, ] <- 0 S.inv.nox[, x.idx] <- 0 Gamma.inv <- lav_matrix_bdiag(S.inv.nox, Gamma.inv) } } } else { # conditional.x # 4 possibilities: # - no meanstructure, no slopes # - meanstructure, no slopes # - no meanstructure, slopes # - meanstructure, slopes S11 <- S.inv[-x.idx, -x.idx, drop = FALSE] Gamma.inv <- 0.5 * lav_matrix_duplication_pre_post(S11 %x% S11) if (meanstructure || slopestructure) { C <- S[x.idx, x.idx, drop = FALSE] MY <- M[-x.idx] MX <- M[x.idx] C3 <- rbind( c(1, MX), cbind(MX, C + tcrossprod(MX)) ) } if (meanstructure) { if (slopestructure) { A11 <- C3 %x% S11 } else { c11 <- 1 / solve(C3)[1, 1, drop = FALSE] A11 <- c11 %x% S11 } } else { if (slopestructure) { A11 <- C %x% S11 } else { A11 <- matrix(0, 0, 0) } } if (meanstructure || slopestructure) { Gamma.inv <- lav_matrix_bdiag(A11, Gamma.inv) } } Gamma.inv } lavaan/R/xxx_fsr.R0000644000176200001440000004426314627656441013571 0ustar liggesusers# factor score regression # four methods: # - naive (regression or Bartlett) # - Skrondal & Laake (2001) (regression models only) # - Croon (2002) (general + robust SE) # - simple: always use Bartlett, replace var(f) by psi estimate # # TODO: # - Hishino & Bentler: this is simple + WLS # changes 09 dec 2018: add analytic SE ('standard') # make this the new default fsr <- function(model = NULL, data = NULL, cmd = "sem", fsr.method = "Croon", fs.method = "Bartlett", fs.scores = FALSE, mm.options = list(se = "standard", test = "standard"), Gamma.NT = TRUE, lvinfo = FALSE, mm.list = NULL, ..., output = "lavaan") { # we need full data if (is.null(data)) { lav_msg_stop(gettext("full data is required for factor score regression")) } # dot dot dot dotdotdot <- list(...) # ------------- handling of warn/debug/verbose switches ---------- if (!is.null(dotdotdot$debug)) { current.debug <- lav_debug() if (lav_debug(dotdotdot$debug)) on.exit(lav_debug(current.debug), TRUE) dotdotdot$debug <- NULL if (lav_debug()) { dotdotdot$warn <- TRUE # force warnings if debug dotdotdot$verbose <- TRUE # force verbose if debug } } if (!is.null(dotdotdot$warn)) { current.warn <- lav_warn() if (lav_warn(dotdotdot$warn)) on.exit(lav_warn(current.warn), TRUE) dotdotdot$warn <- NULL } if (!is.null(dotdotdot$verbose)) { current.verbose <- lav_verbose() if (lav_verbose(dotdotdot$verbose)) on.exit(lav_verbose(current.verbose), TRUE) dotdotdot$verbose <- NULL } # check fsr.method argument fsr.method <- tolower(fsr.method) if (fsr.method == "naive") { # nothing to do } else if (fsr.method %in% c( "skrondal", "laake", "skrondallaake", "skrondal.laake", "skrondal-laake" )) { fsr.method <- "skrondal.laake" } else if (fsr.method == "croon") { # nothing to do } else if (fsr.method == "simple") { # force fs.method to Bartlett! 
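# (Bartlett scores are conditionally unbiased, E(fs | eta) = eta, which
# is what the 'simple' correction -- replacing var(fs) by the model-based
# psi estimate, see the header comments above -- relies on)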
fs.method <- "Bartlett" } else { lav_msg_stop(gettext("invalid option for argument fsr.method:"), fsr.method) } # check fs.method argument fs.method <- tolower(fs.method) if (fs.method %in% c("bartlett", "barttlett", "bartlet")) { fs.method <- "Bartlett" } else if (fs.method == "regression") { # nothing to do } else { lav_msg_stop(gettext("invalid option for argument fs.method:"), fs.method ) } if (output %in% c("scores", "fs.scores", "fsr.scores")) { fs.scores <- TRUE } # change 'default' values for fsr if (is.null(dotdotdot$se)) { dotdotdot$se <- "standard" } if (is.null(dotdotdot$test)) { dotdotdot$test <- "standard" } if (is.null(dotdotdot$missing)) { dotdotdot$missing <- "ml" } if (is.null(dotdotdot$meanstructure)) { dotdotdot$meanstructure <- TRUE } # STEP 0: process full model, without fitting dotdotdot0 <- dotdotdot dotdotdot0$do.fit <- NULL dotdotdot0$se <- "none" # to avoid warning about missing="listwise" dotdotdot0$test <- "none" # to avoid warning about missing="listwise" # check for arguments that we do not want (eg sample.cov)? # TODO # initial processing of the model, no fitting FIT <- suppressWarnings(do.call(cmd, args = c(list( model = model, data = data, # meanstructure = TRUE, do.fit = FALSE ), dotdotdot0) )) lavoptions <- lavInspect(FIT, "options") # restore lavoptions$se <- dotdotdot$se lavoptions$test <- dotdotdot$test ngroups <- lavInspect(FIT, "ngroups") lavpta <- FIT@pta lavpartable <- lav_partable_set_cache(FIT@ParTable, lavpta) # FIXME: not ready for multiple groups yet if (ngroups > 1L) { lav_msg_stop(gettext("fsr code not ready for multiple groups (yet)")) } # if missing = "listwise", make data complete if (lavoptions$missing == "listwise") { # FIXME: make this work for multiple groups!! OV <- unique(unlist(lavpta$vnames$ov)) data <- na.omit(data[, OV]) } # any `regular' latent variables? lv.names <- unique(unlist(FIT@pta$vnames$lv.regular)) ov.names <- unique(unlist(FIT@pta$vnames$ov)) # check for higher-order factors good.idx <- logical(length(lv.names)) for (f in seq_len(length(lv.names))) { # check the indicators FAC <- lv.names[f] IND <- lavpartable$rhs[lavpartable$lhs == FAC & lavpartable$op == "=~"] if (all(IND %in% ov.names)) { good.idx[f] <- TRUE } # FIXME: check for mixed lv/ov indicators } lv.names <- lv.names[good.idx] if (length(lv.names) == 0L) { lav_msg_stop(gettext("model does not contain any (measured) latent variables")) } nfac <- length(lv.names) # check parameter table PT <- lav_partable_set_cache(parTable(FIT)) PT$est <- PT$se <- NULL # extract structural part PT.PA <- lav_partable_subset_structural_model(PT) # check if we can use skrondal & laake (no mediational terms?) if (fsr.method == "skrondal.laake") { # determine eqs.y and eqs.x names eqs.x.names <- unlist(FIT@pta$vnames$eqs.x) eqs.y.names <- unlist(FIT@pta$vnames$eqs.y) eqs.names <- unique(c(eqs.x.names, eqs.y.names)) if (any(eqs.x.names %in% eqs.y.names)) { lav_msg_stop( gettextf("mediational relationships are not allowed for the Skrondal.Laake method; use %s instead.", dQuote("Croon"))) } } # STEP 1a: compute factor scores for each measurement model (block) # how many measurement models? 
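# (mm.list, if given, is a list with one character vector per measurement
# block, naming the latent variable(s) that are fitted together in that
# block; a hypothetical call -- only allowed with fsr.method = "simple":)
# fsr(model, data = Data, fsr.method = "simple",
#     mm.list = list("f1", c("f2", "f3")))
# # -> f1 gets its own measurement model; f2 and f3 are fitted jointly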
if (!is.null(mm.list)) { if (fsr.method != "simple") { lav_msg_stop(gettext("mm.list only available if fsr.method = \"simple\"")) } nblocks <- length(mm.list) # check each measurement block for (b in seq_len(nblocks)) { if (!all(mm.list[[b]] %in% lv.names)) { lav_msg_stop( gettextf("mm.list contains unknown latent variable(s): %s", lav_msg_view(mm.list[[b]][mm.list[[b]] %in% lv.names], log.sep = "none"))) } } } else { # TODO: here comes the automatic 'detection' of linked # measurement models # # for now we take a single latent variable per measurement model block mm.list <- as.list(lv.names) nblocks <- length(mm.list) } # compute factor scores, per latent variable FS.SCORES <- vector("list", length = ngroups) LVINFO <- vector("list", length = ngroups) if (ngroups > 1L) { names(FS.SCORES) <- names(LVINFO) <- lavInspect(FIT, "group.label") } for (g in 1:ngroups) { FS.SCORES[[g]] <- vector("list", length = nblocks) # names(FS.SCORES[[g]]) <- lv.names LVINFO[[g]] <- vector("list", length = nblocks) # names(LVINFO[[g]]) <- lv.names } # adjust options dotdotdot2 <- dotdotdot dotdotdot2$se <- "none" dotdotdot2$test <- "none" dotdotdot2$debug <- FALSE # only transmitted to lavaan call = ok dotdotdot2$verbose <- FALSE # only transmitted to lavaan call = ok dotdotdot2$auto.cov.lv.x <- TRUE # allow correlated exogenous factors # override with mm.options dotdotdot2 <- modifyList(dotdotdot2, mm.options) # we assume the same number/names of lv's per group!!! MM.FIT <- vector("list", nblocks) Sigma2.block <- vector("list", nblocks) for (b in 1:nblocks) { # create parameter table for this measurement block only PT.block <- lav_partable_subset_measurement_model( PT = PT, add.lv.cov = TRUE, lv.names = mm.list[[b]] ) # fit 1-factor model fit.block <- do.call("lavaan", args = c(list( model = PT.block, data = data ), dotdotdot2) ) # check convergence if (!lavInspect(fit.block, "converged")) { lav_msg_stop( gettextf("measurement model for %s did not converge.", lav_msg_view(mm.list[[b]])) ) } # store fitted measurement model MM.FIT[[b]] <- fit.block # fs.method? if (fsr.method == "skrondal.laake") { # dependent -> Bartlett if (lv.names[b] %in% eqs.y.names) { fs.method <- "Bartlett" } else { fs.method <- "regression" } } # compute factor scores SC <- lavPredict(fit.block, method = fs.method, fsm = TRUE) FSM <- attr(SC, "fsm") attr(SC, "fsm") <- NULL # warning, FSM may be a list per pattern! # if(fit.block@Options$missing == "ml") { # # do something... # ngroups <- fit.block@Data@ngroups # FSM.missing <- FSM # FSM <- vector("list", length = "ngroups") # for(g in seq_len(ngroups)) { # # } # } LAMBDA <- computeLAMBDA(fit.block@Model) # FIXME: remove dummy lv's? THETA <- computeTHETA(fit.block@Model) # FIXME: remove not used ov? 
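# (the Croon (2002) correction computed below: with factor scores
# fs = FSM %*% y and measurement model y = LAMBDA %*% eta + eps, we have
#   cov(fs) = scale %*% PSI %*% t(scale) + offset,
# where scale = FSM %*% LAMBDA and offset = FSM %*% THETA %*% t(FSM);
# PSI is therefore recovered as
#   scale.inv %*% (cov(fs) - offset) %*% scale.inv
# which, with a single factor per block, is just (var(fs) - offset)/scale^2)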
PSI <- computeVETA(fit.block@Model) # if ngroups = 1, make list again if (ngroups == 1L) { # because lavPredict() drops the list SC <- list(SC) } # store results for (g in 1:ngroups) { FS.SCORES[[g]][[b]] <- SC[[g]] if (fsr.method %in% c("croon", "simple")) { offset <- FSM[[g]] %*% THETA[[g]] %*% t(FSM[[g]]) scale <- FSM[[g]] %*% LAMBDA[[g]] scale.inv <- solve(scale) scoffset <- scale.inv %*% offset %*% scale.inv LVINFO[[g]][[b]] <- list( lv.names = mm.list[[b]], fsm = FSM[[g]], lambda = LAMBDA[[g]], psi = PSI[[g]], theta = THETA[[g]], offset = offset, scale = scale, scale.inv = scale.inv, scoffset = scoffset ) } } # g # Delta.21: list per group Delta.21 <- lav_fsr_delta21(fit.block, FSM) # vcov Sigma1.block <- vcov(fit.block) tmp <- matrix(0, nrow(Delta.21[[1]]), nrow(Delta.21[[1]])) lavsamplestats <- fit.block@SampleStats for (g in 1:ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal tmp <- tmp + fg * (Delta.21[[g]] %*% Sigma1.block %*% t(Delta.21[[g]])) } Sigma2.block[[b]] <- tmp } # measurement block # Sigma.2 = Delta.21 %*% Sigma.1 %*% t(Delta.21) Sigma.2 <- lav_matrix_bdiag(Sigma2.block) # compute empirical covariance matrix factor scores + observed variables # in structural part group.values <- lav_partable_group_values(PT.PA) FS.COV <- vector("list", length = ngroups) FSR.COV <- vector("list", length = ngroups) FSR.COV2 <- vector("list", length = ngroups) Y <- vector("list", length = ngroups) if (lavoptions$meanstructure) { FS.MEAN <- vector("list", length = ngroups) } else { FS.MEAN <- NULL } for (g in seq_len(ngroups)) { # full data for structural model struc.names <- lavNames(PT.PA, "ov", group = group.values[g]) # reorder struc.names, so that order is the same as in MM (new in 0.6-9) lv.idx <- which(struc.names %in% lv.names) struc.names[lv.idx] <- lv.names struc.ov.idx <- which(!struc.names %in% lv.names) struc.lv.idx <- which(struc.names %in% lv.names) lv.order <- match(lv.names, struc.names) if (length(struc.ov.idx) > 0L) { ov.idx <- which(FIT@Data@ov.names[[g]] %in% struc.names[struc.ov.idx]) Y.g <- matrix(0, nrow = nrow(FS.SCORES[[g]][[1]]), ncol = length(struc.names) ) Y.g[, struc.lv.idx] <- do.call( "cbind", FS.SCORES[[g]] )[, lv.order, drop = FALSE] Y.g[, struc.ov.idx] <- FIT@Data@X[[g]][, ov.idx, drop = FALSE] } else { Y.g <- do.call("cbind", FS.SCORES[[g]])[, lv.order, drop = FALSE] } Y[[g]] <- Y.g # sample statistics for structural model COV <- cov(Y.g) # divided by N-1 if (lavoptions$likelihood == "normal") { Ng <- lavInspect(FIT, "nobs")[g] COV <- COV * (Ng - 1) / Ng } FS.COV[[g]] <- COV if (lavoptions$meanstructure) { FS.MEAN[[g]] <- colMeans(Y.g) } # STEP 1b: if using `Croon' method: correct COV matrix: if (fsr.method %in% c("croon")) { scoffset <- lav_matrix_bdiag(lapply(LVINFO[[g]], "[[", "scoffset")) scale.inv <- lav_matrix_bdiag(lapply(LVINFO[[g]], "[[", "scale.inv")) SCOFFSET <- matrix(0, nrow = length(struc.names), ncol = length(struc.names) ) SCOFFSET[struc.lv.idx, struc.lv.idx] <- scoffset SCALE.INV <- diag(length(struc.names)) SCALE.INV[struc.lv.idx, struc.lv.idx] <- scale.inv FSR.COV[[g]] <- SCALE.INV %*% FS.COV[[g]] %*% SCALE.INV - SCOFFSET } else if (fsr.method == "simple") { psi <- lav_matrix_bdiag(lapply(LVINFO[[g]], "[[", "psi")) FSR.COV[[g]] <- FS.COV[[g]] # scalar version only (for now) diag(FSR.COV[[g]])[struc.lv.idx] <- psi } else { FSR.COV[[g]] <- FS.COV[[g]] } # copy with different labels FSR.COV2[[g]] <- FSR.COV[[g]] # add row/col names rownames(FS.COV[[g]]) <- colnames(FS.COV[[g]]) <- struc.names rownames(FSR.COV[[g]]) <- 
colnames(FSR.COV[[g]]) <- struc.names rownames(FSR.COV2[[g]]) <- colnames(FSR.COV2[[g]]) <- struc.names rownames(FSR.COV2[[g]])[struc.lv.idx] <- colnames(FSR.COV2[[g]])[struc.lv.idx] <- paste(lv.names, ".si", sep = "") # check if FSR.COV is positive definite for all groups eigvals <- eigen(FSR.COV[[g]], symmetric = TRUE, only.values = TRUE)$values if (any(eigvals < .Machine$double.eps^(3 / 4))) { lav_msg_stop(gettext( "corrected covariance matrix of factor scores is not positive definite"), if (ngroups > 1L) gettextf("in group %s", g) else "" ) } } # g # STEP 1c: do we need full set of factor scores? if (fs.scores) { # transform? if (fsr.method %in% c("croon", "simple")) { for (g in 1:ngroups) { OLD.inv <- solve(FS.COV[[g]]) OLD.inv.sqrt <- lav_matrix_symmetric_sqrt(OLD.inv) FSR.COV.sqrt <- lav_matrix_symmetric_sqrt(FSR.COV[[g]]) SC <- as.matrix(Y[[g]]) SC <- SC %*% OLD.inv.sqrt %*% FSR.COV.sqrt SC <- as.data.frame(SC) names(SC) <- lv.names Y[[g]] <- SC } } # unlist if multiple groups, add group column if (ngroups == 1L) { Y <- as.data.frame(Y[[1]]) } else { lav_msg_fixme("fix this!") } } # STEP 2: fit structural model using (corrected?) factor scores # free all means/intercepts (of observed variables only) lv.names.pa <- lavNames(PT.PA, "lv") int.idx <- which(PT.PA$op == "~1" & !PT.PA$lhs %in% lv.names.pa) PT.PA$free[int.idx] <- 1L PT.PA$free[PT.PA$free > 0L] <- seq_len(sum(PT.PA$free > 0L)) PT.PA$ustart[int.idx] <- NA # adjust lavoptions if (is.null(dotdotdot$do.fit)) { lavoptions$do.fit <- TRUE } else { lavoptions$do.fit <- dotdotdot$do.fit } if (is.null(dotdotdot$se)) { lavoptions$se <- "standard" } else { lavoptions$se <- dotdotdot$se } if (is.null(dotdotdot$test)) { lavoptions$test <- "standard" } else { lavoptions$test <- dotdotdot$test } if (is.null(dotdotdot$sample.cov.rescale)) { lavoptions$sample.cov.rescale <- FALSE } else { lavoptions$sample.cov.rescale <- dotdotdot$sample.cov.rescale } # fit structural model -- point estimation ONLY lavoptions2 <- lavoptions # if(lavoptions$se == "standard") { # lavoptions2$se <- "external" # } # lavoptions2$test <- "none" lavoptions2$se <- "none" lavoptions2$test <- "none" lavoptions2$missing <- "listwise" # always complete data anyway... 
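# step 2 fits the structural part directly on the (corrected) moments of
# the factor scores; the naive vcov of that fit ignores step-1 sampling
# variability, so further below it is corrected as
#   VCOV = V1 + V2
# with V1 = I33^{-1} (inverse information of the structural parameters)
# and V2 = I33^{-1} %*% I32 %*% Sigma.2 %*% t(I32) %*% I33^{-1}, which
# propagates the uncertainty of the measurement-model estimates (Sigma.2)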
fit <- lavaan(PT.PA, sample.cov = FSR.COV, sample.mean = FS.MEAN, sample.nobs = FIT@SampleStats@nobs, slotOptions = lavoptions2 ) # only to correct the SE, we create another model, augmented with # the croon parameters PT.PA2 <- parTable(fit) PT.si <- lav_fsr_pa2si(PT.PA2, LVINFO = LVINFO) idx1 <- PT.si$free[PT.si$user == 10L & PT.si$free > 0L] idx2 <- PT.si$free[PT.si$user != 10L & PT.si$free > 0L] lavoptions3 <- lavoptions2 lavoptions3$optim.method <- "none" lavoptions3$test <- "standard" lavoptions3$se <- "none" lavoptions3$check.gradient <- FALSE lavoptions3$information <- "expected" ## FIXME: lav_model_gradient + delta fit.si2 <- lavaan(PT.si, sample.cov = FSR.COV2, sample.mean = FS.MEAN, sample.nobs = FIT@SampleStats@nobs, slotOptions = lavoptions3 ) Info.all <- lavTech(fit.si2, "information") * nobs(fit) I33 <- Info.all[idx2, idx2] I32 <- Info.all[idx2, idx1] I23 <- Info.all[idx1, idx2] I22 <- Info.all[idx1, idx1] I33.inv <- lav_matrix_symmetric_inverse(I33) V1 <- I33.inv V2 <- I33.inv %*% I32 %*% Sigma.2 %*% t(I32) %*% I33.inv VCOV <- V1 + V2 # fill in standard errors step 2 PT.PA2$se[PT.PA2$free > 0L] <- sqrt(diag(VCOV)) if (output == "lavaan" || output == "fsr") { lavoptions3$se <- "twostep" fit <- lavaan::lavaan(PT.PA2, sample.cov = FSR.COV, sample.mean = FS.MEAN, sample.nobs = FIT@SampleStats@nobs, slotOptions = lavoptions3 ) fit@vcov$vcov <- VCOV } # extra info extra <- list( FS.COV = FS.COV, FS.SCORES = Y, FSR.COV = FSR.COV, LVINFO = LVINFO, Sigma.2 = Sigma.2 ) # standard errors # lavsamplestats <- fit@SampleStats # lavsamplestats@NACOV <- Omega.f # VCOV <- lav_model_vcov(fit@Model, lavsamplestats = lavsamplestats, # lavoptions = lavoptions) # SE <- lav_model_vcov_se(fit@Model, fit@ParTable, VCOV = VCOV) # PE$se <- SE # tmp.se <- ifelse(PE$se == 0.0, NA, PE$se) # zstat <- pvalue <- TRUE # if(zstat) { # PE$z <- PE$est / tmp.se # if(pvalue) { # PE$pvalue <- 2 * (1 - pnorm( abs(PE$z) )) # } # } if (output == "fsr") { HEADER <- paste("This is fsr (0.2) -- factor score regression using ", "fsr.method = ", fsr.method, sep = "" ) out <- list(header = HEADER, MM.FIT = MM.FIT, STRUC.FIT = fit) if (lvinfo) { out$lvinfo <- extra } class(out) <- c("lavaan.fsr", "list") } else if (output %in% c("lavaan", "fit")) { out <- fit } else if (output == "extra") { out <- extra } else if (output == "lvinfo") { out <- LVINFO } else if (output %in% c("scores", "f.scores", "fs.scores")) { out <- Y } else if (output %in% c( "FSR.COV", "fsr.cov", "croon", "cov.croon", "croon.cov", "COV", "cov" )) { out <- FSR.COV } else if (output %in% c("FS.COV", "fs.cov")) { out <- FS.COV } else { lav_msg_stop(gettext("unknown output= argument:"), output) } out } lavaan/R/lav_matrix_rotate_utils.R0000644000176200001440000000737314627656440017034 0ustar liggesusers# collection of functions that deal with rotation matrices # YR 3 April 2019 -- initial version # YR 6 Jan 2023: add promax # YR 30 Jan 2024: orthogonal rotation matrix now reaches the full space # generate random orthogonal rotation matrix # # reference for the orthogonal case: # # Stewart, G. W. (1980). The Efficient Generation of Random Orthogonal Matrices # with an Application to Condition Estimators. SIAM Journal on Numerical # Analysis, 17(3), 403-409. 
# http://www.jstor.org/stable/2156882
#
lav_matrix_rotate_gen <- function(M = 10L, orthogonal = TRUE) {
  # catch M=1
  if (M == 1L) {
    return(matrix(1, 1, 1))
  }

  # create random normal matrix
  # (created before the if(): the non-orthogonal branch below needs it too)
  tmp <- matrix(rnorm(M * M), nrow = M, ncol = M)

  if (orthogonal) {
    # use QR decomposition
    qr.out <- qr(tmp)
    Q <- qr.Q(qr.out)
    R <- qr.R(qr.out)

    # ... "normalized so that the diagonal elements of R are positive"
    sign.diag.r <- sign(diag(R))
    out <- Q * rep(sign.diag.r, each = M)
  } else {
    # just normalize *columns* of tmp -> crossprod(out) has 1 on diagonal
    out <- t(t(tmp) / sqrt(diag(crossprod(tmp))))
  }

  out
}

# check if ROT is an orthogonal matrix if orthogonal = TRUE, or normal if
# orthogonal = FALSE
lav_matrix_rotate_check <- function(ROT = NULL, orthogonal = TRUE,
                                    tolerance = sqrt(.Machine$double.eps)) {
  # we assume ROT is a matrix
  M <- nrow(ROT)

  # crossprod
  RR <- crossprod(ROT)

  # target
  if (orthogonal) {
    # ROT^T %*% ROT = I
    target <- diag(M)
  } else {
    # diagonal should be 1
    target <- RR
    diag(target) <- 1
  }

  # compare for near-equality
  res <- all.equal(target = target, current = RR, tolerance = tolerance)

  # return TRUE or FALSE
  if (is.logical(res) && res) {
    out <- TRUE
  } else {
    out <- FALSE
  }

  out
}

# get weights vector needed to weight the rows using Kaiser normalization
lav_matrix_rotate_kaiser_weights <- function(A = NULL) {
  normalize <- 1 / sqrt(rowSums(A * A))
  idxZero <- which(normalize == 0)
  # catch rows with all zero (thanks to Coen Bernaards for suggesting this)
  normalize[idxZero] <- normalize[idxZero] + .Machine$double.eps
  normalize
}

# get weights vector needed to weight the rows using Cureton & Mulaik (1975)
# standardization
# see also Browne (2001) page 128-129
#
# Note: the 'final' weights are multiplied by the Kaiser weights (see CEFA)
#
lav_matrix_rotate_cm_weights <- function(A = NULL) {
  P <- nrow(A)
  M <- ncol(A)

  # first principal component of AA'
  A.eigen <- eigen(tcrossprod(A), symmetric = TRUE)
  a <- A.eigen$vectors[, 1] * sqrt(A.eigen$values[1])

  Kaiser.weights <- 1 / sqrt(rowSums(A * A))
  a.star <- abs(a * Kaiser.weights) # always between 0 and 1

  m.sqrt.inv <- 1 / sqrt(M)
  acos.m.sqrt.inv <- acos(m.sqrt.inv)
  delta <- numeric(P)
  delta[a.star < m.sqrt.inv] <- pi / 2

  tmp <- (acos.m.sqrt.inv - acos(a.star)) / (acos.m.sqrt.inv - delta) * (pi / 2)

  # add constant (see Cureton & Mulaik, 1975, page 187)
  cm <- cos(tmp) * cos(tmp) + 0.001

  # final weights = weighted by Kaiser weights
  cm * Kaiser.weights
}

# taken from the stats package, but skipping varimax (already done):
lav_matrix_rotate_promax <- function(x, m = 4, varimax.ROT = NULL) {
  # this is based on promax() from factanal.R in /src/library/stats/R

  # 1. create 'ideal' pattern matrix
  Q <- x * abs(x)^(m - 1)

  # 2. regress x on Q to obtain 'rotation matrix' (same as 'procrustes')
  U <- lm.fit(x, Q)$coefficients

  # 3. rescale so that solve(crossprod(U)) has 1 on the diagonal
  d <- diag(solve(t(U) %*% U))
  U <- U %*% diag(sqrt(d))
  dimnames(U) <- NULL

  # 4. create rotated factor matrix
  z <- x %*% U

  # 5. update rotation matrix
  U <- varimax.ROT %*% U # here we plug in the rotation matrix from varimax

  list(loadings = z, rotmat = U)
}
lavaan/R/lav_standardize.R0000644000176200001440000006616014627656441015240 0ustar liggesuserslav_standardize_lv_x <- function(x, lavobject, partable = NULL,
                                 cov.std = TRUE, lv.var = NULL,
                                 rotation = FALSE) {
  # set new values for x
  lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x)
  if (rotation) {
    x.unrotated <- x
    lavmodel@GLIST <- lavTech(lavobject, "est.unrotated") # unrotated!
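    # (for efa-style models the free parameter vector x is defined on the
    # *unrotated* scale; the rotation is re-applied below so that numeric
    # derivatives of the standardized, rotated solution -- delta-method
    # standard errors -- can be taken directly with respect to x)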
est.rot <- lav_model_efa_rotate_x( x = x.unrotated, lavmodel = lavmodel, # unrotated! lavoptions = lavobject@Options, init.rot = lavmodel@H, type = "user", extra = TRUE ) GLIST <- attr(est.rot, "extra")$GLIST attributes(est.rot) <- NULL est <- est.rot } else { GLIST <- lavmodel@GLIST # if this changes, tag @TDJorgensen in commit message est <- lav_model_get_parameters(lavmodel, type = "user") } x.stand.user <- lav_standardize_lv( lavobject = lavobject, partable = partable, est = est, GLIST = GLIST, cov.std = cov.std, lv.var = lv.var ) x.stand.user } lav_standardize_all_x <- function(x, lavobject, partable = NULL, cov.std = TRUE, rotation = FALSE) { lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x) if (rotation) { x.unrotated <- x lavmodel@GLIST <- lavTech(lavobject, "est.unrotated") # unrotated! est.rot <- lav_model_efa_rotate_x( x = x.unrotated, lavmodel = lavmodel, # unrotated! lavoptions = lavobject@Options, init.rot = lavmodel@H, type = "user", extra = TRUE ) GLIST <- attr(est.rot, "extra")$GLIST attributes(est.rot) <- NULL est <- est.rot } else { GLIST <- lavmodel@GLIST # if this changes, tag @TDJorgensen in commit message est <- lav_model_get_parameters(lavmodel, type = "user") } x.stand.user <- lav_standardize_all( lavobject = lavobject, partable = partable, est = est, est.std = NULL, GLIST = GLIST, cov.std = cov.std ) x.stand.user } lav_standardize_all_nox_x <- function(x, lavobject, partable = NULL, cov.std = TRUE, rotation = FALSE) { lavmodel <- lav_model_set_parameters(lavmodel = lavobject@Model, x = x) if (rotation) { x.unrotated <- x lavmodel@GLIST <- lavTech(lavobject, "est.unrotated") # unrotated! est.rot <- lav_model_efa_rotate_x( x = x.unrotated, lavmodel = lavmodel, # unrotated! lavoptions = lavobject@Options, init.rot = lavmodel@H, type = "user", extra = TRUE ) GLIST <- attr(est.rot, "extra")$GLIST attributes(est.rot) <- NULL est <- est.rot } else { GLIST <- lavmodel@GLIST # if this changes, tag @TDJorgensen in commit message est <- lav_model_get_parameters(lavmodel, type = "user") } x.stand.user <- lav_standardize_all_nox( lavobject = lavobject, partable = partable, est = est, est.std = NULL, GLIST = GLIST, cov.std = cov.std ) x.stand.user } lav_unstandardize_ov_x <- function(x, lavobject) { partable <- lavobject@ParTable partable$ustart <- x lav_unstandardize_ov( partable = partable, ov.var = lavobject@SampleStats@var, cov.std = TRUE ) } lav_standardize_lv <- function(lavobject = NULL, partable = NULL, est = NULL, GLIST = NULL, cov.std = TRUE, lv.var = NULL, lavmodel = NULL, lavpartable = NULL) { if (is.null(lavobject)) { stopifnot(!is.null(lavmodel)) stopifnot(!is.null(lavpartable)) if (is.null(est)) { if (!is.null(lavpartable$est)) { est <- lavpartable$est # if this changes, tag @TDJorgensen in commit message } else { lav_msg_stop(gettext("could not find `est' in lavpartable")) } } } else { lavmodel <- lavobject@Model lavpartable <- lavobject@ParTable if (is.null(est)) { est <- lav_object_inspect_est(lavobject) } } if (is.null(partable)) { partable <- lavpartable } if (is.null(GLIST)) { GLIST <- lavmodel@GLIST } out <- est N <- length(est) stopifnot(N == length(partable$lhs)) nmat <- lavmodel@nmat # compute ETA if (is.null(lv.var)) { LV.ETA <- computeVETA( lavmodel = lavmodel, GLIST = GLIST ) } for (g in 1:lavmodel@nblocks) { ov.names <- vnames(lavpartable, "ov", block = g) # not user, # which may be incomplete lv.names <- vnames(lavpartable, "lv", block = g) # shortcut: no latents in this block, nothing to do if (length(lv.names) == 0L) { next 
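    # (for blocks that do contain latent variables, the rules below are:
    # loadings are multiplied by sd(lv); regressions are divided by sd(lhs)
    # and/or multiplied by sd(rhs); (co)variances are divided by the
    # corresponding sd's -- e.g. a loading of 0.8 on a factor with variance
    # 2.25 becomes 0.8 * 1.5 = 1.2 in the std.lv solution)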
} # which mm belong to block g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (is.null(lv.var)) { ETA2 <- diag(LV.ETA[[g]]) } else { ETA2 <- lv.var[[g]] } # change negative values to NA ETA2[ETA2 < 0] <- as.numeric(NA) ETA <- sqrt(ETA2) # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] * ETA[match(partable$lhs[idx], lv.names)] # 1b. "=~" regular higher-order lv indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% ov.names) & partable$block == g) out[idx] <- (out[idx] * ETA[match(partable$lhs[idx], lv.names)] / ETA[match(partable$rhs[idx], lv.names)]) # 1c. "=~" indicators that are both in ov and lv # idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & # partable$block == g) # 2. "~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% lv.names & partable$block == g) out[idx] <- out[idx] / ETA[match(partable$lhs[idx], lv.names)] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% lv.names & partable$block == g) out[idx] <- out[idx] * ETA[match(partable$rhs[idx], lv.names)] # 3a. "~~" ov # idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & # partable$block == g) # 3b. "~~" lv # ATTENTION: in Mplus 4.1, the off-diagonal residual covariances # were computed by the formula cov(i,j) / sqrt(i.var*j.var) # were i.var and j.var where diagonal elements of ETA # # in Mplus 6.1 (but also AMOS and EQS), the i.var and j.var # elements are the 'PSI' diagonal elements!! # variances rv.idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & partable$lhs == partable$rhs & partable$block == g) out[rv.idx] <- (out[rv.idx] / ETA[match(partable$lhs[rv.idx], lv.names)] / ETA[match(partable$rhs[rv.idx], lv.names)]) # covariances lv # three types: # - only lhs is LV (and fixed.x = FALSE) # - only rhs is LV (and fixed.x = FALSE) # - both lhs and rhs are LV (regular case) if (cov.std) { if (!is.complex(est[rv.idx])) { RV <- sqrt(abs(est[rv.idx])) # abs in case of heywood cases } else { RV <- sqrt(est[rv.idx]) } rv.names <- partable$lhs[rv.idx] } # left idx.lhs <- which(partable$op == "~~" & partable$lhs %in% lv.names & partable$lhs != partable$rhs & partable$block == g) if (length(idx.lhs) > 0L) { if (cov.std == FALSE) { out[idx.lhs] <- (out[idx.lhs] / ETA[match(partable$lhs[idx.lhs], lv.names)]) } else { out[idx.lhs] <- (out[idx.lhs] / RV[match(partable$lhs[idx.lhs], rv.names)]) } } # right idx.rhs <- which(partable$op == "~~" & partable$rhs %in% lv.names & partable$lhs != partable$rhs & partable$block == g) if (length(idx.rhs) > 0L) { if (cov.std == FALSE) { out[idx.rhs] <- (out[idx.rhs] / ETA[match(partable$rhs[idx.rhs], lv.names)]) } else { out[idx.rhs] <- (out[idx.rhs] / RV[match(partable$rhs[idx.rhs], rv.names)]) } } # 4a. "~1" ov # idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & # partable$block == g) # 4b. "~1" lv idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & partable$block == g) out[idx] <- out[idx] / ETA[match(partable$lhs[idx], lv.names)] } # 5a ":=" idx <- which(partable$op == ":=") if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@def.function(x) } # 5b "==" idx <- which(partable$op == "==") if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@ceq.function(x) } # 5c. 
"<" or ">" idx <- which((partable$op == "<" | partable$op == ">")) if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@cin.function(x) } out } lav_standardize_all <- function(lavobject = NULL, partable = NULL, est = NULL, est.std = NULL, GLIST = NULL, cov.std = TRUE, ov.var = NULL, lv.var = NULL, lavmodel = NULL, lavpartable = NULL, cov.x = NULL) { if (is.null(lavobject)) { stopifnot(!is.null(lavmodel)) stopifnot(!is.null(lavpartable)) if (is.null(est)) { if (!is.null(lavpartable$est)) { est <- lavpartable$est # if this changes, tag @TDJorgensen in commit message } else { lav_msg_stop(gettext("could not find `est' in lavpartable")) } } } else { lavmodel <- lavobject@Model lavpartable <- lavobject@ParTable if (is.null(est)) { est <- lav_object_inspect_est(lavobject) } if (lavmodel@conditional.x) { if (is.null(cov.x)) { # try SampleStats slot # if("SampleStats" %in% slotNames(lavobject)) { # cov.x <- lavobject@SampleStats@cov.x if (!is.null(lavobject@implied$cov.x[[1]])) { cov.x <- lavobject@implied$cov.x # if this changes, tag @TDJorgensen in commit message } else { # perhaps lavaanList object # extract it from GLIST per block cov.x <- vector("list", length = lavmodel@nblocks) for (b in seq_len(lavmodel@nblocks)) { # which mm belong to block b? mm.in.block <- (seq_len(lavmodel@nmat[b]) + cumsum(c(0, lavmodel@nmat))[b]) MLIST <- lavmodel@GLIST[mm.in.block] cov.x[[b]] <- MLIST[["cov.x"]] } } } } } if (is.null(partable)) { partable <- lavpartable } if (is.null(GLIST)) { GLIST <- lavmodel@GLIST } if (is.null(est.std)) { est.std <- lav_standardize_lv( lavobject = lavobject, partable = partable, est = est, GLIST = GLIST, cov.std = cov.std, lv.var = lv.var, lavmodel = lavmodel, lavpartable = lavpartable ) } out <- est.std N <- length(est.std) stopifnot(N == length(partable$lhs)) VY <- computeVY( lavmodel = lavmodel, GLIST = GLIST, diagonal.only = TRUE ) for (g in 1:lavmodel@nblocks) { ov.names <- vnames(lavpartable, "ov", block = g) # not user lv.names <- vnames(lavpartable, "lv", block = g) if (is.null(ov.var)) { OV2 <- VY[[g]] # replace zero values by NA (but keep negative values) zero.idx <- which(abs(OV2) < .Machine$double.eps) if (length(zero.idx) > 0L) { OV2[zero.idx] <- as.numeric(NA) } # replace negative values by NA (for sqrt) tmp.OV2 <- OV2 neg.idx <- which(tmp.OV2 < 0) if (length(neg.idx) > 0L) { tmp.OV2[neg.idx] <- as.numeric(NA) } OV <- sqrt(tmp.OV2) } else { OV2 <- ov.var[[g]] OV <- sqrt(OV2) } if (lavmodel@conditional.x) { # extend OV with ov.names.x ov.names.x <- vnames(lavpartable, "ov.x", block = g) ov.names.nox <- vnames(lavpartable, "ov.nox", block = g) ov.names <- c(ov.names.nox, ov.names.x) OV2 <- c(OV2, diag(cov.x[[g]])) OV <- c(OV, sqrt(diag(cov.x[[g]]))) } # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] / OV[match(partable$rhs[idx], ov.names)] # 1b. "=~" regular higher-order lv indicators # 1c. "=~" indicators that are both in ov and lv # idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & # partable$block == g) # 2. "~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% ov.names & partable$block == g) out[idx] <- out[idx] / OV[match(partable$lhs[idx], ov.names)] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% ov.names & partable$block == g) out[idx] <- out[idx] * OV[match(partable$rhs[idx], ov.names)] # 3a. 
"~~" ov # ATTENTION: in Mplus 4.1, the off-diagonal residual covariances # were computed by the formula cov(i,j) / sqrt(i.var*j.var) # were i.var and j.var where diagonal elements of OV # # in Mplus 6.1 (but also AMOS and EQS), the i.var and j.var # elements are the 'THETA' diagonal elements!! # variances rv.idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs == partable$rhs & partable$block == g) # out[rv.idx] <- ( out[rv.idx] / OV[ match(partable$lhs[rv.idx], ov.names) ] # / OV[ match(partable$rhs[rv.idx], ov.names) ] ) out[rv.idx] <- (out[rv.idx] / OV2[match(partable$lhs[rv.idx], ov.names)]) # covariances ov # three types: # - only lhs is OV (and fixed.x = FALSE) # - only rhs is OV (and fixed.x = FALSE) # - both lhs and rhs are OV (regular case) if (cov.std) { if (!is.complex(est[rv.idx])) { RV <- sqrt(abs(est[rv.idx])) } else { RV <- sqrt(est[rv.idx]) } rv.names <- partable$lhs[rv.idx] } # left idx.lhs <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs != partable$rhs & partable$block == g) if (length(idx.lhs) > 0L) { if (cov.std == FALSE) { out[idx.lhs] <- (out[idx.lhs] / OV[match(partable$lhs[idx.lhs], ov.names)]) } else { out[idx.lhs] <- (out[idx.lhs] / RV[match(partable$lhs[idx.lhs], rv.names)]) } } # right idx.rhs <- which(partable$op == "~~" & !(partable$rhs %in% lv.names) & partable$lhs != partable$rhs & partable$block == g) if (length(idx.rhs) > 0L) { if (cov.std == FALSE) { out[idx.rhs] <- (out[idx.rhs] / OV[match(partable$rhs[idx.rhs], ov.names)]) } else { out[idx.rhs] <- (out[idx.rhs] / RV[match(partable$rhs[idx.rhs], rv.names)]) } } # 3b. "~~" lv # idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & # partable$block == g) # 4a. "~1" ov idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] / OV[match(partable$lhs[idx], ov.names)] # 4b. "~1" lv # idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & # partable$block == g) # 4c. "|" thresholds idx <- which(partable$op == "|" & !(partable$lhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] / OV[match(partable$lhs[idx], ov.names)] # 4d. "~*~" scales idx <- which(partable$op == "~*~" & !(partable$lhs %in% lv.names) & partable$block == g) out[idx] <- 1.0 } # 5a ":=" idx <- which(partable$op == ":=") if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@def.function(x) } # 5b "==" idx <- which(partable$op == "==") if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@ceq.function(x) } # 5c. 
"<" or ">" idx <- which((partable$op == "<" | partable$op == ">")) if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@cin.function(x) } out } lav_standardize_all_nox <- function(lavobject = NULL, partable = NULL, est = NULL, est.std = NULL, GLIST = NULL, cov.std = TRUE, ov.var = NULL, lv.var = NULL, lavmodel = NULL, lavpartable = NULL, cov.x = NULL) { if (is.null(lavobject)) { stopifnot(!is.null(lavmodel)) stopifnot(!is.null(lavpartable)) if (is.null(est)) { if (!is.null(lavpartable$est)) { est <- lavpartable$est # if this changes, tag @TDJorgensen in commit message } else { lav_msg_stop(gettext("could not find `est' in lavpartable")) } } } else { lavmodel <- lavobject@Model lavpartable <- lavobject@ParTable if (is.null(est)) { est <- lav_object_inspect_est(lavobject) } if (lavmodel@conditional.x) { if (is.null(cov.x)) { # try SampleStats slot # if("SampleStats" %in% slotNames(lavobject)) { # cov.x <- lavobject@SampleStats@cov.x if (!is.null(lavobject@implied$cov.x[[1]])) { cov.x <- lavobject@implied$cov.x # if this changes, tag @TDJorgensen in commit message } else { # perhaps lavaanList object # extract it from GLIST per block cov.x <- vector("list", length = lavmodel@nblocks) for (b in seq_len(lavmodel@nblocks)) { # which mm belong to block b? mm.in.block <- (seq_len(lavmodel@nmat[b]) + cumsum(c(0, lavmodel@nmat))[b]) MLIST <- lavmodel@GLIST[mm.in.block] cov.x[[b]] <- MLIST[["cov.x"]] } } } } } if (is.null(partable)) { partable <- lavpartable } if (is.null(GLIST)) { GLIST <- lavmodel@GLIST } if (is.null(est.std)) { est.std <- lav_standardize_lv( lavobject = lavobject, partable = partable, est = est, GLIST = GLIST, cov.std = cov.std, lv.var = lv.var, lavmodel = lavmodel, lavpartable = lavpartable ) } out <- est.std N <- length(est.std) stopifnot(N == length(partable$lhs)) VY <- computeVY( lavmodel = lavmodel, GLIST = GLIST, diagonal.only = TRUE ) for (g in 1:lavmodel@nblocks) { ov.names <- vnames(lavpartable, "ov", block = g) ov.names.x <- vnames(lavpartable, "ov.x", block = g) ov.names.nox <- vnames(lavpartable, "ov.nox", block = g) lv.names <- vnames(lavpartable, "lv", block = g) if (is.null(ov.var)) { OV2 <- VY[[g]] # replace zero values by NA (but keep negative values) zero.idx <- which(abs(OV2) < .Machine$double.eps) if (length(zero.idx) > 0L) { OV2[zero.idx] <- as.numeric(NA) } # replace negative values by NA (for sqrt) tmp.OV2 <- OV2 neg.idx <- which(tmp.OV2 < 0) if (length(neg.idx) > 0L) { tmp.OV2[neg.idx] <- as.numeric(NA) } OV <- sqrt(tmp.OV2) } else { OV2 <- ov.var[[g]] OV <- sqrt(OV2) } if (lavmodel@conditional.x) { # extend OV with ov.names.x ov.names.x <- vnames(lavpartable, "ov.x", block = g) ov.names <- c(ov.names.nox, ov.names.x) OV2 <- c(OV2, diag(cov.x[[g]])) OV <- c(OV, sqrt(diag(cov.x[[g]]))) } # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] / OV[match(partable$rhs[idx], ov.names)] # 1b. "=~" regular higher-order lv indicators # 1c. "=~" indicators that are both in ov and lv # idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & # partable$block == g) # 2. 
"~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% ov.names & partable$block == g) out[idx] <- out[idx] / OV[match(partable$lhs[idx], ov.names)] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% ov.names.nox & partable$block == g) out[idx] <- out[idx] * OV[match(partable$rhs[idx], ov.names.nox)] # 3a. "~~" ov # ATTENTION: in Mplus 4.1, the off-diagonal residual covariances # were computed by the formula cov(i,j) / sqrt(i.var*j.var) # were i.var and j.var where diagonal elements of OV # # in Mplus 6.1 (but also AMOS and EQS), the i.var and j.var # elements are the 'THETA' diagonal elements!! # variances rv.idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & !(partable$lhs %in% ov.names.x) & partable$lhs == partable$rhs & partable$block == g) # out[rv.idx] <- ( out[rv.idx] / OV[ match(partable$lhs[rv.idx], ov.names) ] # / OV[ match(partable$rhs[rv.idx], ov.names) ] ) out[rv.idx] <- (out[rv.idx] / OV2[match(partable$lhs[rv.idx], ov.names)]) # covariances ov # three types: # - only lhs is OV (and fixed.x = FALSE) # - only rhs is OV (and fixed.x = FALSE) # - both lhs and rhs are OV (regular case) if (cov.std) { if (!is.complex(est[rv.idx])) { RV <- sqrt(abs(est[rv.idx])) } else { RV <- sqrt(est[rv.idx]) } rv.names <- partable$lhs[rv.idx] } # left idx.lhs <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & !(partable$lhs %in% ov.names.x) & partable$lhs != partable$rhs & partable$block == g) if (length(idx.lhs) > 0L) { if (cov.std == FALSE) { out[idx.lhs] <- (out[idx.lhs] / OV[match(partable$lhs[idx.lhs], ov.names)]) } else { out[idx.lhs] <- (out[idx.lhs] / RV[match(partable$lhs[idx.lhs], rv.names)]) } } # right idx.rhs <- which(partable$op == "~~" & !(partable$rhs %in% lv.names) & !(partable$rhs %in% ov.names.x) & partable$lhs != partable$rhs & partable$block == g) if (length(idx.rhs) > 0L) { if (cov.std == FALSE) { out[idx.rhs] <- (out[idx.rhs] / OV[match(partable$rhs[idx.rhs], ov.names)]) } else { out[idx.rhs] <- (out[idx.rhs] / RV[match(partable$rhs[idx.rhs], rv.names)]) } } # 3b. "~~" lv # idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & # partable$block == g) # 4a. "~1" ov idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & !(partable$lhs %in% ov.names.x) & partable$block == g) out[idx] <- out[idx] / OV[match(partable$lhs[idx], ov.names)] # 4b. "~1" lv # idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & # partable$block == g) # 4c. "|" thresholds idx <- which(partable$op == "|" & !(partable$lhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] / OV[match(partable$lhs[idx], ov.names)] # 4d. "~*~" scales idx <- which(partable$op == "~*~" & !(partable$lhs %in% lv.names) & partable$block == g) out[idx] <- 1.0 } # 5a ":=" idx <- which(partable$op == ":=") if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@def.function(x) } # 5b "==" idx <- which(partable$op == "==") if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@ceq.function(x) } # 5c. 
"<" or ">" idx <- which((partable$op == "<" | partable$op == ">")) if (length(idx) > 0L) { x <- out[partable$free & !duplicated(partable$free)] out[idx] <- lavmodel@cin.function(x) } out } lav_unstandardize_ov <- function(partable, ov.var = NULL, cov.std = TRUE) { # check if ustart is missing; if so, look for est if (is.null(partable$ustart)) { partable$ustart <- partable$est } # check if block is missing if (is.null(partable$block)) { partable$block <- rep(1L, length(partable$ustart)) } stopifnot(!any(is.na(partable$ustart))) est <- out <- partable$ustart N <- length(est) # nblocks nblocks <- lav_partable_nblocks(partable) # if ov.var is NOT a list, make a list if (!is.list(ov.var)) { tmp <- ov.var ov.var <- vector("list", length = nblocks) ov.var[1:nblocks] <- list(tmp) } for (g in 1:nblocks) { ov.names <- vnames(partable, "ov", block = g) # not user lv.names <- vnames(partable, "lv", block = g) OV <- sqrt(ov.var[[g]]) # 1a. "=~" regular indicators idx <- which(partable$op == "=~" & !(partable$rhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] * OV[match(partable$rhs[idx], ov.names)] # 1b. "=~" regular higher-order lv indicators # 1c. "=~" indicators that are both in ov and lv # idx <- which(partable$op == "=~" & partable$rhs %in% ov.names # & partable$rhs %in% lv.names & # partable$block == g) # 2. "~" regressions (and "<~") idx <- which((partable$op == "~" | partable$op == "<~") & partable$lhs %in% ov.names & partable$block == g) out[idx] <- out[idx] * OV[match(partable$lhs[idx], ov.names)] idx <- which((partable$op == "~" | partable$op == "<~") & partable$rhs %in% ov.names & partable$block == g) out[idx] <- out[idx] / OV[match(partable$rhs[idx], ov.names)] # 3a. "~~" ov # ATTENTION: in Mplus 4.1, the off-diagonal residual covariances # were computed by the formula cov(i,j) / sqrt(i.var*j.var) # were i.var and j.var where diagonal elements of OV # # in Mplus 6.1 (but also AMOS and EQS), the i.var and j.var # elements are the 'THETA' diagonal elements!! # variances rv.idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs == partable$rhs & partable$block == g) out[rv.idx] <- (out[rv.idx] * OV[match(partable$lhs[rv.idx], ov.names)] * OV[match(partable$rhs[rv.idx], ov.names)]) # covariances idx <- which(partable$op == "~~" & !(partable$lhs %in% lv.names) & partable$lhs != partable$rhs & partable$block == g) if (length(idx) > 0L) { if (cov.std == FALSE) { out[idx] <- (out[idx] * OV[match(partable$lhs[idx], ov.names)] * OV[match(partable$rhs[idx], ov.names)]) } else { if (!is.complex(out[rv.idx])) { RV <- sqrt(abs(out[rv.idx])) } else { RV <- sqrt(out[rv.idx]) } rv.names <- partable$lhs[rv.idx] out[idx] <- (out[idx] * RV[match(partable$lhs[idx], rv.names)] * RV[match(partable$rhs[idx], rv.names)]) } } # 3b. "~~" lv # idx <- which(partable$op == "~~" & partable$rhs %in% lv.names & # partable$block == g) # 4a. "~1" ov idx <- which(partable$op == "~1" & !(partable$lhs %in% lv.names) & partable$block == g) out[idx] <- out[idx] * OV[match(partable$lhs[idx], ov.names)] # 4b. "~1" lv # idx <- which(partable$op == "~1" & partable$lhs %in% lv.names & # partable$block == g) } # 5a ":=" # 5b "==" # 5c. "<" or ">" out } lavaan/R/lav_partable_vnames.R0000644000176200001440000012663514627656441016101 0ustar liggesusers# lav_partable_names # # YR. 
29 june 2013 # - as separate file; used to be in utils-user.R # - lav_partable_names (aka 'vnames') allows multiple options in 'type' # returning them all as a list (or just a vector if only 1 type is needed) # public version lavNames <- function(object, type = "ov", ...) { # nolint if (inherits(object, "lavaan") || inherits(object, "lavaanList")) { partable <- object@ParTable } else if (inherits(object, "list") || inherits(object, "data.frame")) { partable <- object } else if (inherits(object, "character")) { # just a model string? partable <- lavParseModelString(object) } lav_partable_vnames(partable, type = type, ...) } # alias for backwards compatibility lavaanNames <- lavNames # nolint # return variable names in a partable # - the 'type' argument determines the status of the variable: observed, # latent, endo/exo/...; default = "ov", but most used is type = "all" # LDW 30/1/24: there is no default and most used is a single type: "all" is # rarely used in other code # - the 'group' argument either selects a single group (if group is an integer) # or returns a list per group # - the 'level' argument either selects a single level (if level is an integer) # or returns a list per level # LDW 30/1/24: 'level' argument not explicitly tested !? # - the 'block' argument either selects a single block (if block is an integer) # or returns a list per block # LDW 30/1/24: 'block' argument not explicitly tested !? # LDW 29/2/24: ov.order = "data" via attribute "ovda" lav_partable_vnames <- function(partable, type = NULL, ..., # nolint force.warn = FALSE, ov.x.fatal = FALSE) { # This function derives the names of some types of variable (as specified # in type) from a 'partable'. The 'force.warn' parameter needs no explanation. # The ov.x.fatal parameter implies, when set to TRUE, that the function # issues a 'stop' when there are exogenous variables present in variance/ # covariance or intercept formulas. # The call of this function can also contain extra parameters (...) which # have to be the name(s) of block variable(s) to be used to select names. # If more than one block variable is given, all must be satisfied to select! # The 'partable' must be a list with minimum members lhs, op, rhs. # Other members of 'partable' used (if present): block, ustart, free, exo, # user, efa, rv, 'blockname' # If the 'partable' contains an attribute vnames and the type is not "*", # the 'type' elements of this attribute are used. # ----- lav_partable_vnames ---- common ---------------------------------- # sanity check stopifnot(is.list(partable), !missing(type)) # this is a special function where the default is to suppress warnings, # overridden if the parameter force.warn is TRUE (used in lav_partable function) current.warn <- lav_warn() if (force.warn) { if (lav_warn(TRUE)) on.exit(lav_warn(current.warn)) } else { if (lav_warn(FALSE)) on.exit(lav_warn(current.warn)) } # check for empty table if (length(partable$lhs) == 0) { return(character(0L)) } # dotdotdot dotdotdot <- list(...)
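# ---------------------------------------------------------------------
# usage sketch (illustration only, not run here; 'fit' is hypothetical,
# assuming lavaan's built-in HolzingerSwineford1939 data):
#   fit <- cfa('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939)
#   lavNames(fit, "ov")           # observed variables: "x1" "x2" "x3"
#   lavNames(fit, "lv")           # latent variables:   "visual"
#   lavNames(fit, c("ov", "lv"))  # several types at once -> named list
# the full set of admissible 'type' values is enumerated below
# ---------------------------------------------------------------------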
type.list <- c( "ov", # observed variables (ov) "ov.x", # (pure) exogenous observed variables "ov.nox", # non-exogenous observed variables "ov.model", # modeled observed variables (joint vs cond) "ov.y", # (pure) endogenous variables (dependent only) "ov.num", # numeric observed variables "ov.ord", # ordinal observed variables "ov.ind", # observed indicators of latent variables "ov.orphan", # lonely observed intercepts/variances "ov.interaction", # interaction terms (with colon) "ov.efa", # indicators involved in efa "th", # thresholds ordinal only "th.mean", # thresholds ordinal + numeric variables "lv", # latent variables "lv.regular", # latent variables (defined by =~ only) "lv.formative", # latent variables (defined by <~ only) "lv.x", # (pure) exogenous variables "lv.y", # (pure) endogenous variables "lv.nox", # non-exogenous latent variables "lv.nonnormal", # latent variables with non-normal indicators "lv.interaction", # interaction terms "lv.efa", # latent variables involved in efa "lv.rv", # random slopes, random variables "lv.ind", # latent indicators (higher-order cfa) "lv.marker", # marker indicator per lv "eqs.y", # y's in regression "eqs.x" # x's in regression ) if (type[1L] != "all" && type[1L] != "*" && !all(type %in% type.list)) { wrongtypes <- type[!(type %in% type.list)] lav_msg_stop(sprintf( ngettext(length(wrongtypes), "type = %s is not a valid option", "type = %s are not valid options"), lav_msg_view(wrongtypes, "none", FALSE))) } return.value <- NULL if (type[1L] != "*" && !is.null(attr(partable, "vnames"))) { # ----- lav_partable_vnames ---- cached data -------------------------- # uncomment/comment following line to enable/disable trace # ldw_trace(paste("cached:", paste(type, collapse = ","))) if (type[1L] == "all") { return.value <- attr(partable, "vnames") } else { return.value <- attr(partable, "vnames")[type] } } # ----- lav_partable_vnames ---- common ---------------------------------- if (type[1L] == "all" || type[1L] == "*") { type <- type.list } # ALWAYS need `block' column -- create one if missing if (is.null(partable$block)) { partable$block <- rep(1L, length(partable$lhs)) } # per default, use full partable block.select <- lav_partable_block_values(partable) # check for ... selection argument(s) ndotdotdot <- length(dotdotdot) if (ndotdotdot > 0L) { dot.names <- names(dotdotdot) row.select <- rep(TRUE, length(partable$lhs)) for (dot in seq_len(ndotdotdot)) { # selection variable? block.var <- dot.names[dot] block.val <- dotdotdot[[block.var]] # do we have this 'block.var' in partable? 
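# (hedged example: a call like lavNames(fit, "ov", group = 2) arrives
#  here with block.var = "group" and block.val = 2, so that only rows
#  with partable$group == 2 survive the row selection below)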
if (is.null(partable[[block.var]])) { # for historical reasons, treat "group = 1" specially if (block.var == "group" && block.val == 1L) { partable$group <- rep(1L, length(partable$lhs)) # remove block == 0 rows from the selection (give them group 0) idx <- which(partable$block == 0L) if (length(idx) > 0L) { partable$group[idx] <- 0L } row.select <- (row.select & partable[[block.var]] %in% block.val) } else { lav_msg_stop(gettextf( "selection variable '%s' not found in the parameter table.", block.var)) } } else { if (!all(block.val %in% partable[[block.var]])) { lav_msg_stop(gettextf( "%1$s column does not contain value `%2$s'", block.var, block.val)) } row.select <- (row.select & !partable$op %in% c("==", "<", ">", ":=") & partable[[block.var]] %in% block.val) } } # dot block.select <- unique(partable$block[row.select]) if (length(block.select) == 0L) { lav_msg_warn(gettext("no blocks selected.")) } } if (is.null(return.value)) { # ----- lav_partable_vnames ---- no cache ------------------------ # uncomment/comment following line to enable/disable trace # ldw_trace(paste("computed:", paste(type, collapse = ","))) # random slope names, if any (new in 0.6-7) if (!is.null(partable$rv) && any(nchar(partable$rv) > 0L)) { rv.names <- unique(partable$rv[nchar(partable$rv) > 0L]) } else { rv.names <- character(0L) } # output: list per block return.value <- lapply(type, function(x) { vector("list", length = length(block.select)) }) names(return.value) <- type for (b in block.select) { # indices for this block block.ind <- partable$block == b # always compute lv.names lv.names <- unique(partable$lhs[block.ind & (partable$op == "=~" | partable$op == "<~")]) # including random slope names lv.names2 <- unique(c(lv.names, rv.names)) # determine lv interactions int.names <- unique(partable$rhs[block.ind & grepl(":", partable$rhs, fixed = TRUE)]) n.int <- length(int.names) if (n.int > 0L) { ok.idx <- logical(n.int) for (iv in seq_len(n.int)) { tmp.names <- strsplit(int.names[iv], ":", fixed = TRUE)[[1L]] # three scenarios: # - both variables are latent (ok) # - both variables are observed (ignore) # - only one latent (warn??)
-> upgrade observed to latent # thus if at least one is in lv.names, we treat it as a # latent interaction if (any(tmp.names %in% lv.names)) { ok.idx[iv] <- TRUE } } lv.interaction <- int.names[ok.idx] lv.names <- c(lv.names, lv.interaction) lv.names2 <- c(lv.names2, lv.interaction) } else { lv.interaction <- character(0L) } if (length(type) == 1L) { # ----- lav_partable_vnames ---- no cache ----- 1 type ----------- # store lv if ("lv" == type) { # check if FLAT for random slopes # if (!is.null(partable$rv) && any(nchar(partable$rv) > 0L) && # !is.null(partable$block)) { # return.value$lv[[b]] <- lv.names2 # } else { # here, they will be 'defined' at level 2 as regular =~ lvs return.value$lv[[b]] <- lv.names # } next } # regular latent variables ONLY (ie defined by =~ only) if ("lv.regular" == type) { out <- unique(partable$lhs[block.ind & partable$op == "=~" & !partable$lhs %in% rv.names]) return.value$lv.regular[[b]] <- out next } # interaction terms involving latent variables (only) if ("lv.interaction" == type) { return.value$lv.interaction[[b]] <- lv.interaction next } # formative latent variables ONLY (ie defined by <~ only) if ("lv.formative" == type) { out <- unique(partable$lhs[block.ind & partable$op == "<~"]) return.value$lv.formative[[b]] <- out next } # lv's involved in efa if (any(type == c("lv.efa", "ov.efa"))) { if (is.null(partable$efa)) { out <- character(0L) } else { set.names <- lav_partable_efa_values(partable) out <- unique(partable$lhs[partable$op == "=~" & block.ind & partable$efa %in% set.names]) } if (type == "ov.efa") ov_efa <- out if (type == "lv.efa") { return.value$lv.efa[[b]] <- out next } } # lv's that are random slopes if ("lv.rv" == type) { if (is.null(partable$rv)) { out <- character(0L) } else { out <- unique(partable$lhs[partable$op == "=~" & block.ind & partable$lhs %in% rv.names]) } return.value$lv.rv[[b]] <- out next } # lv's that are indicators of a higher-order factor if ("lv.ind" == type) { out <- unique(partable$rhs[block.ind & partable$op == "=~" & partable$rhs %in% lv.names]) return.value$lv.ind[[b]] <- out next } # eqs.y if (!(any(type == c("lv", "lv.regular")))) { eqs.y <- unique(partable$lhs[block.ind & partable$op == "~"]) } # store eqs.y if ("eqs.y" == type) { return.value$eqs.y[[b]] <- eqs.y next } # eqs.x if (!(any(type == c("lv", "lv.regular", "lv.x")))) { eqs.x <- unique(partable$rhs[block.ind & (partable$op == "~" | partable$op == "<~")]) } # store eqs.x if ("eqs.x" == type) { return.value$eqs.x[[b]] <- eqs.x next } # v.ind -- indicators of latent variables if (!(any(type == c("lv", "lv.regular")))) { v.ind <- unique(partable$rhs[block.ind & partable$op == "=~"]) } # ov.* if (!(any(type == c("lv", "lv.regular", "lv.x", "lv.y")))) { # 1. indicators, which are not latent variables themselves ov.ind <- v.ind[!v.ind %in% lv.names2] # 2. dependent ov's ov.y <- eqs.y[!eqs.y %in% c(lv.names2, ov.ind)] # 3. 
independent ov's if (lav_partable_nlevels(partable) > 1L && b > 1L) { # NEW in 0.6-8: if an 'x' was an 'y' in a previous level, # treat it as 'y' tmp.eqs.y <- unique(partable$lhs[partable$op == "~"]) # all blocks ov.x <- eqs.x[!eqs.x %in% c(lv.names2, ov.ind, tmp.eqs.y)] } else { ov.x <- eqs.x[!eqs.x %in% c(lv.names2, ov.ind, ov.y)] } # new in 0.6-12: if we have interaction terms in ov.x, check # if some terms are in eqs.y; if so, remove the interaction term # from ov.x int.idx <- which(grepl(":", ov.x, fixed = TRUE)) bad.idx <- integer(0L) for (iv in int.idx) { tmp.names <- strsplit(ov.x[iv], ":", fixed = TRUE)[[1L]] if (any(tmp.names %in% eqs.y)) { bad.idx <- c(bad.idx, iv) } } if (length(bad.idx) > 0L) { ov.y <- unique(c(ov.y, ov.x[bad.idx])) # it may be removed later, but needed to construct ov.names ov.x <- ov.x[-bad.idx] } } # observed variables # easy approach would be: everything that is not in lv.names, # but the main purpose here is to 'order' the observed variables # according to 'type' (indicators, ov.y, ov.x, orphans) if (!(any(type == c("lv", "lv.regular", "lv.x", "lv.y")))) { # 4. orphaned covariances ov.cov <- c( partable$lhs[block.ind & partable$op == "~~" & !partable$lhs %in% lv.names2], partable$rhs[block.ind & partable$op == "~~" & !partable$rhs %in% lv.names2] ) # 5. orphaned intercepts/thresholds ov.int <- partable$lhs[block.ind & (partable$op == "~1" | partable$op == "|") & !partable$lhs %in% lv.names2] ov.tmp <- c(ov.ind, ov.y, ov.x) ov.extra <- unique(c(ov.cov, ov.int)) # must be in this order! # so that # lav_partable_independence # retains the same order ov.names <- c(ov.tmp, ov.extra[!ov.extra %in% ov.tmp]) } # store ov? if ("ov" == type) { return.value$ov[[b]] <- ov.names next } if ("ov.ind" == type) { return.value$ov.ind[[b]] <- ov.ind next } if ("ov.interaction" == type) { ov.int.names <- ov.names[grepl(":", ov.names, fixed = TRUE)] n.int <- length(ov.int.names) if (n.int > 0L) { ov.names.noint <- ov.names[!ov.names %in% ov.int.names] ok.idx <- logical(n.int) for (iv in seq_len(n.int)) { tmp.names <- strsplit(ov.int.names[iv], ":", fixed = TRUE)[[1L]] # two scenario's: # - both variables are in ov.names.noint (ok) # - at least one variables is NOT in ov.names.noint (ignore) if (all(tmp.names %in% ov.names.noint)) { ok.idx[iv] <- TRUE } } ov.interaction <- ov.int.names[ok.idx] } else { ov.interaction <- character(0L) } return.value$ov.interaction[[b]] <- ov.interaction next } if ("ov.efa" == type) { ov.efa <- partable$rhs[partable$op == "=~" & block.ind & partable$rhs %in% ov.ind & partable$lhs %in% ov_efa] return.value$ov.efa[[b]] <- unique(ov.efa) next } # exogenous `x' covariates if (any(type == c( "ov.x", "ov.nox", "ov.model", "th.mean", "lv.nonnormal" ))) { # correction: is any of these ov.names.x mentioned as a variance, # covariance, or intercept? # this should trigger a warning in lavaanify() if (is.null(partable$user)) { # FLAT! 
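# (presumably a FLAT table -- e.g. fresh from the parser -- which does
#  not carry a 'user' column yet; assume user == 1 for every row)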
partable$user <- rep(1L, length(partable$lhs)) } vars <- c( partable$lhs[block.ind & partable$op == "~1" & partable$user == 1], partable$lhs[block.ind & partable$op == "~~" & partable$user == 1], partable$rhs[block.ind & partable$op == "~~" & partable$user == 1] ) idx.no.x <- which(ov.x %in% vars) if (length(idx.no.x)) { if (ov.x.fatal) { lav_msg_stop(gettextf( "model syntax contains variance/covariance/intercept formulas involving (an) exogenous variable(s): [%s]; Please remove them and try again.", lav_msg_view(ov.x[idx.no.x], "none"))) } lav_msg_warn(gettextf( "model syntax contains variance/covariance/intercept formulas involving (an) exogenous variable(s): [%s]; these variables will now be treated as random, introducing additional free parameters. If you wish to treat those variables as fixed, remove these formulas from the model syntax. Otherwise, consider adding the fixed.x = FALSE option.", lav_msg_view(ov.x[idx.no.x], "none"))) ov.x <- ov.x[-idx.no.x] } ov.tmp.x <- ov.x # extra if (!is.null(partable$exo)) { ov.cov <- c( partable$lhs[block.ind & partable$op == "~~" & partable$exo == 1L], partable$rhs[block.ind & partable$op == "~~" & partable$exo == 1L] ) ov.int <- partable$lhs[block.ind & partable$op == "~1" & partable$exo == 1L] ov.extra <- unique(c(ov.cov, ov.int)) ov.tmp.x <- c(ov.tmp.x, ov.extra[!ov.extra %in% ov.tmp.x]) } ov.names.x <- ov.tmp.x } # store ov.x? if ("ov.x" == type) { return.value$ov.x[[b]] <- ov.names.x next } # store ov.orphan? if ("ov.orphan" == type) { return.value$ov.orphan[[b]] <- ov.extra next } # ov's without ov.x if (any(type == c( "ov.nox", "ov.model", "th.mean", "lv.nonnormal" ))) { ov.names.nox <- ov.names[!ov.names %in% ov.names.x] } # store ov.nox if ("ov.nox" == type) { return.value$ov.nox[[b]] <- ov.names.nox next } # store ov.model if ("ov.model" == type) { # if no conditional.x, this is just ov # else, this is ov.nox if (any(block.ind & partable$op == "~" & partable$exo == 1L)) { return.value$ov.model[[b]] <- ov.names.nox } else { return.value$ov.model[[b]] <- ov.names } next } # ov's strictly ordered if (any(type == c( "ov.ord", "th", "th.mean", "ov.num", "lv.nonnormal" ))) { tmp <- unique(partable$lhs[block.ind & partable$op == "|"]) ord.names <- ov.names[ov.names %in% tmp] } if ("ov.ord" == type) { return.value$ov.ord[[b]] <- ord.names next } # ov's strictly numeric if (any(type == c("ov.num", "lv.nonnormal"))) { ov.num <- ov.names[!ov.names %in% ord.names] } if ("ov.num" == type) { return.value$ov.num[[b]] <- ov.num next } # nonnormal lv's if ("lv.nonnormal" == type) { # regular lv's lv.reg <- unique(partable$lhs[block.ind & partable$op == "=~"]) if (length(lv.reg) > 0L) { out <- unlist(lapply(lv.reg, function(x) { # get indicators for this lv tmp.ind <- unique(partable$rhs[block.ind & partable$op == "=~" & partable$lhs == x]) if (!all(tmp.ind %in% ov.num)) { return(x) } else { return(character(0)) } }), use.names = FALSE) return.value$lv.nonnormal[[b]] <- out } else { return.value$lv.nonnormal[[b]] <- character(0) } next } if (any(c("th", "th.mean") == type)) { tmp.th.lhs <- partable$lhs[block.ind & partable$op == "|"] tmp.th.rhs <- partable$rhs[block.ind & partable$op == "|"] } # threshold if ("th" == type) { if (length(ord.names) > 0L) { # return in the right order (following ord.names!)
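# (illustration: with, say, 11 thresholds a plain sort() would yield
#  "y|t1" "y|t10" "y|t11" "y|t2" ...; the split-and-numeric-sort below
#  restores "y|t1" "y|t2" ... "y|t10" "y|t11")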
out <- unlist(lapply(ord.names, function(x) { idx <- which(x == tmp.th.lhs) tmp.th <- unique(paste(tmp.th.lhs[idx], "|", tmp.th.rhs[idx], sep = "" )) # make sure the th's are in increasing order # sort(tmp.th) # NO!, don't do that; t10 will be before t2 # fixed in 0.6-1 (bug report from Myrsini) # in 0.6-12, we do this anyway like this: # get var name tmp.th1 <- sapply( strsplit(tmp.th, split = "\\|t"), "[[", 1 ) # get number, and sort tmp.th2 <- as.character(sort(as.integer(sapply( strsplit(tmp.th, split = "\\|t"), "[[", 2 )))) # paste back together in the right order paste(tmp.th1, tmp.th2, sep = "|t") }), use.names = FALSE) } else { out <- character(0L) } return.value$th[[b]] <- out next } # thresholds and mean/intercepts of numeric variables if ("th.mean" == type) { # if fixed.x -> use ov.names.nox # else -> use ov.names if (is.null(partable$exo) || all(partable$exo == 0L)) { tmp.ov.names <- ov.names } else { tmp.ov.names <- ov.names.nox } if (length(tmp.ov.names) > 0L) { # return in the right order (following ov.names.nox!) out <- unlist(lapply(tmp.ov.names, function(x) { if (x %in% ord.names) { idx <- which(x == tmp.th.lhs) tmp.th <- unique( paste(tmp.th.lhs[idx], "|", tmp.th.rhs[idx], sep = "" ), use.names = FALSE ) # make sure the th's are in increasing order # get var name tmp.th1 <- sapply( strsplit(tmp.th, split = "\\|t"), "[[", 1 ) # get number, and sort tmp.th2 <- as.character(sort(as.integer(sapply( strsplit(tmp.th, split = "\\|t"), "[[", 2 )))) # paste back together in the right order paste(tmp.th1, tmp.th2, sep = "|t") } else { x } })) } else { out <- character(0L) } return.value$th.mean[[b]] <- out next } # exogenous lv's if (any(c("lv.x", "lv.nox") == type)) { tmp <- lv.names[!lv.names %in% c(v.ind, eqs.y)] lv.names.x <- lv.names[lv.names %in% tmp] } if ("lv.x" == type) { return.value$lv.x[[b]] <- lv.names.x next } # dependent ov (but not also indicator or x) if ("ov.y" == type) { tmp <- eqs.y[!eqs.y %in% c(v.ind, eqs.x, lv.names)] return.value$ov.y[[b]] <- ov.names[ov.names %in% tmp] next } # dependent lv (but not also indicator or x) if ("lv.y" == type) { tmp <- eqs.y[!eqs.y %in% c(v.ind, eqs.x) & eqs.y %in% lv.names] return.value$lv.y[[b]] <- lv.names[lv.names %in% tmp] next } # non-exogenous latent variables if ("lv.nox" == type) { return.value$lv.nox[[b]] <- lv.names[!lv.names %in% lv.names.x] next } # marker indicator (if any) for each lv if ("lv.marker" == type) { # default: "" per lv out <- character(length(lv.names)) names(out) <- lv.names for (l in seq_len(length(lv.names))) { this.lv.name <- lv.names[l] # try to see if we can find a 'marker' indicator for this factor marker.idx <- which(block.ind & partable$op == "=~" & partable$lhs == this.lv.name & partable$rhs %in% v.ind & partable$ustart == 1L & partable$free == 0L) if (length(marker.idx) == 1L) { # unique only!!
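# (sketch of the idea: in 'f =~ 1*x1 + x2 + x3' the loading of x1 is
#  fixed to 1, so x1 is the candidate marker for f; it is accepted
#  below unless some other factor has a loading on x1 that is fixed
#  to a nonzero value)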
# check if 'other' loadings are fixed to zero other.idx <- which(block.ind & partable$op == "=~" & partable$lhs != this.lv.name & partable$rhs == partable$rhs[marker.idx] & partable$free == 0L) if (length(other.idx) == 0L) { out[l] <- partable$rhs[marker.idx] } else if (all(partable$ustart[other.idx] == 0)) { out[l] <- partable$rhs[marker.idx] } } } return.value$lv.marker[[b]] <- out } } else { # ----- lav_partable_vnames ---- no cache ----- more than 1 type ------ # store lv if (any("lv" == type)) { # check if FLAT for random slopes # if (!is.null(partable$rv) && any(nchar(partable$rv) > 0L) && # !is.null(partable$block)) { # return.value$lv[[b]] <- lv.names2 # } else { # here, they will be 'defined' at level 2 as regular =~ lvs return.value$lv[[b]] <- lv.names # } } # regular latent variables ONLY (ie defined by =~ only) if (any("lv.regular" == type)) { out <- unique(partable$lhs[block.ind & partable$op == "=~" & !partable$lhs %in% rv.names]) return.value$lv.regular[[b]] <- out } # interaction terms involving latent variables (only) if (any("lv.interaction" == type)) { return.value$lv.interaction[[b]] <- lv.interaction } # formative latent variables ONLY (ie defined by <~ only) if (any("lv.formative" == type)) { out <- unique(partable$lhs[block.ind & partable$op == "<~"]) return.value$lv.formative[[b]] <- out } # lv's involved in efa if (any(type %in% c("lv.efa", "ov.efa"))) { if (is.null(partable$efa)) { out <- character(0L) } else { set.names <- lav_partable_efa_values(partable) out <- unique(partable$lhs[partable$op == "=~" & block.ind & partable$efa %in% set.names]) } if (any(type == "ov.efa")) ov_efa <- out if (any(type == "lv.efa")) return.value$lv.efa[[b]] <- out } # lv's that are random slopes if (any("lv.rv" == type)) { if (is.null(partable$rv)) { out <- character(0L) } else { out <- unique(partable$lhs[partable$op == "=~" & block.ind & partable$lhs %in% rv.names]) } return.value$lv.rv[[b]] <- out } # lv's that are indicators of a higher-order factor if (any("lv.ind" == type)) { out <- unique(partable$rhs[block.ind & partable$op == "=~" & partable$rhs %in% lv.names]) return.value$lv.ind[[b]] <- out } # eqs.y eqs.y <- unique(partable$lhs[block.ind & partable$op == "~"]) # store eqs.y if (any("eqs.y" == type)) { return.value$eqs.y[[b]] <- eqs.y } # eqs.x eqs.x <- unique(partable$rhs[block.ind & (partable$op == "~" | partable$op == "<~")]) # store eqs.x if (any("eqs.x" == type)) { return.value$eqs.x[[b]] <- eqs.x } # v.ind -- indicators of latent variables v.ind <- unique(partable$rhs[block.ind & partable$op == "=~"]) # ov.* # 1. indicators, which are not latent variables themselves ov.ind <- v.ind[!v.ind %in% lv.names2] # 2. dependent ov's ov.y <- eqs.y[!eqs.y %in% c(lv.names2, ov.ind)] # 3. 
independent ov's if (lav_partable_nlevels(partable) > 1L && b > 1L) { # NEW in 0.6-8: if an 'x' was an 'y' in a previous level, # treat it as 'y' tmp.eqs.y <- unique(partable$lhs[partable$op == "~"]) # all blocks ov.x <- eqs.x[!eqs.x %in% c(lv.names2, ov.ind, tmp.eqs.y)] } else { ov.x <- eqs.x[!eqs.x %in% c(lv.names2, ov.ind, ov.y)] } # new in 0.6-12: if we have interaction terms in ov.x, check # if some terms are in eqs.y; if so, remove the interaction term # from ov.x int.idx <- which(grepl(":", ov.x, fixed = TRUE)) bad.idx <- integer(0L) for (iv in int.idx) { tmp.names <- strsplit(ov.x[iv], ":", fixed = TRUE)[[1L]] if (any(tmp.names %in% eqs.y)) { bad.idx <- c(bad.idx, iv) } } if (length(bad.idx) > 0L) { ov.y <- unique(c(ov.y, ov.x[bad.idx])) # it may be removed later, but needed to construct ov.names ov.x <- ov.x[-bad.idx] } # observed variables # easy approach would be: everything that is not in lv.names, # but the main purpose here is to 'order' the observed variables # according to 'type' (indicators, ov.y, ov.x, orphans) # 4. orphaned covariances ov.cov <- c( partable$lhs[block.ind & partable$op == "~~" & !partable$lhs %in% lv.names2], partable$rhs[block.ind & partable$op == "~~" & !partable$rhs %in% lv.names2] ) # 5. orphaned intercepts/thresholds ov.int <- partable$lhs[block.ind & (partable$op == "~1" | partable$op == "|") & !partable$lhs %in% lv.names2] ov.tmp <- c(ov.ind, ov.y, ov.x) ov.extra <- unique(c(ov.cov, ov.int)) # must be in this order! # so that # lav_partable_independence # retains the same order ov.names <- c(ov.tmp, ov.extra[!ov.extra %in% ov.tmp]) # store ov? if (any("ov" == type)) { return.value$ov[[b]] <- ov.names } if (any("ov.ind" == type)) { return.value$ov.ind[[b]] <- ov.ind } if (any("ov.interaction" == type)) { ov.int.names <- ov.names[grepl(":", ov.names, fixed = TRUE)] n.int <- length(ov.int.names) if (n.int > 0L) { ov.names.noint <- ov.names[!ov.names %in% ov.int.names] ok.idx <- logical(n.int) for (iv in seq_len(n.int)) { tmp.names <- strsplit(ov.int.names[iv], ":", fixed = TRUE)[[1L]] # two scenario's: # - both variables are in ov.names.noint (ok) # - at least one variables is NOT in ov.names.noint (ignore) if (all(tmp.names %in% ov.names.noint)) { ok.idx[iv] <- TRUE } } ov.interaction <- ov.int.names[ok.idx] } else { ov.interaction <- character(0L) } return.value$ov.interaction[[b]] <- ov.interaction } if (any("ov.efa" == type)) { ov.efa <- partable$rhs[partable$op == "=~" & block.ind & partable$rhs %in% ov.ind & partable$lhs %in% ov_efa] return.value$ov.efa[[b]] <- unique(ov.efa) } # exogenous `x' covariates if (any(type %in% c( "ov.x", "ov.nox", "ov.model", "th.mean", "lv.nonnormal" ))) { # correction: is any of these ov.names.x mentioned as a variance, # covariance, or intercept? # this should trigger a warning in lavaanify() if (is.null(partable$user)) { # FLAT! 
partable$user <- rep(1L, length(partable$lhs)) } vars <- c( partable$lhs[block.ind & partable$op == "~1" & partable$user == 1], partable$lhs[block.ind & partable$op == "~~" & partable$user == 1], partable$rhs[block.ind & partable$op == "~~" & partable$user == 1] ) idx.no.x <- which(ov.x %in% vars) if (length(idx.no.x)) { if (ov.x.fatal) { lav_msg_stop(gettextf( "model syntax contains variance/covariance/intercept formulas involving (an) exogenous variable(s): [%s]; Please remove them and try again.", lav_msg_view(ov.x[idx.no.x], "none"))) } lav_msg_warn(gettextf( "model syntax contains variance/covariance/intercept formulas involving (an) exogenous variable(s): [%s]; these variables will now be treated as random introducing additional free parameters. If you wish to treat those variables as fixed, remove these formulas from the model syntax. Otherwise, consider adding the fixed.x = FALSE option.", lav_msg_view(ov.x[idx.no.x], "none"))) ov.x <- ov.x[-idx.no.x] } ov.tmp.x <- ov.x # extra if (!is.null(partable$exo)) { ov.cov <- c( partable$lhs[block.ind & partable$op == "~~" & partable$exo == 1L], partable$rhs[block.ind & partable$op == "~~" & partable$exo == 1L] ) ov.int <- partable$lhs[block.ind & partable$op == "~1" & partable$exo == 1L] ov.extra <- unique(c(ov.cov, ov.int)) ov.tmp.x <- c(ov.tmp.x, ov.extra[!ov.extra %in% ov.tmp.x]) } ov.names.x <- ov.tmp.x } # store ov.x? if (any("ov.x" == type)) { return.value$ov.x[[b]] <- ov.names.x } # story ov.orphan? if (any("ov.orphan" == type)) { return.value$ov.orphan[[b]] <- ov.extra } # ov's withouth ov.x if (any(type %in% c( "ov.nox", "ov.model", "th.mean", "lv.nonnormal" ))) { ov.names.nox <- ov.names[!ov.names %in% ov.names.x] } # store ov.nox if (any("ov.nox" == type)) { return.value$ov.nox[[b]] <- ov.names.nox } # store ov.model if (any("ov.model" == type)) { # if no conditional.x, this is just ov # else, this is ov.nox if (any(block.ind & partable$op == "~" & partable$exo == 1L)) { return.value$ov.model[[b]] <- ov.names.nox } else { return.value$ov.model[[b]] <- ov.names } } # ov's strictly ordered if (any(type %in% c( "ov.ord", "th", "th.mean", "ov.num", "lv.nonnormal" ))) { tmp <- unique(partable$lhs[block.ind & partable$op == "|"]) ord.names <- ov.names[ov.names %in% tmp] } if (any("ov.ord" == type)) { return.value$ov.ord[[b]] <- ord.names } # ov's strictly numeric if (any(type %in% c("ov.num", "lv.nonnormal"))) { ov.num <- ov.names[!ov.names %in% ord.names] } if (any("ov.num" == type)) { return.value$ov.num[[b]] <- ov.num } # nonnormal lv's if (any("lv.nonnormal" == type)) { # regular lv's lv.reg <- unique(partable$lhs[block.ind & partable$op == "=~"]) if (length(lv.reg) > 0L) { out <- unlist(lapply(lv.reg, function(x) { # get indicators for this lv tmp.ind <- unique(partable$rhs[block.ind & partable$op == "=~" & partable$lhs == x]) if (!all(tmp.ind %in% ov.num)) { return(x) } else { return(character(0)) } }), use.names = FALSE) return.value$lv.nonnormal[[b]] <- out } else { return.value$lv.nonnormal[[b]] <- character(0) } } if (any(c("th", "th.mean") %in% type)) { tmp.th.lhs <- partable$lhs[block.ind & partable$op == "|"] tmp.th.rhs <- partable$rhs[block.ind & partable$op == "|"] } # threshold if (any("th" == type)) { if (length(ord.names) > 0L) { # return in the right order (following ord.names!) 
out <- unlist(lapply(ord.names, function(x) { idx <- which(x == tmp.th.lhs) tmp.th <- unique(paste(tmp.th.lhs[idx], "|", tmp.th.rhs[idx], sep = "" )) # make sure the th's are in increasing order # sort(tmp.th) # NO!, don't do that; t10 will be before t2 # fixed in 0.6-1 (bug report from Myrsini) # in 0.6-12, we do this anyway like this: # get var name tmp.th1 <- sapply( strsplit(tmp.th, split = "\\|t"), "[[", 1 ) # get number, and sort tmp.th2 <- as.character(sort(as.integer(sapply( strsplit(tmp.th, split = "\\|t"), "[[", 2 )))) # paste back togehter in the right order paste(tmp.th1, tmp.th2, sep = "|t") }), use.names = FALSE) } else { out <- character(0L) } return.value$th[[b]] <- out } # thresholds and mean/intercepts of numeric variables if (any("th.mean" == type)) { # if fixed.x -> use ov.names.nox # else -> use ov.names if (is.null(partable$exo) || all(partable$exo == 0L)) { tmp.ov.names <- ov.names } else { tmp.ov.names <- ov.names.nox } if (length(tmp.ov.names) > 0L) { # return in the right order (following ov.names.nox!) out <- unlist(lapply(tmp.ov.names, function(x) { if (x %in% ord.names) { idx <- which(x == tmp.th.lhs) tmp.th <- unique(paste(tmp.th.lhs[idx], "|", tmp.th.rhs[idx], sep = "" )) # make sure the th's are in increasing order # get var name tmp.th1 <- sapply( strsplit(tmp.th, split = "\\|t"), "[[", 1 ) # get number, and sort tmp.th2 <- as.character(sort(as.integer(sapply( strsplit(tmp.th, split = "\\|t"), "[[", 2 )))) # paste back togehter in the right order paste(tmp.th1, tmp.th2, sep = "|t") } else { x } }), use.names = FALSE) } else { out <- character(0L) } return.value$th.mean[[b]] <- out } # exogenous lv's if (any(c("lv.x", "lv.nox") %in% type)) { tmp <- lv.names[!lv.names %in% c(v.ind, eqs.y)] lv.names.x <- lv.names[lv.names %in% tmp] } if (any("lv.x" == type)) { return.value$lv.x[[b]] <- lv.names.x } # dependent ov (but not also indicator or x) if (any("ov.y" == type)) { tmp <- eqs.y[!eqs.y %in% c(v.ind, eqs.x, lv.names)] return.value$ov.y[[b]] <- ov.names[ov.names %in% tmp] } # dependent lv (but not also indicator or x) if (any("lv.y" == type)) { tmp <- eqs.y[!eqs.y %in% c(v.ind, eqs.x) & eqs.y %in% lv.names] return.value$lv.y[[b]] <- lv.names[lv.names %in% tmp] } # non-exogenous latent variables if (any("lv.nox" == type)) { return.value$lv.nox[[b]] <- lv.names[!lv.names %in% lv.names.x] } # marker indicator (if any) for each lv if (any("lv.marker" == type)) { # default: "" per lv out <- character(length(lv.names)) names(out) <- lv.names for (l in seq_len(length(lv.names))) { this.lv.name <- lv.names[l] # try to see if we can find a 'marker' indicator for this factor marker.idx <- which(block.ind & partable$op == "=~" & partable$lhs == this.lv.name & partable$rhs %in% v.ind & partable$ustart == 1L & partable$free == 0L) if (length(marker.idx) == 1L) { # unique only!! # check if 'other' loadings are fixed to zero other.idx <- which(block.ind & partable$op == "=~" & partable$lhs != this.lv.name & partable$rhs == partable$rhs[marker.idx] & partable$free == 0L) if (length(other.idx) == 0L) { out[l] <- partable$rhs[marker.idx] } else if (all(partable$ustart[other.idx] == 0)) { out[l] <- partable$rhs[marker.idx] } } } return.value$lv.marker[[b]] <- out } } } # b # ----- lav_partable_vnames ---- no cache ------------------------ # new in 0.6-14: if 'da' operator, change order! 
(for ov.order = "data") # now via attribute "ovda" ov.names.data <- attr(partable, "ovda") if (!is.null(ov.names.data)) { return.value <- lapply(return.value, function(x) { for (b in seq_len(length(x))) { m <- match(x[[b]], ov.names.data) target.idx <- which(!is.na(m)) if (length(target.idx) > 1L) { x[[b]][target.idx] <- x[[b]][target.idx][order(m[target.idx])] } } x }) } } # ----- lav_partable_vnames ---- common ------ 1 type -------- # to mimic old behaviour, if length(type) == 1L if (length(type) == 1L) { return.value <- return.value[[type]] # to mimic old behaviour, if specific block is requested if (ndotdotdot == 0L) { if (type == "lv.marker") { return.value <- unlist(return.value) # no unique(), as unique() drops attributes, and reduces # c("", "", "") to a single "" # (but, say for 2 groups, you get 2 copies) # as this is only for 'display', we leave it like that } else { return.value <- unique(unlist(return.value)) } } else if (length(block.select) == 1L) { return.value <- return.value[[block.select]] } else { return.value <- return.value[block.select] } } # ----- lav_partable_vnames ---- common ------------------------ return.value } # alias for backward compatibility vnames <- lav_partable_vnames lavaan/R/lav_object_generate.R0000644000176200001440000003421414627656441016045 0ustar liggesusers# here, we generate new models based on the original model in lavobject # 1. the independence model # 2. the unrestricted model # 3. model + extra parameters (for modindices/lavTestScore) # 4. catML fit based on DWLS fit (for robust RMSEA/CFI) # 1. fit an 'independence' model # note that for ML (and ULS and DWLS), the 'estimates' of the # independence model are simply the observed variances # but for GLS and WLS, this is not the case!! lav_object_independence <- function(object = NULL, # or lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, lavoptions = NULL, lavpartable = NULL, lavh1 = NULL, # local options se = FALSE) { # object or slots? 
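# (two calling styles, sketched: lav_object_independence(object = fit)
#  with a fitted lavaan object, or -- e.g. when no lavaan object is
#  available -- pass the slots directly, as in
#  lav_object_independence(lavsamplestats = ..., lavdata = ...,
#  lavoptions = ..., lavpartable = ...); the branches below handle both)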
if (!is.null(object)) { stopifnot(inherits(object, "lavaan")) # extract needed slots lavsamplestats <- object@SampleStats lavdata <- object@Data lavcache <- object@Cache lavoptions <- object@Options lavpta <- object@pta if (.hasSlot(object, "h1")) { lavh1 <- object@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = object@Data, lavsamplestats = object@SampleStats, lavoptions = object@Options ) } if (is.null(lavoptions$estimator.args)) { lavoptions$estimator.args <- list() } } else { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } # if two-level, force conditional.x = FALSE (for now) if (lavdata@nlevels > 1L && lavoptions$conditional.x) { lavoptions$conditional.x <- FALSE } # construct parameter table for independence model lavpartable <- lav_partable_indep_or_unrestricted( lavobject = NULL, lavdata = lavdata, lavpta = lavpta, lavoptions = lavoptions, lavsamplestats = lavsamplestats, lavh1 = lavh1, independent = TRUE ) # new in 0.6-6: add lower bounds for ov.var if (!is.null(lavoptions$optim.bounds)) { lavoptions$bounds <- "doe.maar" lavoptions$effect.coding <- "" # to avoid warning lavoptions$optim.bounds <- list(lower = "ov.var") lavpartable <- lav_partable_add_bounds( partable = lavpartable, lavh1 = lavh1, lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } # new in 0.6-8: if DLS, change to sample-based if (lavoptions$estimator == "DLS") { if (lavoptions$estimator.args$dls.GammaNT == "sample") { # nothing to do } else { lavoptions$estimator.args$dls.GammaNT <- "sample" dls.a <- lavoptions$estimator.args$dls.a for (g in 1:lavsamplestats@ngroups) { GammaNT <- lav_samplestats_Gamma_NT( COV = lavsamplestats@cov[[g]], MEAN = lavsamplestats@mean[[g]], rescale = FALSE, x.idx = lavsamplestats@x.idx[[g]], fixed.x = lavoptions$fixed.x, conditional.x = lavoptions$conditional.x, meanstructure = lavoptions$meanstructure, slopestructure = lavoptions$conditional.x ) W.DLS <- (1 - dls.a) * lavsamplestats@NACOV[[g]] + dls.a * GammaNT # overwrite lavsamplestats@WLS.V[[g]] <- lav_matrix_symmetric_inverse(W.DLS) } } } # se if (se) { if (lavoptions$se == "none") { lavoptions$se <- "standard" } } else { # 0.6-18: slower, but safer to just keep it ## FIXME: if test = scaled, we need it anyway? # if(lavoptions$missing %in% c("two.stage", "two.stage.robust")) { # don't touch it # } else { # lavoptions$se <- "none" # } } # change options lavoptions$h1 <- FALSE # already provided by lavh1 lavoptions$baseline <- FALSE # of course lavoptions$loglik <- TRUE # eg for multilevel lavoptions$implied <- TRUE # needed for loglik (multilevel) lavoptions$check.start <- FALSE lavoptions$check.gradient <- FALSE lavoptions$check.post <- FALSE lavoptions$check.vcov <- FALSE lavoptions$optim.bounds <- list() # we already have the bounds lavoptions$rstarts <- 0L # no random starts # ALWAYS do.fit and set optim.method = "nlminb" (if npar > 0) npar <- lav_partable_npar(lavpartable) if (npar > 0L) { lavoptions$do.fit <- TRUE lavoptions$optim.method <- "nlminb" } else { # perhaps a correlation structure? lavoptions$optim.method <- "none" lavoptions$optim.force.converged <- TRUE } # needed? if (any(lavpartable$op == "~1")) lavoptions$meanstructure <- TRUE # FIXME: it is crucial that the order of the ov's, as returned by # lavNames() remains the same # so lavNames(object) should equal lavNames(lavpartable) # otherwise, we will use the wrong sample statistics!!! 
# # this seems ok now, because we first generate the covariances in # lavpartable, and they should be in the right order (unlike the # intercepts) FIT <- lavaan(lavpartable, slotOptions = lavoptions, slotSampleStats = lavsamplestats, slotData = lavdata, slotCache = lavcache, sloth1 = lavh1 ) FIT } # 2. unrestricted model lav_object_unrestricted <- function(object, se = FALSE) { # construct parameter table for unrestricted model lavpartable <- lav_partable_unrestricted(object) # adapt options lavoptions <- object@Options # se if (se) { if (lavoptions$se == "none") { lavoptions$se <- "standard" } } else { ## FIXME: if test = scaled, we need it anyway? lavoptions$se <- "none" } # ALWAYS do.fit lavoptions$do.fit <- TRUE # needed? if (any(lavpartable$op == "~1")) lavoptions$meanstructure <- TRUE if (.hasSlot(object, "h1")) { lavh1 <- object@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = object@Data, lavsamplestats = object@SampleStats, lavoptions = object@Options ) } FIT <- lavaan(lavpartable, slotOptions = lavoptions, slotSampleStats = object@SampleStats, slotData = object@Data, slotCache = object@Cache, sloth1 = lavh1 ) FIT } # 3. extended model lav_object_extended <- function(object, add = NULL, remove.duplicated = TRUE, all.free = FALSE, do.fit = FALSE) { # partable original model partable <- object@ParTable[c( "lhs", "op", "rhs", "free", "exo", "label", "plabel" )] # new in 0.6-3: check for non-parameters nonpar.idx <- which(partable$op %in% c("==", ":=", "<", ">")) # always add block/group/level if (!is.null(object@ParTable$group)) { partable$group <- object@ParTable$group } else { partable$group <- rep(1L, length(partable$lhs)) if (length(nonpar.idx) > 0L) { partable$group[nonpar.idx] <- 0L } } if (!is.null(object@ParTable$level)) { partable$level <- object@ParTable$level } else { partable$level <- rep(1L, length(partable$lhs)) if (length(nonpar.idx) > 0L) { partable$level[nonpar.idx] <- 0L } } if (!is.null(object@ParTable$block)) { partable$block <- object@ParTable$block } else { partable$block <- rep(1L, length(partable$lhs)) if (length(nonpar.idx) > 0L) { partable$block[nonpar.idx] <- 0L } } # TDJ: Added to prevent error when lav_partable_merge() is called below. # Problematic if object@ParTable is missing one of the requested slots, # which returns a NULL slot with a missing name. For example: # example(cfa) # lav_partable_independence(lavdata = fit@Data, lavpta = fit@pta, # lavoptions = lavInspect(fit, "options")) # Has no "label" or "plabel" elements. 
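# drop any such NULL entries (and their missing names) up front, so that
# lav_partable_merge() below only sees complete, named columns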
empties <- which(sapply(partable, is.null)) if (length(empties)) { partable[empties] <- NULL } if (all.free) { partable$user <- rep(1L, length(partable$lhs)) non.free.idx <- which(partable$free == 0L & partable$op != "==" & partable$op != ":=" & partable$op != "<" & partable$op != ">") partable$free[non.free.idx] <- 1L partable$user[non.free.idx] <- 10L } # replace 'start' column, since lav_model will fill these in in GLIST partable$start <- parameterEstimates(object, remove.system.eq = FALSE, remove.def = FALSE, remove.eq = FALSE, remove.ineq = FALSE, remove.nonfree = FALSE, remove.unused = FALSE )$est # add new parameters, extend model if (is.list(add)) { stopifnot( !is.null(add$lhs), !is.null(add$op), !is.null(add$rhs) ) ADD <- add } else if (is.character(add)) { ngroups <- lav_partable_ngroups(partable) ADD.orig <- lavaanify(add, ngroups = ngroups) ADD <- ADD.orig[, c("lhs", "op", "rhs", "user", "label")] # minimum # always add block/group/level if (!is.null(ADD.orig$group)) { ADD$group <- ADD.orig$group } else { ADD$group <- rep(1L, length(ADD$lhs)) } if (!is.null(ADD.orig$level)) { ADD$level <- ADD.orig$level } else { ADD$level <- rep(1L, length(ADD$lhs)) } if (!is.null(ADD.orig$block)) { ADD$block <- ADD.orig$block } else { ADD$block <- rep(1L, length(ADD$lhs)) } remove.idx <- which(ADD$user == 0) if (length(remove.idx) > 0L) { ADD <- ADD[-remove.idx, ] } ADD$start <- rep(0, nrow(ADD)) ADD$free <- rep(1, nrow(ADD)) ADD$user <- rep(10, nrow(ADD)) } # merge LIST <- lav_partable_merge(partable, ADD, remove.duplicated = remove.duplicated, warn = FALSE ) # remove nonpar? # if(remove.nonpar) { # nonpar.idx <- which(LIST$op %in% c("==", ":=", "<", ">")) # if(length(nonpar.idx) > 0L) { # LIST <- LIST[-nonpar.idx,] # } # } # redo 'free' free.idx <- which(LIST$free > 0) LIST$free[free.idx] <- 1:length(free.idx) # adapt options lavoptions <- object@Options # do.fit? lavoptions$do.fit <- do.fit # needed? if (any(LIST$op == "~1")) lavoptions$meanstructure <- TRUE if (.hasSlot(object, "h1")) { lavh1 <- object@h1 } else { # old object -- for example 'usemmodelfit' in package 'pompom' # add a few fields lavoptions$h1 <- FALSE lavoptions$implied <- FALSE lavoptions$baseline <- FALSE lavoptions$loglik <- FALSE lavoptions$estimator.args <- list() # add a few slots object@Data@weights <- vector("list", object@Data@ngroups) object@Model@estimator <- object@Options$estimator object@Model@estimator.args <- list() lavh1 <- lav_h1_implied_logl( lavdata = object@Data, lavsamplestats = object@SampleStats, lavoptions = object@Options ) } FIT <- lavaan(LIST, slotOptions = lavoptions, slotSampleStats = object@SampleStats, slotData = object@Data, slotCache = object@Cache, sloth1 = lavh1 ) FIT } # 4. 
catml model lav_object_catml <- function(lavobject = NULL) { stopifnot(inherits(lavobject, "lavaan")) stopifnot(lavobject@Model@categorical) # extract slots lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats lavoptions <- lavobject@Options lavpta <- lavobject@pta # if only categorical variables: remove thresholds and intercepts refit <- FALSE if (all(lavdata@ov$type == "ordered")) { partable.catml <- parTable(lavobject) rm.idx <- which(partable.catml$op %in% c("|", "~1")) partable.catml <- partable.catml[-rm.idx, ] partable.catml <- lav_partable_complete(partable.catml) } else { refit <- TRUE partable.catml <- parTable(lavobject) partable.catml$start <- partable.catml$est partable.catml$se <- NULL rm.idx <- which(partable.catml$op %in% c("|", "~1")) partable.catml <- partable.catml[-rm.idx, ] partable.catml$ustart <- partable.catml$est for (b in seq_len(lavpta$nblocks)) { ov.names.num <- lavpta$vnames$ov.num[[b]] ov.var.idx <- which(partable.catml$op == "~~" & partable.catml$lhs %in% ov.names.num & partable.catml$lhs == partable.catml$rhs) partable.catml$free[ov.var.idx] <- 0L } partable.catml <- lav_partable_complete(partable.catml) } # adapt lavsamplestats for (g in seq_len(lavdata@ngroups)) { lavsamplestats@WLS.V[[g]] <- NULL lavsamplestats@WLS.VD[[g]] <- NULL COR <- lavsamplestats@cov[[g]] # check if COV is pd or not ev <- eigen(COR, symmetric = TRUE, only.values = TRUE)$values if (any(ev < .Machine$double.eps^(1 / 2))) { # not PD! COV <- cov2cor(lav_matrix_symmetric_force_pd(COR, tol = 1e-04)) lavsamplestats@cov[[g]] <- COV lavsamplestats@var[[g]] <- diag(COV) refit <- TRUE } else { COV <- COR } current.warn <- lav_warn() if (lav_warn(FALSE)) on.exit(lav_warn(current.warn), TRUE) out <- lav_samplestats_icov( COV = COV, ridge = 1e-05, x.idx = lavsamplestats@x.idx[[g]], ngroups = lavdata@ngroups, g = g ) lav_warn(current.warn) lavsamplestats@icov[[g]] <- out$icov lavsamplestats@cov.log.det[[g]] <- out$cov.log.det # NACOV <- lavsamplestats@NACOV[[g]] # nvar <- nrow(COV) # ntotal <- nrow(NACOV) # pstar <- nvar*(nvar-1)/2 # nocor <- ntotal - pstar # if(length(nocor) > 0L) { # lavsamplestats@NACOV[[g]] <- NACOV[-seq_len(nocor), # -seq_len(nocor)] # } } # adapt lavoptions lavoptions$estimator <- "catML" lavoptions$.categorical <- FALSE lavoptions$categorical <- FALSE lavoptions$correlation <- TRUE lavoptions$meanstructure <- FALSE lavoptions$conditional.x <- FALSE # fixme lavoptions$information <- c("expected", "expected") lavoptions$h1.information <- c("structured", "structured") # unlike DWLS lavoptions$se <- "none" lavoptions$test <- "standard" # always for now lavoptions$baseline <- TRUE if (!refit) { lavoptions$optim.method <- "none" lavoptions$optim.force.converged <- TRUE } else { lavoptions$optim.gradient <- "numerical" } # dummy fit FIT <- lavaan( slotParTable = partable.catml, slotSampleStats = lavsamplestats, slotData = lavdata, slotOptions = lavoptions ) FIT } lavaan/R/lav_syntax.R0000644000176200001440000006656414627656441014270 0ustar liggesusers# parse lavaan syntax # YR 14 Jan 2014: move to lav_syntax.R # YR 17 Oct 2023: add ldw parser lavParseModelString <- function(model.syntax = "", as.data.frame. 
= FALSE, parser = "new", warn = TRUE, debug = FALSE) { if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } if (!missing(warn)) { current.warn <- lav_warn() if (lav_warn(warn)) on.exit(lav_warn(current.warn), TRUE) } parser <- tolower(parser) if (!parser %in% c("old", "new")) { lav_msg_stop(gettext("parser= argument should be \"old\" or \"new\"")) } if (parser == "old") { # original/classic parser out <- lav_parse_model_string_orig( model.syntax = model.syntax, as.data.frame. = as.data.frame. ) } else { # new parser out <- ldw_parse_model_string( model.syntax = model.syntax, as.data.frame. = as.data.frame. ) } out } # the 'original' parser (up to 0.6-17) lav_parse_model_string_orig <- function(model.syntax = "", as.data.frame. = FALSE) { # check for empty syntax if (length(model.syntax) == 0) { stop("lavaan ERROR: empty model syntax") } # remove comments prior to split: # match from comment character to newline, but don't eliminate newline model.syntax <- gsub("[#!].*(?=\n)", "", model.syntax, perl = TRUE) # replace semicolons with newlines prior to split model.syntax <- gsub(";", "\n", model.syntax, fixed = TRUE) # remove all whitespace prior to split model.syntax <- gsub("[ \t]+", "", model.syntax, perl = TRUE) # remove any occurrence of >= 2 consecutive newlines to eliminate # blank statements; this retains a blank newline at the beginning, # if such exists, but parser will not choke because of start.idx model.syntax <- gsub("\n{2,}", "\n", model.syntax, perl = TRUE) # replace 'strange' tildes (in some locales) (new in 0.6-6) model.syntax <- gsub(pattern = "\u02dc", replacement = "~", model.syntax) # break up in lines model <- unlist(strsplit(model.syntax, "\n")) # check for multi-line formulas: they contain no operator symbol # but before we do that, we remove all strings between double quotes # to avoid confusion with for example equal("f1=~x1") statements # model.simple <- gsub("\\(.*\\)\\*", "MODIFIER*", model) model.simple <- gsub("\\\".[^\\\"]*\\\"", "LABEL", model) # start.idx <- grep("[~=<>:|%]", model.simple) operators <- c( "=~", "<~", "~*~", "~~", "~", "==", "<", ">", ":=", ":", "\\|", "%" ) lhs.modifiers <- c("efa") operators.extra <- c(operators, lhs.modifiers) start.idx <- grep(paste(operators.extra, collapse = "|"), model.simple) # check for empty start.idx: no operator found (new in 0.6-1) if (length(start.idx) == 0L) { stop("lavaan ERROR: model does not contain lavaan syntax (no operators found)") } # check for lonely lhs modifiers (only efa() for now): # if found, remove the following start.idx efa.idx <- grep("efa\\(", model.simple) op.idx <- grep(paste(operators, collapse = "|"), model.simple) both.idx <- which(efa.idx %in% op.idx) if (length(both.idx) > 0L) { efa.idx <- efa.idx[-which(efa.idx %in% op.idx)] } if (length(efa.idx) > 0L) { start.idx <- start.idx[-(match(efa.idx, start.idx) + 1L)] } # check for non-empty string, without an operator in the first lines # (new in 0.6-1) if (start.idx[1] > 1L) { # two possibilities: # - we have an empty line (ok) # - the element contains no operator (warn!) for (el in 1:(start.idx[1] - 1L)) { # not empty? 
if (nchar(model.simple[el]) > 0L) { warning("lavaan WARNING: no operator found in this syntax line: ", model.simple[el], "\n", " This syntax line will be ignored!") } } } end.idx <- c(start.idx[-1] - 1, length(model)) model.orig <- model model <- character(length(start.idx)) for (i in 1:length(start.idx)) { model[i] <- paste(model.orig[start.idx[i]:end.idx[i]], collapse = "") } # ok, in all remaining lines, we should have an operator outside the "" model.simple <- gsub("\\\".[^\\\"]*\\\"", "LABEL", model) idx.wrong <- which(!grepl( paste(operators, collapse = "|"), model.simple )) # idx.wrong <- which(!grepl("[~=<>:|%]", model.simple)) if (length(idx.wrong) > 0) { cat("lavaan: missing operator in formula(s):\n") print(model[idx.wrong]) stop("lavaan ERROR: syntax error in lavaan model syntax") } # but perhaps we have a '+' as the first character? idx.wrong <- which(grepl("^\\+", model)) if (length(idx.wrong) > 0) { cat("lavaan: some formula(s) start with a plus (+) sign:\n") print(model[idx.wrong]) stop("lavaan ERROR: syntax error in lavaan model syntax") } # main operation: flatten formulas into single bivariate pieces # with a left-hand-side (lhs), an operator (eg "=~"), and a # right-hand-side (rhs) # both lhs and rhs can have a modifier FLAT.lhs <- character(0) FLAT.op <- character(0) FLAT.rhs <- character(0) FLAT.rhs.mod.idx <- integer(0) FLAT.block <- integer(0) # keep track of groups using ":" operator FLAT.fixed <- character(0) # only for display purposes! FLAT.start <- character(0) # only for display purposes! FLAT.lower <- character(0) # only for display purposes! FLAT.upper <- character(0) # only for display purposes! FLAT.label <- character(0) # only for display purposes! FLAT.prior <- character(0) FLAT.efa <- character(0) FLAT.rv <- character(0) FLAT.idx <- 0L MOD.idx <- 0L CON.idx <- 0L MOD <- vector("list", length = 0L) CON <- vector("list", length = 0L) BLOCK <- 1L BLOCK_OP <- FALSE for (i in 1:length(model)) { x <- model[i] if (lav_debug()) { cat("formula to parse:\n") print(x) cat("\n") } # 1. which operator is used? line.simple <- gsub("\\\".[^\\\"]*\\\"", "LABEL", x) # "=~" operator? if (grepl("=~", line.simple, fixed = TRUE)) { op <- "=~" # "<~" operator? } else if (grepl("<~", line.simple, fixed = TRUE)) { op <- "<~" } else if (grepl("~*~", line.simple, fixed = TRUE)) { op <- "~*~" # "~~" operator? } else if (grepl("~~", line.simple, fixed = TRUE)) { op <- "~~" # "~" operator? } else if (grepl("~", line.simple, fixed = TRUE)) { op <- "~" # "==" operator? } else if (grepl("==", line.simple, fixed = TRUE)) { op <- "==" # "<" operator? } else if (grepl("<", line.simple, fixed = TRUE)) { op <- "<" # ">" operator? } else if (grepl(">", line.simple, fixed = TRUE)) { op <- ">" # ":=" operator? } else if (grepl(":=", line.simple, fixed = TRUE)) { op <- ":=" # ":" operator? } else if (grepl(":", line.simple, fixed = TRUE)) { op <- ":" # "|" operator? } else if (grepl("|", line.simple, fixed = TRUE)) { op <- "|" # "%" operator? } else if (grepl("%", line.simple, fixed = TRUE)) { op <- "%" } else { stop("unknown operator in ", model[i]) } # 2. split by operator (only the *first* occurence!) # check first if equal/label modifier has been used on the LEFT! 
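# --- Editor's aside: an illustrative sketch (not part of the package code).
# The operator detection above and the lhs/op/rhs flattening below can be
# inspected via the exported wrapper; every formula is reduced to bivariate
# (lhs, op, rhs) pieces:
#   library(lavaan)
#   FLAT <- lavParseModelString("f1 =~ x1 + x2 + x3 \n x1 ~~ x2",
#                               as.data.frame. = TRUE)
#   FLAT[, c("lhs", "op", "rhs")]
#   # three rows f1 =~ x1/x2/x3, plus one row x1 ~~ x2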
if (substr(x, 1, 6) == "label(") { stop("label modifier can not be used on the left-hand side of the operator") } if (op == "|") { op.idx <- regexpr("\\|", x) } else if (op == "~*~") { op.idx <- regexpr("~\\*~", x) } else { op.idx <- regexpr(op, x) } lhs <- substr(x, 1L, op.idx - 1L) # right-hand side string rhs <- substr(x, op.idx + attr(op.idx, "match.length"), nchar(x)) # check if first character of rhs is '+'; if so, remove silently # (for those who copied multiline R input from a website/pdf) if (substr(rhs, 1, 1) == "+") { rhs <- substr(rhs, 2, nchar(rhs)) } # 2b. if operator is "==" or "<" or ">" or ":=", put it in CON if (op == "==" || op == "<" || op == ">" || op == ":=") { # remove quotes, if any lhs <- gsub("\\\"", "", lhs) rhs <- gsub("\\\"", "", rhs) CON.idx <- CON.idx + 1L CON[[CON.idx]] <- list(op = op, lhs = lhs, rhs = rhs, user = 1L) next } # 2c if operator is ":", put it in BLOCK if (op == ":") { # check if rhs is empty (new in 0.6-4) if (nchar(rhs) == 0L) { lav_msg_stop(gettextf( "syntax contains block identifier %s with missing number/label. The correct syntax is: \"LHS: RHS\", where LHS is a block identifier (eg group or level), and RHS is the group/level/block number or label.", dQuote(lhs)) ) } # check lhs (new in 0.6-4) - note: class is for nlsem lhs.orig <- lhs lhs <- tolower(lhs) if (!lhs %in% c("group", "level", "block", "class")) { lav_msg_stop(gettextf( "unknown block identifier: %s. Block identifier should be group, level or block.", dQuote(lhs.orig)) ) } FLAT.idx <- FLAT.idx + 1L FLAT.lhs[FLAT.idx] <- lhs FLAT.op[FLAT.idx] <- op FLAT.rhs[FLAT.idx] <- rhs FLAT.fixed[FLAT.idx] <- "" FLAT.start[FLAT.idx] <- "" FLAT.lower[FLAT.idx] <- "" FLAT.upper[FLAT.idx] <- "" FLAT.label[FLAT.idx] <- "" FLAT.prior[FLAT.idx] <- "" FLAT.efa[FLAT.idx] <- "" FLAT.rv[FLAT.idx] <- "" FLAT.rhs.mod.idx[FLAT.idx] <- 0L if (BLOCK_OP) { BLOCK <- BLOCK + 1L } FLAT.block[FLAT.idx] <- BLOCK BLOCK_OP <- TRUE next } # 3. parse left hand # new in 0.6-3 # first check if all lhs names are valid (in R); see ?make.names # and ?reserved # for example, 'NA' is a reserved keyword, and should not be used # this usually only happens for latent variable names # # check should not come earlier, as we do not need it for :,==,<,>,:= LHS <- strsplit(lhs, split = "+", fixed = TRUE)[[1]] # remove modifiers LHS <- gsub("^\\S*\\*", "", LHS) if (!all(make.names(LHS) == LHS)) { lav_msg_stop(gettextf( "lavaan ERROR: left hand side (lhs) of this formula: %1$s %2$s %3$s contains either a reserved word (in R) or an illegal character: %4$s. See ?reserved for a list of reserved words in R. Please use a variable name that is not a reserved word in R and use only characters, digits, or the dot symbol.", lhs, op, rhs, dQuote(LHS[!make.names(LHS) == LHS]) )) } lhs.formula <- as.formula(paste("~", lhs)) lhs.out <- lav_syntax_parse_rhs(rhs = lhs.formula[[2L]], op = op) lhs.names <- names(lhs.out) # new in 0.6-4 # handle LHS modifiers (if any) # if(sum(sapply(lhs.out, length)) > 0L) { # warning("lavaan WARNING: left-hand side of formula below contains modifier:\n", x,"\n") # } # 4. lav_syntax_parse_rhs (as rhs of a single-sided formula) # new 0.5-12: before we do this, replace '0.2?' 
by 'start(0.2)*' # requested by the simsem folks
  rhs <- gsub("\\(?([-]?[0-9]*\\.?[0-9]*)\\)?\\?", "start(\\1)\\*", rhs)

  # new in 0.6-6, check for rhs NAMES that are reserved names
  # like in foo =~ in + out
  RHS <- strsplit(rhs, split = "+", fixed = TRUE)[[1]]
  RHS.names <- gsub("^\\S*\\*", "", RHS)
  BAD <- c("if", "else", "repeat", "while", "function", "for", "in")
  if (any(RHS.names %in% c(BAD, "NA"))) { # "NA" added in 0.6-8
    stop(
      "lavaan ERROR: right hand side (rhs) of this formula:\n ",
      lhs, " ", op, " ", rhs,
      "\n contains either a reserved word (in R) or an illegal character: ",
      dQuote(RHS.names[which(RHS.names %in% BAD)[1]]),
      "\n See ?reserved for a list of reserved words in R",
      "\n Please use a variable name that is not a reserved word in R",
      "\n and use only characters, digits, or the dot symbol."
    )
  }

  # new in 0.6-6, check for rhs LABELS that are reserved names
  # like in foo =~ in*bar
  RHS <- strsplit(rhs, split = "+", fixed = TRUE)[[1]]
  RHS.labels <- gsub("\\*\\S*$", "", RHS)
  if (any(RHS.labels %in% BAD)) {
    stop(
      "lavaan ERROR: right hand side (rhs) of this formula:\n ",
      lhs, " ", op, " ", rhs,
      "\n contains either a reserved word (in R) or an illegal character: ",
      # quote the offending label (not the corresponding name)
      dQuote(RHS.labels[which(RHS.labels %in% BAD)[1]]),
      "\n See ?reserved for a list of reserved words in R",
      "\n Please use a variable name that is not a reserved word in R",
      "\n and use only characters, digits, or the dot symbol."
    )
  }

  # new in 0.6-12: check for three-way interaction terms (which we do
  # NOT support)
  if (any(grepl(":", RHS.names))) {
    ncolon <- sapply(gregexpr(":", RHS.names), length)
    if (any(ncolon > 1L)) {
      idx <- which(ncolon > 1L)
      lav_msg_stop(gettext(
        "Three-way or higher-order interaction terms (using multiple colons) are not supported in the lavaan syntax; please manually construct the product terms yourself in the data.frame, give them an appropriate name, and then you can use these interaction variables as any other (observed) variable in the model syntax. Problematic term is: "),
        RHS.names[idx[1]]
      )
    }
  }

  rhs.formula <- as.formula(paste("~", rhs))
  out <- lav_syntax_parse_rhs(rhs = rhs.formula[[2L]], op = op)
  if (lav_debug()) print(out)

  # for each lhs element
  for (l in 1:length(lhs.names)) {
    # for each rhs element
    for (j in 1:length(out)) {
      # catch intercepts
      if (names(out)[j] == "intercept") {
        if (op == "~") {
          rhs.name <- ""
        } else {
          # either number (1), or reserved name?
          stop("lavaan ERROR: right-hand side of formula contains an invalid variable name:\n ", x)
        }
      } else if (names(out)[j] == "..zero.." && op == "~") {
        rhs.name <- ""
      } else if (names(out)[j] == "..constant.." && op == "~") {
        rhs.name <- ""
      } else {
        rhs.name <- names(out)[j]
      }
      # move this 'check' to post-parse
      # if(op == "|") {
      #   th.name <- paste("t", j, sep="")
      #   if(names(out)[j] != th.name) {
      #     stop("lavaan ERROR: threshold ", j, " of variable ",
      #          sQuote(lhs.names[1]), " should be named ",
      #          sQuote(th.name), "; found ",
      #          sQuote(names(out)[j]), "\n")
      #   }
      # }
      # catch lhs = rhs and op = "=~"
      if (op == "=~" && lhs.names[l] == names(out)[j]) {
        stop("lavaan ERROR: latent variable `", lhs.names[l],
             "' cannot be measured by itself")
      }
      # check that we do not already have this combination (in this group)
      # 1. asymmetric (=~, ~, ~1)
      if (op != "~~") {
        idx <- which(FLAT.lhs == lhs.names[l] &
                     FLAT.op == op &
                     FLAT.block == BLOCK &
                     FLAT.rhs == rhs.name)
        if (length(idx) > 0L) {
          stop("lavaan ERROR: duplicate model element in: ", model[i])
        }
      } else { # 2.
symmetric (~~) idx <- which(FLAT.lhs == rhs.name & FLAT.op == "~~" & FLAT.block == BLOCK & FLAT.rhs == lhs.names[l]) if (length(idx) > 0L) { stop("lavaan ERROR: duplicate model element in: ", model[i]) } } # check if we have a self-loop (y ~ y) if (op %in% c("~", "<~") && rhs.name == lhs.names[l]) { # stop("lavaan ERROR: lhs and rhs are the same in: ", # model[i]) # this breaks pompom package, example uSEM warning( "lavaan WARNING: lhs and rhs are the same in: ", model[i] ) } FLAT.idx <- FLAT.idx + 1L FLAT.lhs[FLAT.idx] <- lhs.names[l] FLAT.op[FLAT.idx] <- op FLAT.rhs[FLAT.idx] <- rhs.name FLAT.block[FLAT.idx] <- BLOCK FLAT.fixed[FLAT.idx] <- "" FLAT.start[FLAT.idx] <- "" FLAT.label[FLAT.idx] <- "" FLAT.lower[FLAT.idx] <- "" FLAT.upper[FLAT.idx] <- "" FLAT.prior[FLAT.idx] <- "" FLAT.efa[FLAT.idx] <- "" FLAT.rv[FLAT.idx] <- "" mod <- list() rhs.mod <- 0L if (length(lhs.out[[l]]$efa) > 0L) { mod$efa <- lhs.out[[l]]$efa FLAT.efa[FLAT.idx] <- paste(mod$efa, collapse = ";") rhs.mod <- 1L # despite being a LHS modifier } if (length(out[[j]]$fixed) > 0L) { mod$fixed <- out[[j]]$fixed FLAT.fixed[FLAT.idx] <- paste(mod$fixed, collapse = ";") rhs.mod <- 1L } if (length(out[[j]]$start) > 0L) { mod$start <- out[[j]]$start FLAT.start[FLAT.idx] <- paste(mod$start, collapse = ";") rhs.mod <- 1L } if (length(out[[j]]$lower) > 0L) { mod$lower <- out[[j]]$lower FLAT.lower[FLAT.idx] <- paste(mod$lower, collapse = ";") rhs.mod <- 1L } if (length(out[[j]]$upper) > 0L) { mod$upper <- out[[j]]$upper FLAT.upper[FLAT.idx] <- paste(mod$upper, collapse = ";") rhs.mod <- 1L } if (length(out[[j]]$label) > 0L) { mod$label <- out[[j]]$label FLAT.label[FLAT.idx] <- paste(mod$label, collapse = ";") rhs.mod <- 1L } if (length(out[[j]]$rv) > 0L) { mod$rv <- out[[j]]$rv FLAT.rv[FLAT.idx] <- paste(mod$rv, collapse = ";") rhs.mod <- 1L } if (length(out[[j]]$prior) > 0L) { mod$prior <- out[[j]]$prior FLAT.prior[FLAT.idx] <- paste(mod$prior, collapse = ";") rhs.mod <- 1L } # if(op == "~1" && rhs == "0") { # mod$fixed <- 0 # FLAT.fixed[FLAT.idx] <- paste(mod$fixed, collapse=";") # rhs.mod <- 1L # } if (op == "=~" && rhs == "0") { mod$fixed <- 0 FLAT.rhs[FLAT.idx] <- FLAT.lhs[FLAT.idx] FLAT.fixed[FLAT.idx] <- paste(mod$fixed, collapse = ";") rhs.mod <- 1L } FLAT.rhs.mod.idx[FLAT.idx] <- rhs.mod if (rhs.mod > 0L) { MOD.idx <- MOD.idx + 1L MOD[[MOD.idx]] <- mod } } # rhs elements } # lhs elements } # model elements # enumerate modifier indices mod.idx <- which(FLAT.rhs.mod.idx > 0L) FLAT.rhs.mod.idx[mod.idx] <- 1:length(mod.idx) FLAT <- list( lhs = FLAT.lhs, op = FLAT.op, rhs = FLAT.rhs, mod.idx = FLAT.rhs.mod.idx, block = FLAT.block, fixed = FLAT.fixed, start = FLAT.start, lower = FLAT.lower, upper = FLAT.upper, label = FLAT.label, prior = FLAT.prior, efa = FLAT.efa, rv = FLAT.rv ) # change op for intercepts (for convenience only) int.idx <- which(FLAT$op == "~" & FLAT$rhs == "") if (length(int.idx) > 0L) { FLAT$op[int.idx] <- "~1" } # new in 0.6, reorder covariances here! FLAT <- lav_partable_covariance_reorder(FLAT) if (as.data.frame.) { FLAT <- as.data.frame(FLAT, stringsAsFactors = FALSE) } # new in 0.6-4: check for 'group' within 'level' if (any(FLAT$op == ":")) { op.idx <- which(FLAT$op == ":") if (length(op.idx) < 2L) { # only 1 block identifier? 
this is weird -> give warning
      warning("lavaan WARNING: syntax contains only a single block identifier: ",
              FLAT$lhs[op.idx])
    } else {
      first.block <- FLAT$lhs[op.idx[1L]]
      second.block <- FLAT$lhs[op.idx[2L]]
      if (first.block == "level" && second.block == "group") {
        stop("lavaan ERROR: groups cannot be nested within levels")
      }
    }
  }
  attr(FLAT, "modifiers") <- MOD
  attr(FLAT, "constraints") <- CON
  FLAT
}

lav_syntax_parse_rhs <- function(rhs, op = "") {
  # new version YR 15 dec 2011!
  # - no 'equal' field anymore (only labels!)
  # - every modifier is evaluated
  # - unquoted labels are allowed (eg. x1 + x2 + c(v1,v2,v3)*x3)

  # fill in rhs list
  out <- list()
  repeat {
    if (length(rhs) == 1L) { # last one and only a single element
      out <- c(vector("list", 1L), out)
      NAME <- all.vars(rhs)
      if (length(NAME) > 0L) {
        names(out)[1L] <- NAME
      } else { # intercept or zero?
        if (as.character(rhs) == "1") {
          names(out)[1L] <- "intercept"
        } else if (as.character(rhs) == "0") {
          names(out)[1L] <- "..zero.."
          out[[1L]]$fixed <- 0
        } else {
          names(out)[1L] <- "..constant.."
          out[[1L]]$fixed <- 0
        }
      }
      break
    } else if (rhs[[1L]] == "*") { # last one, but with modifier
      out <- c(vector("list", 1L), out)
      NAME <- all.vars(rhs[[3L]])
      if (length(NAME) > 0L) { # not an intercept
        # catch interaction term
        rhs3.names <- all.names(rhs[[3L]])
        if (rhs3.names[1L] == ":") {
          if (length(NAME) == 1) {
            NAME <- paste(NAME[1L], ":", NAME[1L], sep = "")
          } else {
            NAME <- paste(NAME[1L], ":", NAME[2L], sep = "")
          }
        }
        names(out)[1L] <- NAME
      } else { # intercept
        names(out)[1L] <- "intercept"
      }
      i.var <- all.vars(rhs[[2L]], unique = FALSE)
      if (length(i.var) > 0L) {
        # modifiers are unquoted labels
        out[[1L]]$label <- i.var
      } else {
        # modifier is something else
        out[[1L]] <- lav_syntax_get_modifier(rhs[[2L]])
      }
      break
    } else if (rhs[[1L]] == ":") { # last one, but interaction term
      out <- c(vector("list", 1L), out)
      NAME <- all.vars(rhs)
      if (length(NAME) == 1) {
        NAME <- paste(NAME[1L], ":", NAME[1L], sep = "")
      } else {
        NAME <- paste(NAME[1L], ":", NAME[2L], sep = "")
      }
      names(out)[1L] <- NAME
      break
    } else if (rhs[[1L]] == "+") { # not last one!
      # three possibilities:
      # 1. length(rhs[[3]] == 3), and rhs[[3L]][[1]] == "*" -> modifier
      # 2. length(rhs[[3]] == 3), and rhs[[3L]][[1]] == ":" -> interaction
      # 3. length(rhs[[3]] == 1) -> single element
      out <- c(vector("list", 1L), out)
      # modifier or not?
      if (length(rhs[[3L]]) == 3L && rhs[[3L]][[1]] == "*") { # modifier!!
        NAME <- all.vars(rhs[[3L]][[3]])
        if (length(NAME) > 0L) { # not an intercept
          # catch interaction term
          rhs3.names <- all.names(rhs[[3L]][[3]])
          if (rhs3.names[1L] == ":") {
            if (length(NAME) == 1) {
              NAME <- paste(NAME[1L], ":", NAME[1L], sep = "")
            } else {
              NAME <- paste(NAME[1L], ":", NAME[2L], sep = "")
            }
          }
          names(out)[1L] <- NAME
        } else { # intercept
          names(out)[1L] <- "intercept"
        }
        i.var <- all.vars(rhs[[3]][[2L]], unique = FALSE)
        if (length(i.var) > 0L) {
          # modifiers are unquoted labels
          out[[1L]]$label <- i.var
        } else {
          # modifier is something else
          out[[1L]] <- lav_syntax_get_modifier(rhs[[3]][[2L]])
        }
        # interaction term?
      } else if (length(rhs[[3L]]) == 3L && rhs[[3L]][[1]] == ":") {
        # interaction term, without modifier
        NAME <- all.vars(rhs[[3L]])
        if (length(NAME) == 1) {
          NAME <- paste(NAME[1L], ":", NAME[1L], sep = "")
        } else {
          NAME <- paste(NAME[1L], ":", NAME[2L], sep = "")
        }
        names(out)[1L] <- NAME
      } else { # no modifier!!
        NAME <- all.vars(rhs[[3]])
        if (length(NAME) > 0L) {
          names(out)[1L] <- NAME
        } else { # intercept or zero?
if (as.character(rhs[[3]]) == "1") { names(out)[1L] <- "intercept" } else if (as.character(rhs[[3]]) == "0") { names(out)[1L] <- "..zero.." out[[1L]]$fixed <- 0 } else { names(out)[1L] <- "..constant.." out[[1L]]$fixed <- 0 } } } # next element rhs <- rhs[[2L]] } else { stop("lavaan ERROR: I'm confused parsing this line: ", rhs, "\n") } } # if multiple elements, check for duplicated elements and merge if found if (length(out) > 1L) { rhs.names <- names(out) while (!is.na(idx <- which(duplicated(rhs.names))[1L])) { dup.name <- rhs.names[idx] orig.idx <- match(dup.name, rhs.names) merged <- c(out[[orig.idx]], out[[idx]]) if (!is.null(merged)) { # be careful, NULL will delete element out[[orig.idx]] <- merged } out <- out[-idx] rhs.names <- names(out) } } # if thresholds, check order and reorder if necessary # if(op == "|") { # t.names <- names(out) # idx <- match(sort(t.names), t.names) # out <- out[idx] # } out } lav_syntax_get_modifier <- function(mod) { if (length(mod) == 1L) { # three possibilites: 1) numeric, 2) NA, or 3) quoted character if (is.numeric(mod)) { return(list(fixed = mod)) } if (is.na(mod)) { return(list(fixed = as.numeric(NA))) } if (is.character(mod)) { return(list(label = mod)) } } else if (mod[[1L]] == "start") { cof <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) return(list(start = cof)) } else if (mod[[1L]] == "lower") { cof <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) return(list(lower = cof)) } else if (mod[[1L]] == "upper") { cof <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) return(list(upper = cof)) } else if (mod[[1L]] == "equal") { label <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) return(list(label = label)) } else if (mod[[1L]] == "label") { label <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) label[is.na(label)] <- "" # catch 'NA' elements in a label return(list(label = label)) } else if (mod[[1L]] == "rv") { rv <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) if (anyNA(rv)) { stop("lavaan ERROR: some rv() labels are NA") } return(list(rv = rv)) } else if (mod[[1L]] == "prior") { prior <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) return(list(prior = prior)) } else if (mod[[1L]] == "efa") { efa <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) return(list(efa = efa)) } else if (mod[[1L]] == "c") { # vector: we allow numeric and character only! 
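# --- Editor's aside: an illustrative sketch (not part of the package code).
# How the modifier branches above (and the c() branch continuing below)
# translate typical modifiers; this is an internal helper, hence the :::
# operator, with expected results shown as comments:
#   lavaan:::lav_syntax_get_modifier(0.5)                # list(fixed = 0.5)
#   lavaan:::lav_syntax_get_modifier(quote(start(0.2)))  # list(start = 0.2)
#   lavaan:::lav_syntax_get_modifier(quote(label("a")))  # list(label = "a")
#   lavaan:::lav_syntax_get_modifier(quote(c(NA, 0.5)))  # list(fixed = c(NA, 0.5))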
cof <- unlist(lapply(as.list(mod)[-1], eval, envir = NULL, enclos = NULL )) if (all(is.na(cof))) { return(list(fixed = rep(as.numeric(NA), length(cof)))) } else if (is.numeric(cof)) { return(list(fixed = cof)) } else if (is.character(cof)) { cof[is.na(cof)] <- "" # catch 'NA' elements in a label return(list(label = cof)) } else { stop("lavaan ERROR: can not parse modifier:", mod, "\n") } } else { # unknown expression # as a final attempt, we will evaluate it and coerce it # to either a numeric or character (vector) cof <- try(eval(mod, envir = NULL, enclos = NULL), silent = TRUE) if (inherits(cof, "try-error")) { stop( "lavaan ERROR: evaluating modifier failed: ", paste(as.character(mod)[[1]], "()*", sep = ""), "\n" ) } else if (is.numeric(cof)) { return(list(fixed = cof)) } else if (is.character(cof)) { return(list(label = cof)) } else { stop( "lavaan ERROR: can not parse modifier: ", paste(as.character(mod)[[1]], "()*", sep = ""), "\n" ) } } } lavaan/R/zzz.R0000644000176200001440000000043614627656440012716 0ustar liggesusers.onAttach <- function(libname, pkgname) { version <- read.dcf( file = system.file("DESCRIPTION", package = pkgname), fields = "Version" ) packageStartupMessage( "This is ", paste(pkgname, version), "\n", pkgname, " is FREE software! Please report any bugs." ) } lavaan/R/ctr_pairwise_table.R0000644000176200001440000001772514627656441015735 0ustar liggesusers# this function is written by Myrsini Katsikatsou ############################## pairwiseTables FUNCTION ######################## # This function can be public. It gets as an input a raw data set of ordinal # variables and it returns a list of all pairwise frequency tables. # # The input arguments of the function: # data : matrix or data frame containing the data. The rows correspond to # different observations and the columns to different observed categorical # (ordinal or nominal) variables. No continuous variables or covariates # should be contained in data. If the variables contained in the data are # distinguished into indicators of exogenous latent variables (lv) and # indicators of endogenous latent variables, those for exogenous lv should # be presented first (in the first columns of data) followed by the # indicators for endogenous lv. # var.levels: NULL or vector or list, specifies the levels (response categories) # for each categorical variable contained in data. # If NULL, the levels encoutered in data are used. If a response # category is not observed in the data, then var.levels should be # defined. # If vector, that implies that all variables have the same levels as # given in the vector. # If list, the components of the list are vectors, as many as the # number of variables in data. Each vector gives the levels of # the corresponding categorical variable in data. # no.x : NULL or integer, gives the number of indicators for exogenous lv. # The default value is NULL indicating that data contains only # indicators of exogenous latent variables. # perc : TRUE/FALSE. If FALSE the observed frequencies are reported, otherwise # the observed percentages are given. # na.exclude : TRUE/FALSE. If TRUE, listwise deletion is applied to data. # Otherwise, cases with missing values are preserved and and an # extra level with label NA is included in the tables. # The output of the function: # It is a list of three components: $pairTables, $VarLevels and $Ncases_del. # pairTables : a list of so many tables as the number of variable pairs formed # by data. 
If there are indicators of both exogenous and endogenous
#   variables, then first all the matrices referring to pairs of
#   indicators of exogenous lv are reported, followed by all the
#   matrices referring to pairs of indicators of endogenous lv, which
#   in turn are followed by all the matrices of pairs: one indicator of an
#   exogenous lv - one indicator of an endogenous lv.
# VarLevels : a list of as many vectors as the number of variables in the data.
#   Each vector gives the levels / response categories of each variable.
# Ncases_del : an integer reporting the number of cases deleted from the data
#   because of missing values (listwise deletion) when na.exclude=TRUE.
pairwiseTables <- function(data, var.levels = NULL, no.x = NULL,
                           perc = FALSE, na.exclude = TRUE) {
  # data in the right format?
  if ((!is.matrix(data)) & (!is.data.frame(data))) {
    lav_msg_stop(gettext("data is neither a matrix nor a data.frame"))
  }
  # at least two variables
  no.var <- dim(data)[2]
  if (no.var < 2) {
    lav_msg_stop(gettext("there are fewer than 2 variables"))
  }
  # no.x < no.var ? (guard against the documented default no.x = NULL)
  if (!is.null(no.x) && no.x > no.var) {
    lav_msg_stop(gettext(
      "number of indicators for exogenous latent variables is larger than the total number of variables in data"))
  }
  # if data is a matrix, transform it to a data.frame
  if (is.matrix(data)) {
    data <- as.data.frame(data)
  }
  # listwise deletion
  if (na.exclude) {
    old.data <- data
    data <- na.omit(data)
  }
  # all columns of the data.frame should be of class factor so that the
  # function levels() can be applied
  if (!all(sapply(data, class) == "factor")) {
    if (nrow(data) > 1) {
      data <- data.frame(sapply(data, factor))
    } else {
      data <- apply(data, 2, factor)
      data <- as.data.frame(matrix(data, nrow = 1))
    }
  }
  # the levels observed for each variable; obs.levels is a list
  obs.levels <- lapply(data, levels)
  # number of variables in data same as number of vectors in var.levels?
  if (is.list(var.levels) && no.var != length(var.levels)) {
    lav_msg_stop(gettext(
      "the length of var.levels does not match the number of variables of the given data set"))
  }
  # create var.levels if a list is not given
  old.var.levels <- var.levels
  if (!is.list(old.var.levels)) {
    if (is.null(old.var.levels)) {
      var.levels <- obs.levels
    } else {
      var.levels <- vector("list", no.var)
      var.levels <- lapply(var.levels, function(x) {
        x <- old.var.levels
      })
    }
  }
  names(var.levels) <- names(data)
  # also check that obs.levels exist in the object var.levels given by the user, i.e.
old.var.levels if (is.list(old.var.levels)) { for (i in 1:no.var) { if (!all(obs.levels[[i]] %in% old.var.levels[[i]])) { lav_msg_stop(gettext( "levels observed in data are not mentioned in var.levels")) } } } else if (is.vector(old.var.levels)) { if (!all(apply(na.omit(data), 2, function(x) { x %in% old.var.levels }))) { lav_msg_stop(gettext("levels observed in data are not mentioned in var.levels")) } } no.given.levels <- sapply(var.levels, length) # assign the right levels for each variable as given in object var.levels if it is not the case # it is not the case when the observed levels are a subgroup of the var.levels given if (!is.null(old.var.levels)) { no.obs.levels <- sapply(obs.levels, length) if (!all(no.obs.levels == no.given.levels)) { index <- c(1:no.var)[no.obs.levels != no.given.levels] for (i in index) { data[, i] <- factor(data[, i], levels = var.levels[[i]]) } } } # compute the bivariate frequency tables # Split first into two cases: a) only indicators of exogenous latent variables # b) otherwise if (is.null(no.x) || no.x == no.var) { pairs.index <- utils::combn(no.var, 2) no.pairs <- dim(pairs.index)[2] res <- vector("list", no.pairs) for (i in 1:no.pairs) { res[[i]] <- table(data[, pairs.index[, i]], useNA = "ifany") } } else { no.y <- no.var - no.x pairs.xixj.index <- utils::combn(no.x, 2) # row 1 gives i index, row 2 j index, j runs faster than i pairs.yiyj.index <- utils::combn(no.y, 2) pairs.xiyj.index <- expand.grid(1:no.y, 1:no.x) pairs.xiyj.index <- rbind(pairs.xiyj.index[, 2], pairs.xiyj.index[, 1]) # row 1 gives i index, row 2 j index, j runs faster than i no.pairs.xixj <- dim(pairs.xixj.index)[2] no.pairs.yiyj <- dim(pairs.yiyj.index)[2] no.pairs.xiyj <- dim(pairs.xiyj.index)[2] no.all.pairs <- no.pairs.xixj + no.pairs.yiyj + no.pairs.xiyj data.x <- data[, 1:no.x] data.y <- data[, (no.x + 1):no.var] res <- vector("list", no.all.pairs) for (i in 1:no.pairs.xixj) { res[[i]] <- table(data.x[, pairs.xixj.index[, i]], useNA = "ifany") } j <- 0 for (i in (no.pairs.xixj + 1):(no.pairs.xixj + no.pairs.yiyj)) { j <- j + 1 res[[i]] <- table(data.y[, pairs.yiyj.index[, j]], useNA = "ifany") } j <- 0 for (i in (no.pairs.xixj + no.pairs.yiyj + 1):no.all.pairs) { j <- j + 1 res[[i]] <- table( cbind( data.x[, pairs.xiyj.index[1, j], drop = FALSE], data.y[, pairs.xiyj.index[2, j], drop = FALSE] ), useNA = "ifany" ) } } # if percentages are asked if (perc) { Nobs <- dim(data)[1] res <- lapply(res, function(x) { x / Nobs }) } # Ncases_del = the number of cases deleted because they had missing values if (na.exclude) { Ncases_deleted <- dim(old.data)[1] - dim(data)[1] } else { Ncases_deleted <- 0 } list(pairTables = res, VarLevels = var.levels, Ncases_del = Ncases_deleted) } lavaan/R/lav_cfa_utils.R0000644000176200001440000001427114627656441014677 0ustar liggesusers# utility functions needed for lav_cfa_* # compute THETA and PSI, given lambda using either ULS or GLS # this function assumes: # - THETA is diagonal # - PSI is unrestricted # - we assume W = S^{-1} # # YR 17 oct 2022: - add lower/upper bounds for theta (only to compute PSI) # - use 'lambda' correction to ensure PSI is positive definite # YR 02 feb 2023: - add psi.mapping.ML argument lav_cfa_lambda2thetapsi <- function(lambda = NULL, S = NULL, S.inv = NULL, GLS = FALSE, psi.mapping.ML = FALSE, nobs = 20L) { LAMBDA <- as.matrix(lambda) nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) if (GLS) { # see Browne, 1974 section 4 case II if (is.null(S.inv)) { W <- solve(S) } else { W <- S.inv } tLW <- crossprod(LAMBDA, W) M <- 
solve(tLW %*% LAMBDA, tLW) # GLS mapping # D <- W %*% LAMBDA %*% M # symmmetric D <- crossprod(M, tLW) # theta <- solve(W*W - D*D, diag(W %*% S %*% W - D %*% S %*% D)) theta <- try(solve(W * W - D * D, diag(W - D)), # because W == S^{-1} silent = TRUE ) if (inherits(theta, "try-error")) { # what to do? lav_msg_warn(gettext( "problem computing THETA values; trying pace algorithm")) theta <- lav_efa_pace(S = S, nfactors = nfac, theta.only = TRUE) } } else { # see Hagglund 1982, section 4 M <- solve(crossprod(LAMBDA), t(LAMBDA)) # ULS mapping function D <- LAMBDA %*% M theta <- try(solve(diag(nvar) - D * D, diag(S - (D %*% S %*% D))), silent = TRUE ) if (inherits(theta, "try-error")) { # what to do? lav_msg_warn(gettext( "problem computing THETA values; trying pace algorithm")) theta <- lav_efa_pace(S = S, nfactors = nfac, theta.only = TRUE) } } theta.nobounds <- theta # ALWAYS check bounds for theta (only to to compute PSI)! theta.bounds <- TRUE if (theta.bounds) { diagS <- diag(S) # lower bound lower.bound <- diagS * 0 # * 0.01 too.small.idx <- which(theta < lower.bound) if (length(too.small.idx) > 0L) { theta[too.small.idx] <- lower.bound[too.small.idx] } # upper bound upper.bound <- diagS * 1 # * 0.99 too.large.idx <- which(theta > upper.bound) if (length(too.large.idx) > 0L) { theta[too.large.idx] <- upper.bound[too.large.idx] } } # psi diag.theta <- diag(theta, nvar) lambda <- try(lav_matrix_symmetric_diff_smallest_root(S, diag.theta), silent = TRUE ) if (inherits(lambda, "try-error")) { lav_msg_warn(gettext("failed to compute lambda")) SminTheta <- S - diag.theta # and hope for the best } else { cutoff <- 1 + 1 / (nobs - 1) if (lambda < cutoff) { lambda.star <- lambda - 1 / (nobs - 1) SminTheta <- S - lambda.star * diag.theta } else { SminTheta <- S - diag.theta } } # just like local SAM if (psi.mapping.ML) { Ti <- 1 / theta zero.theta.idx <- which(abs(theta) < 0.01) # be conservative if (length(zero.theta.idx) > 0L) { Ti[zero.theta.idx] <- 1 } M <- solve(t(LAMBDA) %*% diag(Ti, nvar) %*% LAMBDA) %*% t(LAMBDA) %*% diag(Ti, nvar) PSI <- M %*% SminTheta %*% t(M) # ML } else { PSI <- M %*% SminTheta %*% t(M) # ULS/GLS } # we take care of the bounds later! list(lambda = LAMBDA, theta = theta.nobounds, psi = PSI) } # compute PSI, given lambda and theta using either ULS, GLS, ML # this function assumes: # - THETA is diagonal # - PSI is unrestricted # # YR 08 Mar 2023: - first version lav_cfa_lambdatheta2psi <- function(lambda = NULL, theta = NULL, # vector! 
S = NULL, S.inv = NULL, mapping = "ML", nobs = 20L) {
  LAMBDA <- as.matrix(lambda)
  nvar <- nrow(LAMBDA)
  nfac <- ncol(LAMBDA)
  theta.nobounds <- theta
  # ALWAYS check bounds for theta to compute PSI
  diagS <- diag(S)
  # lower bound
  lower.bound <- diagS * 0 # * 0.01
  too.small.idx <- which(theta < lower.bound)
  if (length(too.small.idx) > 0L) {
    theta[too.small.idx] <- lower.bound[too.small.idx]
  }
  # upper bound
  upper.bound <- diagS * 1 # * 0.99
  too.large.idx <- which(theta > upper.bound)
  if (length(too.large.idx) > 0L) {
    theta[too.large.idx] <- upper.bound[too.large.idx]
  }
  # psi
  diag.theta <- diag(theta, nvar)
  lambda <- try(lav_matrix_symmetric_diff_smallest_root(S, diag.theta),
    silent = TRUE
  )
  if (inherits(lambda, "try-error")) {
    lav_msg_warn(gettext("failed to compute lambda"))
    SminTheta <- S - diag.theta # and hope for the best
  } else {
    cutoff <- 1 + 1 / (nobs - 1)
    if (lambda < cutoff) {
      lambda.star <- lambda - 1 / (nobs - 1)
      SminTheta <- S - lambda.star * diag.theta
    } else {
      SminTheta <- S - diag.theta
    }
  }
  # mapping matrix
  if (mapping == "ML") {
    Ti <- 1 / theta
    zero.theta.idx <- which(abs(theta) < 0.01) # be conservative
    if (length(zero.theta.idx) > 0L) {
      Ti[zero.theta.idx] <- 1
    }
    M <- solve(t(LAMBDA) %*% diag(Ti, nvar) %*% LAMBDA) %*% t(LAMBDA) %*% diag(Ti, nvar)
  } else if (mapping == "GLS") {
    if (is.null(S.inv)) {
      S.inv <- try(solve(S), silent = TRUE)
    }
    if (inherits(S.inv, "try-error")) {
      M <- tcrossprod(solve(crossprod(LAMBDA)), LAMBDA)
    } else {
      M <- solve(t(LAMBDA) %*% S.inv %*% LAMBDA) %*% t(LAMBDA) %*% S.inv
    }
  } else if (mapping == "ULS") {
    M <- tcrossprod(solve(crossprod(LAMBDA)), LAMBDA)
  }
  # compute PSI
  PSI <- M %*% SminTheta %*% t(M)
  PSI
}

# compute theta elements for a 1-factor model
lav_cfa_theta_spearman <- function(S, bounds = "wide") {
  p <- ncol(S)
  out <- numeric(p)
  R <- cov2cor(S)
  for (p.idx in seq_len(p)) {
    var.p <- R[p.idx, p.idx]
    x <- R[, p.idx][-p.idx]
    aa <- lav_matrix_vech(tcrossprod(x), diagonal = FALSE)
    ss <- lav_matrix_vech(R[-p.idx, -p.idx, drop = FALSE], diagonal = FALSE)
    h2 <- mean(aa / ss) # communality
    if (bounds == "standard") {
      h2[h2 < 0] <- 0
      h2[h2 > 1] <- 1
    } else if (bounds == "wide") {
      h2[h2 < -0.05] <- -0.05 # corresponds to lower bound ov.var "wide"
      h2[h2 > +1.20] <- +1.20 # corresponds to upper bound ov.var "wide"
    }
    out[p.idx] <- (1 - h2) * S[p.idx, p.idx]
  }
  out
}
lavaan/R/lav_efa_print.R0000644000176200001440000001515614627656441014700 0ustar liggesusers# print only (standardized) loadings
print.lavaan.efa <- function(x, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1,
                             alpha.level = 0.01, ...) {
  # unclass
  y <- unclass(x)
  if (!y$header$optim.converged) {
    cat("** WARNING ** Optimizer did not end normally\n")
    cat("** WARNING ** Estimates below are most likely unreliable\n")
  }
  # loadings per block
  for (b in seq_len(y$efa$nblocks)) {
    cat("\n")
    if (length(y$efa$block.label) > 0L) {
      cat(y$efa$block.label[[b]], ":\n\n", sep = "")
    }
    LAMBDA <- unclass(y$efa$lambda[[b]])
    lav_print_loadings(LAMBDA,
      nd = nd, cutoff = cutoff,
      dot.cutoff = dot.cutoff,
      alpha.level = alpha.level,
      x.se = y$efa$lambda.se[[b]]
    )
    cat("\n")
  }
  invisible(LAMBDA)
}

# print efaList
print.efaList <- function(x, nd = 3L, cutoff = 0.3,
                          dot.cutoff = 0.1, alpha.level = 0.01, ...)
{ # unclass y <- unclass(x) # kill loadings element if present y[["loadings"]] <- NULL nfits <- length(y) RES <- vector("list", nfits) for (ff in seq_len(nfits)) { res <- lav_object_summary(y[[ff]], fit.measures = FALSE, estimates = FALSE, modindices = FALSE, efa = TRUE, efa.args = list( lambda = TRUE, theta = FALSE, psi = FALSE, eigenvalues = FALSE, sumsq.table = FALSE, lambda.structure = FALSE, fs.determinacy = FALSE, se = FALSE, zstat = FALSE, pvalue = FALSE ) ) RES[[ff]] <- print.lavaan.efa(res, nd = nd, cutoff = cutoff, dot.cutoff = dot.cutoff, alpha.level = alpha.level, ... ) } invisible(RES) } # print summary efaList print.efaList.summary <- function(x, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1, alpha.level = 0.01, ...) { # unclass y <- unclass(x) # get nd, if it is stored as an attribute ND <- attr(y, "nd") if (!is.null(ND) && is.numeric(ND)) { nd <- as.integer(ND) } # get cutoff, if it is stored as an attribute CT <- attr(y, "cutoff") if (!is.null(CT) && is.numeric(CT)) { cutoff <- CT } # get dot.cutoff, if it is stored as an attribute DC <- attr(y, "dot.cutoff") if (!is.null(DC) && is.numeric(DC)) { dot.cutoff <- DC } # get alpha.level, if it is stored as an attribute AL <- attr(y, "alpha.level") if (!is.null(AL) && is.numeric(AL)) { alpha.level <- AL } cat("This is ", sprintf("lavaan %s", x$lavaan.version), " -- running exploratory factor analysis\n", sep = "" ) # everything converged? if (!x$converged.flag) { cat("lavaan WARNING: not all models did converge!\n") } cat("\n") # estimator c1 <- c("Estimator") # second column tmp.est <- toupper(x$estimator) if (tmp.est == "DLS") { dls.first.letter <- substr( x$estimator.args$dls.GammaNT, 1L, 1L ) tmp.est <- paste("DLS-", toupper(dls.first.letter), sep = "") } c2 <- tmp.est # additional estimator args if (!is.null(x$estimator.args) && length(x$estimator.args) > 0L) { if (x$estimator == "DLS") { c1 <- c(c1, "Estimator DLS value for a") c2 <- c(c2, x$estimator.args$dls.a) } } # rotation method c1 <- c(c1, "Rotation method") if (x$rotation == "none") { MM <- toupper(x$rotation) } else if (x$rotation.args$orthogonal) { MM <- paste(toupper(x$rotation), " ", "ORTHOGONAL", sep = "" ) } else { MM <- paste(toupper(x$rotation), " ", "OBLIQUE", sep = "" ) } c2 <- c(c2, MM) if (x$rotation != "none") { # method options if (x$rotation == "geomin") { c1 <- c(c1, "Geomin epsilon") c2 <- c(c2, x$rotation.args$geomin.epsilon) } else if (x$rotation == "orthomax") { c1 <- c(c1, "Orthomax gamma") c2 <- c(c2, x$rotation.args$orthomax.gamma) } else if (x$rotation == "cf") { c1 <- c(c1, "Crawford-Ferguson gamma") c2 <- c(c2, x$rotation.args$cf.gamma) } else if (x$rotation == "oblimin") { c1 <- c(c1, "Oblimin gamma") c2 <- c(c2, x$rotation.args$oblimin.gamma) } else if (x$rotation == "promax") { c1 <- c(c1, "Promax kappa") c2 <- c(c2, x$rotation.args$promax.kappa) } # rotation algorithm c1 <- c(c1, "Rotation algorithm (rstarts)") tmp <- paste(toupper(x$rotation.args$algorithm), " (", x$rotation.args$rstarts, ")", sep = "" ) c2 <- c(c2, tmp) # Standardized metric (or not) c1 <- c(c1, "Standardized metric") if (x$rotation.args$std.ov) { c2 <- c(c2, "TRUE") } else { c2 <- c(c2, "FALSE") } # Row weights c1 <- c(c1, "Row weights") tmp.txt <- x$rotation.args$row.weights c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) } # format c1/c2 c1 <- format(c1, width = 33L) c2 <- format(c2, width = 18L + max(0, (nd - 3L)) * 4L, justify = "right" ) # create character matrix M <- cbind(c1, c2, deparse.level = 0) colnames(M) <- rep("", 
ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) # data if (!is.null(x$lavdata)) { cat("\n") lav_data_print_short(x$lavdata, nd = nd) } # number of models nfits <- length(x$model.list) # number of factors nfactors <- x$nfactors # fit measures if (!is.null(x$fit.table)) { cat("\n") if (nfits > 1L) { cat("Overview models:\n") } else { cat("Fit measures:\n") } print(x$fit.table, nd = nd, shift = 2L) } # eigenvalues if (!is.null(x$model.list[[1]]$efa$eigvals[[1]])) { cat("\n") if (x$model.list[[1]]$efa$std.ov) { cat("Eigenvalues correlation matrix:\n") } else { cat("Eigenvalues covariance matrix:\n") } for (b in seq_len(x$model.list[[1]]$efa$nblocks)) { cat("\n") if (length(x$model.list[[1]]$efa$block.label) > 0L) { cat(x$model.list[[1]]$efa$block.label[[b]], ":\n\n", sep = "") } print(x$model.list[[1]]$efa$eigvals[[b]], nd = nd, shift = 2L) } # blocks } # print summary for each model for (f in seq_len(nfits)) { res <- x$model.list[[f]] attr(res, "nd") <- nd attr(res, "cutoff") <- cutoff attr(res, "dot.cutoff") <- dot.cutoff attr(res, "alpha.level") <- alpha.level if (nfits > 1L) { if (f == 1L) { cat("\n") } cat("Number of factors: ", nfactors[f], "\n") } # print.lavaan.summary() prints the $efa element (only) or res print(res) } invisible(y) } lavaan/R/lav_lavaan_step02_options.R0000644000176200001440000001736114627656441017143 0ustar liggesuserslav_lavaan_step02_options <- function(slotOptions = NULL, # nolint slotData = NULL, # nolint flat.model = NULL, ordered = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, ov.names.l = NULL, sampling.weights = NULL, constraints = NULL, group = NULL, ov.names.x = NULL, ov.names.y = NULL, dotdotdot = NULL, cluster = NULL, data = NULL) { # # # # # # # # # # # # # # 2. lavoptions # # # # # # # # # # # # # # # if slotOptions not NULL # copy to lavoptions and modify categorical/clustered/multilevel # inserting a "." in the first position # if necessary, overwrite with values in dotdotdot and issue a warning # check if all names in dotdotdot are possible options, if not *** error *** # create complete option list (lav_options_default) and substitute values # given in dotdotdot # if data, slotData and sample.cov NULL: opt$bounds = FALSE # if slotData$data.type != "full" or (slotData and data = NULL): # opt$missing = "listwise" # set categorical mode ON if # - an operator "|" (threshold) was used # - data not NULL and one or more elements in ordered parameter # - sample.th provided # - at least one of the non-exogenous observed variables is "ordered" # (ordered factor in R) # if opt$estimator == "catml": set categorical mode OFF # TODO: estimator = "CATML" isn't mentioned in lavOptions / estimator # help text !? 
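# --- Editor's aside: an illustrative sketch (not part of the package code).
# One of the categorical-mode triggers listed here can be demonstrated by
# declaring an indicator as an ordered factor before fitting:
#   library(lavaan)
#   HS <- HolzingerSwineford1939
#   HS$x1 <- cut(HS$x1, breaks = 3, ordered_result = TRUE)
#   fit <- cfa('visual =~ x1 + x2 + x3', data = HS)
#   lavNames(fit, "ov.ord")   # "x1" -> categorical mode was switched on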
# if cluster not NULL, set opt$.clustered TRUE and *** error *** if
#   categorical mode is ON
# opt$.multilevel = (length(ov.names.l) > 0L &&
#   length(ov.names.l[[1]]) > 1L)
# if sampling.weights not NULL and categorical mode OFF and opt$estimator
#   in ("default", "ML", "PML"): set opt$estimator to "MLR"
# if constraints present and estimator == "ML", set opt$information to
#   c("observed", "observed")
# if there is an operator "~1" in flat.model and sample.mean not NULL,
#   set opt$meanstructure TRUE
# if there are no exogenous variables but conditional.x explicitly
#   requested: ** warning **
# if there are no exogenous variables, set opt$conditional.x FALSE
# if there are no exogenous variables and fixed.x not explicitly requested,
#   set opt$fixed.x to FALSE
if (!is.null(slotOptions)) {
  lavoptions <- slotOptions
  # backwards compatibility
  if (!is.null(lavoptions$categorical)) {
    lavoptions$.categorical <- lavoptions$categorical
    lavoptions$categorical <- NULL
  }
  if (!is.null(lavoptions$clustered)) {
    lavoptions$.clustered <- lavoptions$clustered
    lavoptions$clustered <- NULL
  }
  if (!is.null(lavoptions$multilevel)) {
    lavoptions$.multilevel <- lavoptions$multilevel
    lavoptions$multilevel <- NULL
  }
  # but what if other 'options' are given anyway (eg 'start = ')?
  # give a warning!
  if (length(dotdotdot) > 0L) {
    dot.names <- names(dotdotdot)
    op.idx <- which(dot.names %in% names(slotOptions))
    lav_msg_warn(gettext(
      "the following argument(s) override(s) the options in slotOptions:"),
      paste(dot.names[op.idx], collapse = " ")
    )
    lavoptions[dot.names[op.idx]] <- dotdotdot[op.idx]
  }
} else {
  if (lav_verbose()) {
    cat("lavoptions ...")
  }
  # load default options
  opt <- lav_options_default()
  # catch unknown options
  ok.names <- names(opt)
  dot.names <- names(dotdotdot)
  wrong.idx <- which(!dot.names %in% ok.names)
  if (length(wrong.idx) > 0L) {
    # stop or warning?? stop for now (there could be more)
    lav_msg_stop(ngettext(length(wrong.idx),
      "unknown argument:", "unknown arguments:"),
      lav_msg_view(dot.names[wrong.idx], "none", FALSE)
    )
  }
  # modifyList
  opt <- modifyList(opt, dotdotdot)
  # no data?
  if (is.null(slotData) && is.null(data) && is.null(sample.cov)) {
    opt$bounds <- FALSE
  }
  # only sample moments?
  if (!is.null(slotData) && !slotData@data.type == "full") {
    opt$missing <- "listwise"
  } else if (is.null(slotData) && is.null(data)) {
    opt$missing <- "listwise"
  }
  # categorical mode?
  opt$.categorical <- FALSE
  if (any(flat.model$op == "|")) {
    opt$.categorical <- TRUE
  } else if (!is.null(data) && length(ordered) > 0L) {
    opt$.categorical <- TRUE
  } else if (!is.null(sample.th)) {
    opt$.categorical <- TRUE
  } else if (is.data.frame(data)) {
    # first check if we can find ov.names.y in Data
    tmp.ov.names.y <- unique(unlist(ov.names.y))
    # remove possible interaction terms involving a y term
    int.idx <- which(grepl(":", tmp.ov.names.y))
    if (length(int.idx) > 0L) {
      tmp.ov.names.y <- tmp.ov.names.y[-int.idx]
    }
    idx.missing <- which(!(tmp.ov.names.y %in% names(data)))
    if (length(idx.missing)) {
      lav_msg_stop(
        gettext("missing observed variables in dataset:"),
        paste(tmp.ov.names.y[idx.missing], collapse = " ")
      )
    }
    if (any(sapply(data[, tmp.ov.names.y], inherits, "ordered"))) {
      opt$.categorical <- TRUE
    }
  }
  if (tolower(opt$estimator) == "catml") {
    opt$.categorical <- FALSE
  }
  # clustered?
  if (length(cluster) > 0L) {
    opt$.clustered <- TRUE
    if (opt$.categorical & opt$estimator != "PML") {
      lav_msg_stop(gettext("categorical + clustered is not supported yet."))
    }
  } else {
    opt$.clustered <- FALSE
  }
  # multilevel?
if (length(ov.names.l) > 0L && length(ov.names.l[[1]]) > 1L) { opt$.multilevel <- TRUE } else { opt$.multilevel <- FALSE } # sampling weights? force MLR # HJ 18/10/23: Except for PML if (!is.null(sampling.weights) && !opt$.categorical && opt$estimator %in% c("default", "ML", "PML")) { opt$estimator <- "MLR" } # constraints if (any(nchar(constraints) > 0L) && opt$estimator %in% c("ML")) { opt$information <- c("observed", "observed") } # meanstructure if (any(flat.model$op == "~1") || !is.null(sample.mean)) { opt$meanstructure <- TRUE } if (!is.null(group) && is.null(dotdotdot$meanstructure)) { opt$meanstructure <- TRUE } # conditional.x if ((is.list(ov.names.x) && sum(sapply(ov.names.x, FUN = length)) == 0L) || (is.character(ov.names.x) && length(ov.names.x) == 0L)) { # if explicitly set to TRUE, give warning if (is.logical(dotdotdot$conditional.x) && dotdotdot$conditional.x) { lav_msg_warn( gettext("no exogenous covariates; conditional.x will be set to FALSE")) } opt$conditional.x <- FALSE } # fixed.x if ((is.list(ov.names.x) && sum(sapply(ov.names.x, FUN = length)) == 0L) || (is.character(ov.names.x) && length(ov.names.x) == 0L)) { # if explicitly set to TRUE, give warning if (is.logical(dotdotdot$fixed.x) && dotdotdot$fixed.x) { # ok, we respect this: keep fixed.x = TRUE } else { opt$fixed.x <- FALSE } } # fill in remaining "default" values lavoptions <- lav_options_set(opt) if (lav_verbose()) { cat(" done.\n") } } lavoptions } lavaan/R/lav_mvnorm_cluster_missing.R0000644000176200001440000010454214627656441017537 0ustar liggesusers# loglikelihood clustered/twolevel data in the presence of missing data # YR: # - objective function: first version around March 2021 (see Psych paper) # - analytic gradient: first version around May 2021 # Mu.W, Mu.B, Sigma.W, Sigma.B are the model-implied statistics lav_mvnorm_cluster_missing_loglik_samplestats_2l <- function(Y1 = NULL, Y2 = NULL, Lp = NULL, Mp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, Sinv.method = "eigen", log2pi = FALSE, loglik.x = 0, minus.two = TRUE) { # map implied to 2l matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # Lp nclusters <- Lp$nclusters[[2]] between.idx <- Lp$between.idx[[2]] both.idx <- Lp$both.idx[[2]] cluster.idx <- Lp$cluster.idx[[2]] # sanity checks if (any(diag(sigma.w) < 0) || any(diag(sigma.b) < 0)) { return(+Inf) } # check is both.idx part of sigma.b is 'too' negative; if so, return +Inf ev <- eigen(sigma.b[both.idx, both.idx, drop = FALSE], symmetric = TRUE, only.values = TRUE )$values if (any(ev < -0.05)) { return(+Inf) } # cat("sigma.w = \n"); print(sigma.w) # cat("sigma.b = \n"); print(sigma.b) # cat("mu.y = \n"); print(mu.y) # global sigma.w.inv <- solve.default(sigma.w) sigma.w.logdet <- log(det(sigma.w)) sigma.b <- sigma.b[both.idx, both.idx] # only both part # y ny <- ncol(sigma.w) if (length(between.idx) > 0L) { Y1w <- Y1[, -between.idx, drop = FALSE] } else { Y1w <- Y1 } Y1w.c <- t(t(Y1w) - mu.y) PIJ <- matrix(0, nrow(Y1w.c), ny) # z nz <- length(between.idx) if (nz > 0L) { # check is sigma.zz is PD; if not, return +Inf ev <- eigen(sigma.zz, symmetric = TRUE, only.values = TRUE)$values if (any(ev < sqrt(.Machine$double.eps))) { return(+Inf) } Z <- Y2[, between.idx, drop = FALSE] Z.c <- t(t(Z) - mu.z) sigma.yz <- sigma.yz[both.idx, , drop = FALSE] # only both part sigma.zy <- 
t(sigma.yz) sigma.zz.inv <- solve.default(sigma.zz) sigma.zz.logdet <- log(det(sigma.zz)) sigma.zi.zy <- sigma.zz.inv %*% sigma.zy sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy GZ <- Z.c %*% sigma.zz.inv # for complete cases only } # containters per cluster q.yy.b <- q.zy <- q.zz.b <- numeric(nclusters) IBZA.j.logdet <- numeric(nclusters) ALIST <- rep(list(matrix( 0, length(both.idx), length(both.idx) )), nclusters) # Z per missing pattern if (nz > 0L) { Zp <- Mp$Zp ZPAT2J <- integer(nclusters) # which sigma.b.z per cluster SIGMA.B.Z <- vector("list", length = Zp$npatterns + 1L) sigma.j.zz.logdet <- q.zz.a <- 0 for (p in seq_len(Zp$npatterns)) { freq <- Zp$freq[p] z.na.idx <- which(!Zp$pat[p, ]) j.idx <- Zp$case.idx[[p]] # cluster indices with this pattern ZPAT2J[j.idx] <- p if (length(z.na.idx) > 0L) { zp <- sigma.zz[-z.na.idx, -z.na.idx, drop = FALSE] zp.inv <- lav_matrix_symmetric_inverse_update( S.inv = sigma.zz.inv, rm.idx = z.na.idx, logdet = TRUE, S.logdet = sigma.zz.logdet ) zp.logdet <- attr(zp.inv, "logdet") sigma.j.zz.logdet <- sigma.j.zz.logdet + (zp.logdet * freq) GZ[j.idx, -z.na.idx] <- Z.c[j.idx, -z.na.idx] %*% zp.inv yziy <- (sigma.yz[, -z.na.idx, drop = FALSE] %*% zp.inv %*% sigma.zy[-z.na.idx, , drop = FALSE]) SIGMA.B.Z[[p]] <- (sigma.b - yziy) } else { # complete case sigma.j.zz.logdet <- sigma.j.zz.logdet + (sigma.zz.logdet * freq) SIGMA.B.Z[[p]] <- sigma.b.z } } # p # add empty patterns (if any) if (length(Zp$empty.idx) > 0L) { ZPAT2J[Zp$empty.idx] <- p + 1L SIGMA.B.Z[[p + 1L]] <- sigma.b } q.zz.a <- sum(GZ * Z.c, na.rm = TRUE) GZ0 <- GZ GZ0[is.na(GZ0)] <- 0 GJ <- GZ0 %*% sigma.zy # only both part } # Y per missing pattern W.logdet <- 0 MPi <- integer(nrow(Y1)) for (p in seq_len(Mp$npatterns)) { freq <- Mp$freq[p] na.idx <- which(!Mp$pat[p, ]) j.idx <- Mp$j.idx[[p]] j1.idx <- Mp$j1.idx[[p]] TAB <- integer(nclusters) TAB[j1.idx] <- Mp$j.freq[[p]] # compute sigma.w.inv for this pattern if (length(na.idx) > 0L) { MPi[Mp$case.idx[[p]]] <- p wp <- sigma.w[-na.idx, -na.idx, drop = FALSE] wp.inv <- lav_matrix_symmetric_inverse_update( S.inv = sigma.w.inv, rm.idx = na.idx, logdet = TRUE, S.logdet = sigma.w.logdet ) wp.logdet <- attr(wp.inv, "logdet") W.logdet <- W.logdet + (wp.logdet * freq) PIJ[Mp$case.idx[[p]], -na.idx] <- Y1w.c[Mp$case.idx[[p]], -na.idx] %*% wp.inv A.j <- matrix(0, ny, ny) A.j[-na.idx, -na.idx] <- wp.inv for (j in j1.idx) { ALIST[[j]] <- ALIST[[j]] + (A.j[both.idx, both.idx] * TAB[j]) } # WIP[[p]][-na.idx, -na.idx] <- wp.inv } else { # complete case W.logdet <- W.logdet + (sigma.w.logdet * freq) PIJ[Mp$case.idx[[p]], ] <- Y1w.c[Mp$case.idx[[p]], ] %*% sigma.w.inv for (j in j1.idx) { ALIST[[j]] <- ALIST[[j]] + (sigma.w.inv[both.idx, both.idx] * TAB[j]) } } } # p q.yy.a <- sum(PIJ * Y1w.c, na.rm = TRUE) PJ <- rowsum.default(PIJ[, both.idx], cluster.idx, reorder = FALSE, na.rm = TRUE ) # only both part is needed # per cluster both.diag.idx <- lav_matrix_diag_idx(length(both.idx)) for (j in seq_len(nclusters)) { # we only need the 'both.idx' part of A.j, sigma.b.z, p.j, g.j ,... 
A.j <- ALIST[[j]] p.j <- PJ[j, ] if (nz > 0L) { sigma.b.z <- SIGMA.B.Z[[ZPAT2J[j]]] } else { sigma.b.z <- sigma.b } IBZA.j <- sigma.b.z %*% A.j IBZA.j[both.diag.idx] <- IBZA.j[both.diag.idx] + 1 # logdet IBZA.j tmp <- determinant.matrix(IBZA.j, logarithm = TRUE) IBZA.j.logdet[j] <- tmp$modulus * tmp$sign # IBZA.j.inv.BZ.p IBZA.j.inv.BZ.p <- solve.default(IBZA.j, drop(sigma.b.z %*% p.j)) q.yy.b[j] <- sum(p.j * IBZA.j.inv.BZ.p) if (nz > 0L) { g.j <- GJ[j, ] IBZA.j.inv.g <- solve.default(IBZA.j, g.j) A.IBZA.j.inv.g <- A.j %*% IBZA.j.inv.g q.zz.b[j] <- sum(g.j * A.IBZA.j.inv.g) q.zy[j] <- -sum(p.j * IBZA.j.inv.g) } } if (nz > 0L) { P <- Mp$nel + Zp$nel DIST <- (q.yy.a - sum(q.yy.b)) + 2 * sum(q.zy) + (q.zz.a + sum(q.zz.b)) LOGDET <- W.logdet + sum(IBZA.j.logdet) + sigma.j.zz.logdet } else { P <- Mp$nel DIST <- (q.yy.a - sum(q.yy.b)) LOGDET <- W.logdet + sum(IBZA.j.logdet) } # loglik? if (log2pi && !minus.two) { LOG.2PI <- log(2 * pi) loglik <- -(P * LOG.2PI + LOGDET + DIST) / 2 } else { loglik <- DIST + LOGDET } # loglik.x (only if loglik is requested) if (length(unlist(Lp$ov.x.idx)) > 0L && log2pi && !minus.two) { loglik <- loglik - loglik.x } loglik } # Mu.W, Mu.B, Sigma.W, Sigma.B are the model-implied statistics lav_mvnorm_cluster_missing_dlogl_2l_samplestats <- function( Y1 = NULL, Y2 = NULL, Lp = NULL, Mp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, Sinv.method = "eigen", return.list = FALSE) { # map implied to 2l matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # containers for dx dx.mu.y <- numeric(length(mu.y)) dx.mu.z <- numeric(length(mu.z)) dx.sigma.zz <- matrix(0, nrow(sigma.zz), ncol(sigma.zz)) dx.sigma.yz <- matrix(0, nrow(sigma.yz), ncol(sigma.yz)) dx.sigma.b <- matrix(0, nrow(sigma.b), ncol(sigma.b)) dx.sigma.w <- matrix(0, nrow(sigma.w), ncol(sigma.w)) # Lp nclusters <- Lp$nclusters[[2]] between.idx <- Lp$between.idx[[2]] cluster.idx <- Lp$cluster.idx[[2]] both.idx <- Lp$both.idx[[2]] # sigma.w sigma.w.inv <- solve.default(sigma.w) sigma.b <- sigma.b[both.idx, both.idx] # only both part # y ny <- ncol(sigma.w) if (length(between.idx) > 0L) { Y1w <- Y1[, -between.idx, drop = FALSE] } else { Y1w <- Y1 } Y1w.c <- t(t(Y1w) - mu.y) PIJ <- matrix(0, nrow(Y1w.c), ny) # z nz <- length(between.idx) if (nz > 0L) { Z <- Y2[, between.idx, drop = FALSE] Z.c <- t(t(Z) - mu.z) sigma.yz <- sigma.yz[both.idx, , drop = FALSE] # only both part sigma.zy <- t(sigma.yz) sigma.zz.inv <- solve.default(sigma.zz) sigma.zz.logdet <- log(det(sigma.zz)) sigma.zi.zy <- sigma.zz.inv %*% sigma.zy sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy GZ <- Z.c %*% sigma.zz.inv # for complete cases only } # containers per cluster # ALIST <- rep(list(matrix(0, length(both.idx), # length(both.idx))), nclusters) ALIST <- rep(list(matrix(0, ny, ny)), nclusters) # Z per missing pattern if (nz > 0L) { Zp <- Mp$Zp ZPAT2J <- integer(nclusters) # which pattern per cluster SIGMA.B.Z <- vector("list", length = Zp$npatterns + 1L) # +1 for empty ZIZY <- rep(list(matrix( 0, nrow(sigma.zy), ncol(sigma.zy) )), Zp$npatterns + 1L) ZIP <- rep(list(matrix( 0, nrow(sigma.zz), ncol(sigma.zz) )), Zp$npatterns + 1L) for (p in seq_len(Zp$npatterns)) { freq <- Zp$freq[p] z.na.idx <- which(!Zp$pat[p, ]) j.idx <- Zp$case.idx[[p]] # cluster indices with this pattern ZPAT2J[j.idx] <- p if (length(z.na.idx) > 0L) { zp <-
sigma.zz[-z.na.idx, -z.na.idx, drop = FALSE] zp.inv <- lav_matrix_symmetric_inverse_update( S.inv = sigma.zz.inv, rm.idx = z.na.idx, logdet = FALSE ) ZIP[[p]][-z.na.idx, -z.na.idx] <- zp.inv GZ[j.idx, -z.na.idx] <- Z.c[j.idx, -z.na.idx] %*% zp.inv Z.G.ZY <- zp.inv %*% sigma.zy[-z.na.idx, , drop = FALSE] ZIZY[[p]][-z.na.idx, ] <- zp.inv %*% sigma.zy[-z.na.idx, , drop = FALSE] yziy <- sigma.yz[, -z.na.idx, drop = FALSE] %*% Z.G.ZY SIGMA.B.Z[[p]] <- (sigma.b - yziy) } else { # complete case ZIZY[[p]] <- sigma.zi.zy ZIP[[p]] <- sigma.zz.inv SIGMA.B.Z[[p]] <- sigma.b.z } } # p # add empty patterns (if any) if (length(Zp$empty.idx) > 0L) { ZPAT2J[Zp$empty.idx] <- p + 1L SIGMA.B.Z[[p + 1L]] <- sigma.b } GZ[is.na(GZ)] <- 0 GJ <- GZ %*% sigma.zy } # Y per missing pattern WIP <- rep(list(matrix(0, ny, ny)), Mp$npatterns) MPi <- integer(nrow(Y1)) for (p in seq_len(Mp$npatterns)) { freq <- Mp$freq[p] na.idx <- which(!Mp$pat[p, ]) j.idx <- Mp$j.idx[[p]] j1.idx <- Mp$j1.idx[[p]] TAB <- integer(nclusters) TAB[j1.idx] <- Mp$j.freq[[p]] if (length(na.idx) > 0L) { MPi[Mp$case.idx[[p]]] <- p wp.inv <- lav_matrix_symmetric_inverse_update( S.inv = sigma.w.inv, rm.idx = na.idx, logdet = FALSE ) WIP[[p]][-na.idx, -na.idx] <- wp.inv PIJ[Mp$case.idx[[p]], -na.idx] <- Y1w.c[Mp$case.idx[[p]], -na.idx] %*% wp.inv for (j in j1.idx) { ALIST[[j]] <- # ALIST[[j]] + (WIP[[p]][both.idx, both.idx] * TAB[j]) ALIST[[j]] + (WIP[[p]] * TAB[j]) } } else { # complete case PIJ[Mp$case.idx[[p]], ] <- Y1w.c[Mp$case.idx[[p]], ] %*% sigma.w.inv WIP[[p]] <- sigma.w.inv for (j in j1.idx) { ALIST[[j]] <- # ALIST[[j]] + (sigma.w.inv[both.idx, both.idx] * TAB[j]) ALIST[[j]] + (sigma.w.inv * TAB[j]) } } } # p PJ <- rowsum.default(PIJ[, , drop = FALSE], cluster.idx, reorder = FALSE, na.rm = TRUE ) # per cluster both.diag.idx <- lav_matrix_diag_idx(length(both.idx)) for (j in seq_len(nclusters)) { A.j.full <- ALIST[[j]] A.j <- A.j.full[both.idx, both.idx, drop = FALSE] p.j <- as.matrix(PJ[j, ]) pb.j <- as.matrix(PJ[j, both.idx]) # only both.idx part if (nz > 0L) { sigma.b.z <- SIGMA.B.Z[[ZPAT2J[j]]] } else { sigma.b.z <- sigma.b } IBZA.j <- sigma.b.z %*% A.j IBZA.j[both.diag.idx] <- IBZA.j[both.diag.idx] + 1 IBZA.j.inv.BZ <- solve.default(IBZA.j, sigma.b.z) IBZA.j.inv.BZ.p <- IBZA.j.inv.BZ %*% pb.j A.IBZA.j.inv.BZ <- A.j %*% IBZA.j.inv.BZ A.IBZA.j.inv.BZ.p <- A.IBZA.j.inv.BZ %*% pb.j IBZA.j.inv <- solve.default(IBZA.j) A.IBZA.j.inv <- A.j %*% IBZA.j.inv p.IBZA.j.inv <- t(crossprod(pb.j, IBZA.j.inv)) # only if we have between-only variables if (nz > 0L) { g.j <- as.matrix(GJ[j, ]) zij <- as.matrix(GZ[j, ]) zizy <- ZIZY[[ZPAT2J[j]]] zip <- ZIP[[ZPAT2J[j]]] IBZA.j.inv.zizy <- solve.default(IBZA.j, t(zizy)) IBZA.j.inv.g <- IBZA.j.inv %*% g.j IBZA.j.inv.p <- IBZA.j.inv %*% pb.j A.IBZA.j.inv.g <- A.j %*% IBZA.j.inv.g A.IBZA.j.inv.zizy <- A.j %*% IBZA.j.inv.zizy zizy.A.IBZA.j.inv.g <- zizy %*% A.IBZA.j.inv.g p.IBZA.j.inv.zizy <- crossprod(pb.j, IBZA.j.inv.zizy) ggbzpp <- 2 * A.IBZA.j.inv.g + A.IBZA.j.inv.BZ.p - pb.j ZIJzizyp <- (2 * zij - zizy %*% pb.j) ########### # dx.mu.z # ########### tmp <- 2 * (t(p.IBZA.j.inv.zizy) - zij - zizy.A.IBZA.j.inv.g) dx.mu.z <- dx.mu.z + drop(tmp) ############### # dx.sigma.zz # ############### tmp1 <- (zip + zizy %*% A.IBZA.j.inv.zizy # logdet - tcrossprod(zij) # ZA - tcrossprod(zizy.A.IBZA.j.inv.g)) # ZB-1 d <- (t((2 * zizy.A.IBZA.j.inv.g + zizy %*% A.IBZA.j.inv.BZ.p) %*% p.IBZA.j.inv.zizy) + ZIJzizyp %*% p.IBZA.j.inv.zizy - 2 * tcrossprod(zizy.A.IBZA.j.inv.g, zij)) tmp2 <- (d + t(d)) / 2 tmp <- tmp1 + tmp2 # 
symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) dx.sigma.zz <- dx.sigma.zz + ZZ ############### # dx.sigma.yz # ############### t0 <- -2 * A.IBZA.j.inv.zizy t1 <- (-2 * tcrossprod(p.IBZA.j.inv, g.j) - 1 * tcrossprod(p.IBZA.j.inv, sigma.b.z %*% pb.j) + 2 * tcrossprod(A.IBZA.j.inv.g, g.j)) %*% A.IBZA.j.inv.zizy t2 <- -ggbzpp %*% p.IBZA.j.inv.zizy t3 <- -tcrossprod(p.IBZA.j.inv, ZIJzizyp) t4 <- 2 * tcrossprod(A.IBZA.j.inv.g, zij) tmp <- t0 + t1 + t2 + t3 + t4 dx.sigma.yz[both.idx, ] <- dx.sigma.yz[both.idx, , drop = FALSE] + tmp ############## # dx.sigma.b # ############## c <- tcrossprod(ggbzpp, p.IBZA.j.inv) tmp <- t(A.IBZA.j.inv) - tcrossprod(A.IBZA.j.inv.g) + (c + t(c)) / 2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) dx.sigma.b[both.idx, both.idx] <- dx.sigma.b[both.idx, both.idx, drop = FALSE] + ZZ # for dx.sigma.w PART1.b <- -1 * (IBZA.j.inv.g %*% (2 * t(IBZA.j.inv.BZ.p) + t(g.j) - t(g.j) %*% A.IBZA.j.inv.BZ) + IBZA.j.inv.BZ + tcrossprod(IBZA.j.inv.BZ.p)) PART2.b <- 2 * (IBZA.j.inv.g + IBZA.j.inv.BZ.p) # vector } else { ############## # dx.sigma.b # ############## bzpp <- A.IBZA.j.inv.BZ.p - pb.j c <- tcrossprod(bzpp, p.IBZA.j.inv) tmp <- t(A.IBZA.j.inv) + (c + t(c)) / 2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) dx.sigma.b[both.idx, both.idx] <- dx.sigma.b[both.idx, both.idx, drop = FALSE] + ZZ PART1.b <- -1 * (IBZA.j.inv.BZ + tcrossprod(IBZA.j.inv.BZ.p)) PART2.b <- 2 * IBZA.j.inv.BZ.p # vector } ############## # dx.sigma.w # ############## PART1 <- matrix(0, ny, ny) PART1[both.idx, both.idx] <- PART1.b PART2 <- matrix(0, ny, 1L) PART2[both.idx, 1L] <- PART2.b ij.index <- which(cluster.idx == j) pij <- PIJ[ij.index, , drop = FALSE] which.compl <- which(MPi[ij.index] == 0L) which.incompl <- which(MPi[ij.index] != 0L) AP2 <- rep(list(sigma.w.inv %*% PART2), length(ij.index)) AP1A.a <- AP1A.b <- matrix(0, ny, ny) # A.j.full <- matrix(0, ny, ny) if (length(which.compl) > 0L) { tmp <- (sigma.w.inv %*% PART1 %*% sigma.w.inv) AP1A.a <- tmp * length(which.compl) # A.j.full <- A.j.full + sigma.w.inv * length(which.compl) } if (length(which.incompl) > 0L) { p.idx <- MPi[ij.index][which.incompl] tmp <- lapply(WIP[p.idx], function(x) { x %*% PART1 %*% x }) AP1A.b <- Reduce("+", tmp) AP2[which.incompl] <- lapply(WIP[p.idx], function(x) { x %*% PART2 }) # A.j.full <- A.j.full + Reduce("+", WIP[ p.idx ]) } t1 <- AP1A.a + AP1A.b t2 <- (do.call("cbind", AP2) - t(pij)) %*% pij AA.wj <- t1 + t2 tmp <- A.j.full + (AA.wj + t(AA.wj)) / 2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) dx.sigma.w <- dx.sigma.w + ZZ ########### # dx.mu.y # ########### tmp <- numeric(ny) if (nz > 0L) { tmp[both.idx] <- IBZA.j.inv.g + IBZA.j.inv.BZ.p } else { tmp[both.idx] <- IBZA.j.inv.BZ.p } gbzpp <- A.j.full %*% tmp - p.j dx.mu.y <- dx.mu.y + drop(2 * gbzpp) } # j # rearrange dout <- lav_mvnorm_cluster_2l2implied( Lp = Lp, sigma.w = dx.sigma.w, sigma.b = dx.sigma.b, sigma.yz = dx.sigma.yz, sigma.zz = dx.sigma.zz, mu.y = dx.mu.y, mu.z = dx.mu.z ) if (return.list) { out <- dout } else { out <- c( dout$Mu.W, lav_matrix_vech(dout$Sigma.W), dout$Mu.B, lav_matrix_vech(dout$Sigma.B) ) } out } # cluster-wise scores -2*logl wrt Mu.W, Mu.B, Sigma.W, Sigma.B lav_mvnorm_cluster_missing_scores_2l <- function( Y1 = NULL, Y2 = NULL, Lp = NULL, Mp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, Sinv.method = "eigen") { # map implied to 2l matrices out <- lav_mvnorm_cluster_implied22l( Lp = Lp, Mu.W = Mu.W, Mu.B = Mu.B, Sigma.W = Sigma.W, Sigma.B = Sigma.B ) mu.y <- out$mu.y 
mu.z <- out$mu.z sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz # Lp nclusters <- Lp$nclusters[[2]] between.idx <- Lp$between.idx[[2]] cluster.idx <- Lp$cluster.idx[[2]] both.idx <- Lp$both.idx[[2]] # sigma.w sigma.w.inv <- solve.default(sigma.w) sigma.b <- sigma.b[both.idx, both.idx] # only both part # y ny <- ncol(sigma.w) if (length(between.idx) > 0L) { Y1w <- Y1[, -between.idx, drop = FALSE] } else { Y1w <- Y1 } Y1w.c <- t(t(Y1w) - mu.y) PIJ <- matrix(0, nrow(Y1w.c), ny) # z nz <- length(between.idx) if (nz > 0L) { Z <- Y2[, between.idx, drop = FALSE] Z.c <- t(t(Z) - mu.z) sigma.yz <- sigma.yz[both.idx, , drop = FALSE] # only both part sigma.zy <- t(sigma.yz) sigma.zz.inv <- solve.default(sigma.zz) sigma.zz.logdet <- log(det(sigma.zz)) sigma.zi.zy <- sigma.zz.inv %*% sigma.zy sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy GZ <- Z.c %*% sigma.zz.inv # for complete cases only } # containers per cluster # ALIST <- rep(list(matrix(0, length(both.idx), # length(both.idx))), nclusters) ALIST <- rep(list(matrix(0, ny, ny)), nclusters) # both level-1 and level-2 G.muy <- matrix(0, nclusters, length(mu.y)) G.Sigma.w <- matrix(0, nclusters, length(lav_matrix_vech(sigma.w))) G.Sigma.b <- matrix(0, nclusters, length(lav_matrix_vech(out$sigma.b))) G.muz <- matrix(0, nclusters, length(mu.z)) G.Sigma.zz <- matrix(0, nclusters, length(lav_matrix_vech(sigma.zz))) G.Sigma.yz <- matrix(0, nclusters, length(lav_matrix_vec(out$sigma.yz))) # Z per missing pattern if (nz > 0L) { Zp <- Mp$Zp ZPAT2J <- integer(nclusters) # which pattern per cluster SIGMA.B.Z <- vector("list", length = Zp$npatterns + 1L) # +1 for empty ZIZY <- rep(list(matrix( 0, nrow(sigma.zy), ncol(sigma.zy) )), Zp$npatterns + 1L) ZIP <- rep(list(matrix( 0, nrow(sigma.zz), ncol(sigma.zz) )), Zp$npatterns + 1L) for (p in seq_len(Zp$npatterns)) { freq <- Zp$freq[p] z.na.idx <- which(!Zp$pat[p, ]) j.idx <- Zp$case.idx[[p]] # cluster indices with this pattern ZPAT2J[j.idx] <- p if (length(z.na.idx) > 0L) { zp <- sigma.zz[-z.na.idx, -z.na.idx, drop = FALSE] zp.inv <- lav_matrix_symmetric_inverse_update( S.inv = sigma.zz.inv, rm.idx = z.na.idx, logdet = FALSE ) ZIP[[p]][-z.na.idx, -z.na.idx] <- zp.inv GZ[j.idx, -z.na.idx] <- Z.c[j.idx, -z.na.idx] %*% zp.inv Z.G.ZY <- zp.inv %*% sigma.zy[-z.na.idx, , drop = FALSE] ZIZY[[p]][-z.na.idx, ] <- zp.inv %*% sigma.zy[-z.na.idx, , drop = FALSE] yziy <- sigma.yz[, -z.na.idx, drop = FALSE] %*% Z.G.ZY SIGMA.B.Z[[p]] <- (sigma.b - yziy) } else { # complete case ZIZY[[p]] <- sigma.zi.zy ZIP[[p]] <- sigma.zz.inv SIGMA.B.Z[[p]] <- sigma.b.z } } # p # add empty patterns (if any) if (length(Zp$empty.idx) > 0L) { ZPAT2J[Zp$empty.idx] <- p + 1L SIGMA.B.Z[[p + 1L]] <- sigma.b } GZ[is.na(GZ)] <- 0 GJ <- GZ %*% sigma.zy } # Y per missing pattern WIP <- rep(list(matrix(0, ny, ny)), Mp$npatterns) MPi <- integer(nrow(Y1)) for (p in seq_len(Mp$npatterns)) { freq <- Mp$freq[p] na.idx <- which(!Mp$pat[p, ]) j.idx <- Mp$j.idx[[p]] j1.idx <- Mp$j1.idx[[p]] TAB <- integer(nclusters) TAB[j1.idx] <- Mp$j.freq[[p]] if (length(na.idx) > 0L) { MPi[Mp$case.idx[[p]]] <- p wp.inv <- lav_matrix_symmetric_inverse_update( S.inv = sigma.w.inv, rm.idx = na.idx, logdet = FALSE ) WIP[[p]][-na.idx, -na.idx] <- wp.inv PIJ[Mp$case.idx[[p]], -na.idx] <- Y1w.c[Mp$case.idx[[p]], -na.idx] %*% wp.inv for (j in j1.idx) { ALIST[[j]] <- # ALIST[[j]] + (WIP[[p]][both.idx, both.idx] * TAB[j]) ALIST[[j]] + (WIP[[p]] * TAB[j]) } } else { # complete case PIJ[Mp$case.idx[[p]], ] <- Y1w.c[Mp$case.idx[[p]], ] %*%
sigma.w.inv WIP[[p]] <- sigma.w.inv for (j in j1.idx) { ALIST[[j]] <- # ALIST[[j]] + (sigma.w.inv[both.idx, both.idx] * TAB[j]) ALIST[[j]] + (sigma.w.inv * TAB[j]) } } } # p PJ <- rowsum.default(PIJ[, , drop = FALSE], cluster.idx, reorder = FALSE, na.rm = TRUE ) # per cluster both.diag.idx <- lav_matrix_diag_idx(length(both.idx)) for (j in seq_len(nclusters)) { A.j.full <- ALIST[[j]] A.j <- A.j.full[both.idx, both.idx, drop = FALSE] p.j <- as.matrix(PJ[j, ]) pb.j <- as.matrix(PJ[j, both.idx]) # only both.idx part if (nz > 0L) { sigma.b.z <- SIGMA.B.Z[[ZPAT2J[j]]] } else { sigma.b.z <- sigma.b } IBZA.j <- sigma.b.z %*% A.j IBZA.j[both.diag.idx] <- IBZA.j[both.diag.idx] + 1 IBZA.j.inv.BZ <- solve.default(IBZA.j, sigma.b.z) IBZA.j.inv.BZ.p <- IBZA.j.inv.BZ %*% pb.j A.IBZA.j.inv.BZ <- A.j %*% IBZA.j.inv.BZ A.IBZA.j.inv.BZ.p <- A.IBZA.j.inv.BZ %*% pb.j IBZA.j.inv <- solve.default(IBZA.j) A.IBZA.j.inv <- A.j %*% IBZA.j.inv p.IBZA.j.inv <- t(crossprod(pb.j, IBZA.j.inv)) # only if we have between-only variables if (nz > 0L) { g.j <- as.matrix(GJ[j, ]) zij <- as.matrix(GZ[j, ]) zizy <- ZIZY[[ZPAT2J[j]]] zip <- ZIP[[ZPAT2J[j]]] IBZA.j.inv.zizy <- solve.default(IBZA.j, t(zizy)) IBZA.j.inv.g <- IBZA.j.inv %*% g.j IBZA.j.inv.p <- IBZA.j.inv %*% pb.j A.IBZA.j.inv.g <- A.j %*% IBZA.j.inv.g A.IBZA.j.inv.zizy <- A.j %*% IBZA.j.inv.zizy zizy.A.IBZA.j.inv.g <- zizy %*% A.IBZA.j.inv.g p.IBZA.j.inv.zizy <- crossprod(pb.j, IBZA.j.inv.zizy) ggbzpp <- 2 * A.IBZA.j.inv.g + A.IBZA.j.inv.BZ.p - pb.j ZIJzizyp <- (2 * zij - zizy %*% pb.j) ########### # dx.mu.z # ########### tmp <- 2 * (t(p.IBZA.j.inv.zizy) - zij - zizy.A.IBZA.j.inv.g) G.muz[j, ] <- drop(tmp) ############### # dx.sigma.zz # ############### tmp1 <- (zip + zizy %*% A.IBZA.j.inv.zizy # logdet - tcrossprod(zij) # ZA - tcrossprod(zizy.A.IBZA.j.inv.g)) # ZB-1 d <- (t((2 * zizy.A.IBZA.j.inv.g + zizy %*% A.IBZA.j.inv.BZ.p) %*% p.IBZA.j.inv.zizy) + ZIJzizyp %*% p.IBZA.j.inv.zizy - 2 * tcrossprod(zizy.A.IBZA.j.inv.g, zij)) tmp2 <- (d + t(d)) / 2 tmp <- tmp1 + tmp2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) G.Sigma.zz[j, ] <- lav_matrix_vech(ZZ) ############### # dx.sigma.yz # ############### t0 <- -2 * A.IBZA.j.inv.zizy t1 <- (-2 * tcrossprod(p.IBZA.j.inv, g.j) - 1 * tcrossprod(p.IBZA.j.inv, sigma.b.z %*% pb.j) + 2 * tcrossprod(A.IBZA.j.inv.g, g.j)) %*% A.IBZA.j.inv.zizy t2 <- -ggbzpp %*% p.IBZA.j.inv.zizy t3 <- -tcrossprod(p.IBZA.j.inv, ZIJzizyp) t4 <- 2 * tcrossprod(A.IBZA.j.inv.g, zij) tmp <- t0 + t1 + t2 + t3 + t4 tmp2 <- matrix(0, nrow(out$sigma.yz), ncol(out$sigma.yz)) tmp2[both.idx, ] <- tmp G.Sigma.yz[j, ] <- lav_matrix_vec(tmp2) ############## # dx.sigma.b # ############## c <- tcrossprod(ggbzpp, p.IBZA.j.inv) tmp <- t(A.IBZA.j.inv) - tcrossprod(A.IBZA.j.inv.g) + (c + t(c)) / 2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) ZZ2 <- matrix(0, nrow(out$sigma.b), ncol(out$sigma.b)) ZZ2[both.idx, both.idx] <- ZZ G.Sigma.b[j, ] <- lav_matrix_vech(ZZ2) # dx.sigma.b[both.idx, both.idx] <- # dx.sigma.b[both.idx, both.idx, drop = FALSE] + ZZ # for dx.sigma.w PART1.b <- -1 * (IBZA.j.inv.g %*% (2 * t(IBZA.j.inv.BZ.p) + t(g.j) - t(g.j) %*% A.IBZA.j.inv.BZ) + IBZA.j.inv.BZ + tcrossprod(IBZA.j.inv.BZ.p)) PART2.b <- 2 * (IBZA.j.inv.g + IBZA.j.inv.BZ.p) # vector } else { ############## # dx.sigma.b # ############## bzpp <- A.IBZA.j.inv.BZ.p - pb.j c <- tcrossprod(bzpp, p.IBZA.j.inv) tmp <- t(A.IBZA.j.inv) + (c + t(c)) / 2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) ZZ2 <- matrix(0, nrow(out$sigma.b), ncol(out$sigma.b)) 
ZZ2[both.idx, both.idx] <- ZZ G.Sigma.b[j, ] <- lav_matrix_vech(ZZ2) # dx.sigma.b[both.idx, both.idx] <- # dx.sigma.b[both.idx, both.idx, drop = FALSE] + ZZ PART1.b <- -1 * (IBZA.j.inv.BZ + tcrossprod(IBZA.j.inv.BZ.p)) PART2.b <- 2 * IBZA.j.inv.BZ.p # vector } ############## # dx.sigma.w # ############## PART1 <- matrix(0, ny, ny) PART1[both.idx, both.idx] <- PART1.b PART2 <- matrix(0, ny, 1L) PART2[both.idx, 1L] <- PART2.b ij.index <- which(cluster.idx == j) pij <- PIJ[ij.index, , drop = FALSE] which.compl <- which(MPi[ij.index] == 0L) which.incompl <- which(MPi[ij.index] != 0L) AP2 <- rep(list(sigma.w.inv %*% PART2), length(ij.index)) AP1A.a <- AP1A.b <- matrix(0, ny, ny) # A.j.full <- matrix(0, ny, ny) if (length(which.compl) > 0L) { tmp <- (sigma.w.inv %*% PART1 %*% sigma.w.inv) AP1A.a <- tmp * length(which.compl) # A.j.full <- A.j.full + sigma.w.inv * length(which.compl) } if (length(which.incompl) > 0L) { p.idx <- MPi[ij.index][which.incompl] tmp <- lapply(WIP[p.idx], function(x) { x %*% PART1 %*% x }) AP1A.b <- Reduce("+", tmp) AP2[which.incompl] <- lapply(WIP[p.idx], function(x) { x %*% PART2 }) # A.j.full <- A.j.full + Reduce("+", WIP[ p.idx ]) } t1 <- AP1A.a + AP1A.b t2 <- (do.call("cbind", AP2) - t(pij)) %*% pij AA.wj <- t1 + t2 tmp <- A.j.full + (AA.wj + t(AA.wj)) / 2 # symmetry correction ZZ <- 2 * tmp diag(ZZ) <- diag(tmp) G.Sigma.w[j, ] <- lav_matrix_vech(ZZ) # dx.sigma.w <- dx.sigma.w + ZZ ########### # dx.mu.y # ########### tmp <- numeric(ny) if (nz > 0L) { tmp[both.idx] <- IBZA.j.inv.g + IBZA.j.inv.BZ.p } else { tmp[both.idx] <- IBZA.j.inv.BZ.p } gbzpp <- A.j.full %*% tmp - p.j # dx.mu.y <- dx.mu.y + drop(2 * gbzpp) G.muy[j, ] <- drop(2 * gbzpp) } # j # browser() # rearrange columns to Mu.W, Mu.B, Sigma.W, Sigma.B ov.idx <- Lp$ov.idx p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # Mu.W (for within-only) Mu.W.tilde <- matrix(0, nclusters, p.tilde) Mu.W.tilde[, ov.idx[[1]]] <- G.muy Mu.W.tilde[, Lp$both.idx[[2]]] <- 0 # ZERO!!! 
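# variables that live at both levels get their mean scores through Mu.B below;
# their within-level mean entries are therefore forced to zero here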
Mu.W <- Mu.W.tilde[, ov.idx[[1]], drop = FALSE] # Mu.B Mu.B.tilde <- matrix(0, nclusters, p.tilde) Mu.B.tilde[, ov.idx[[1]]] <- G.muy if (length(between.idx) > 0L) { Mu.B.tilde[, between.idx] <- G.muz } Mu.B <- Mu.B.tilde[, ov.idx[[2]], drop = FALSE] # Sigma.W Sigma.W <- G.Sigma.w # Sigma.B if (length(between.idx) > 0L) { p.tilde.star <- p.tilde * (p.tilde + 1) / 2 B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) Sigma.B.tilde <- matrix(0, nclusters, p.tilde.star) col.idx <- lav_matrix_vech(B.tilde[ov.idx[[1]], ov.idx[[1]], drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.b col.idx <- lav_matrix_vec(B.tilde[ov.idx[[1]], between.idx, drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.yz col.idx <- lav_matrix_vech(B.tilde[between.idx, between.idx, drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.zz col.idx <- lav_matrix_vech(B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE ]) Sigma.B <- Sigma.B.tilde[, col.idx, drop = FALSE] } else { p.tilde.star <- p.tilde * (p.tilde + 1) / 2 B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) Sigma.B.tilde <- matrix(0, nclusters, p.tilde.star) col.idx <- lav_matrix_vech(B.tilde[ov.idx[[1]], ov.idx[[1]], drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.Sigma.b col.idx <- lav_matrix_vech(B.tilde[ov.idx[[2]], ov.idx[[2]], drop = FALSE ]) Sigma.B <- Sigma.B.tilde[, col.idx, drop = FALSE] # Sigma.B <- G.Sigma.b } SCORES <- cbind(Mu.W, Sigma.W, Mu.B, Sigma.B) SCORES } # first-order information: outer crossprod of scores per cluster lav_mvnorm_cluster_missing_information_firstorder <- function( Y1 = NULL, Y2 = NULL, Lp = NULL, Mp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, x.idx = NULL, divide.by.two = FALSE, Sinv.method = "eigen") { N <- NROW(Y1) SCORES <- lav_mvnorm_cluster_missing_scores_2l( Y1 = Y1, Y2 = Y2, Lp = Lp, Mp = Mp, Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, Sinv.method = Sinv.method ) # divide by 2 (if we want scores wrt objective function) if (divide.by.two) { SCORES <- SCORES / 2 } # unit information information <- crossprod(SCORES) / Lp$nclusters[[2]] # if x.idx, set rows/cols to zero if (length(x.idx) > 0L) { nw <- length(as.vector(Mu.W)) nw.star <- nw * (nw + 1) / 2 nb <- length(as.vector(Mu.B)) ov.idx <- Lp$ov.idx x.idx.w <- which(ov.idx[[1]] %in% x.idx) if (length(x.idx.w) > 0L) { xw.idx <- c( x.idx.w, nw + lav_matrix_vech_which_idx(n = nw, idx = x.idx.w) ) } else { xw.idx <- integer(0L) } x.idx.b <- which(ov.idx[[2]] %in% x.idx) if (length(x.idx.b) > 0L) { xb.idx <- c( x.idx.b, nb + lav_matrix_vech_which_idx(n = nb, idx = x.idx.b) ) } else { xb.idx <- integer(0L) } all.idx <- c(xw.idx, nw + nw.star + xb.idx) information[all.idx, ] <- 0 information[, all.idx] <- 0 } information } # observed information # order: mu.w within, vech(sigma.w) within, mu.b between, vech(sigma.b) between # mu.w rows/cols that are split within/between are forced to zero # # numerical approximation (for now) lav_mvnorm_cluster_missing_information_observed <- function( Y1 = NULL, Y2 = NULL, Lp = NULL, Mp = NULL, YLp = NULL, Mu.W = NULL, Sigma.W = NULL, Mu.B = NULL, Sigma.B = NULL, x.idx = integer(0L), Sinv.method = "eigen") { nobs <- Lp$nclusters[[1]] nw <- length(as.vector(Mu.W)) nw.star <- nw * (nw + 1) / 2 nb <- length(as.vector(Mu.B)) nb.star <- nb * (nb + 1) / 2 ov.idx <- Lp$ov.idx p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # Mu.W (for within-only) Mu.W.tilde <- numeric(p.tilde) Mu.W.tilde[ov.idx[[1]]] <- Mu.W # local function -- gradient GRAD <- function(x) { # Mu.W (for within-only)
Mu.W.tilde2 <- numeric(p.tilde) Mu.W.tilde2[ov.idx[[1]]] <- x[1:nw] Mu.W.tilde2[Lp$both.idx[[2]]] <- Mu.W.tilde[Lp$both.idx[[2]]] Mu.W2 <- Mu.W.tilde2[ov.idx[[1]]] Sigma.W2 <- lav_matrix_vech_reverse(x[nw + 1:nw.star]) Mu.B2 <- x[nw + nw.star + 1:nb] Sigma.B2 <- lav_matrix_vech_reverse(x[nw + nw.star + nb + 1:nb.star]) dx <- lav_mvnorm_cluster_missing_dlogl_2l_samplestats( Y1 = Y1, Y2 = Y2, Lp = Lp, Mp = Mp, Mu.W = Mu.W2, Sigma.W = Sigma.W2, Mu.B = Mu.B2, Sigma.B = Sigma.B2, return.list = FALSE, Sinv.method = Sinv.method ) # dx is for -2*logl -1 / 2 * dx } # start.x start.x <- c( as.vector(Mu.W), lav_matrix_vech(Sigma.W), as.vector(Mu.B), lav_matrix_vech(Sigma.B) ) # total information information <- -1 * numDeriv::jacobian(func = GRAD, x = start.x) # unit information information <- information / Lp$nclusters[[2]] # if x.idx, set rows/cols to zero if (length(x.idx) > 0L) { x.idx.w <- which(ov.idx[[1]] %in% x.idx) if (length(x.idx.w) > 0L) { xw.idx <- c( x.idx.w, nw + lav_matrix_vech_which_idx(n = nw, idx = x.idx.w) ) } else { xw.idx <- integer(0L) } x.idx.b <- which(ov.idx[[2]] %in% x.idx) if (length(x.idx.b) > 0L) { xb.idx <- c( x.idx.b, nb + lav_matrix_vech_which_idx(n = nb, idx = x.idx.b) ) } else { xb.idx <- integer(0L) } all.idx <- c(xw.idx, nw + nw.star + xb.idx) information[all.idx, ] <- 0 information[, all.idx] <- 0 } information } lavaan/R/lav_object_post_check.R0000644000176200001440000000504214627656441016372 0ustar liggesusers# check if a fitted model is admissible lav_object_post_check <- function(object) { stopifnot(inherits(object, "lavaan")) lavpartable <- object@ParTable lavmodel <- object@Model lavdata <- object@Data var.ov.ok <- var.lv.ok <- result.ok <- TRUE # 1a. check for negative variances ov var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs %in% lavNames(object, "ov") & lavpartable$lhs == lavpartable$rhs) if (length(var.idx) > 0L && any(lavpartable$est[var.idx] < 0.0)) { result.ok <- var.ov.ok <- FALSE lav_msg_warn(gettext("some estimated ov variances are negative")) } # 1b. check for negative variances lv var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs %in% lavNames(object, "lv") & lavpartable$lhs == lavpartable$rhs) if (length(var.idx) > 0L && any(lavpartable$est[var.idx] < 0.0)) { result.ok <- var.lv.ok <- FALSE lav_msg_warn(gettext("some estimated lv variances are negative")) } # 2. is cov.lv (PSI) positive definite? (only if we did not already warn # for negative variances) if (var.lv.ok && length(lavNames(lavpartable, type = "lv.regular")) > 0L) { ETA <- lavTech(object, "cov.lv") for (g in 1:lavdata@ngroups) { if (nrow(ETA[[g]]) == 0L) next txt.group <- if (lavdata@ngroups > 1L) gettextf("in group %s", g) else "" eigvals <- eigen(ETA[[g]], symmetric = TRUE, only.values = TRUE)$values if (any(eigvals < -1 * .Machine$double.eps^(3 / 4))) { lav_msg_warn(gettextf( "covariance matrix of latent variables is not positive definite %s; use lavInspect(fit, \"cov.lv\") to investigate.", txt.group )) result.ok <- FALSE } } } # 3. 
is THETA positive definite (but only for numeric variables) # and only if we have not already warned for negative ov variances if (var.ov.ok) { THETA <- lavTech(object, "theta") for (g in 1:lavdata@ngroups) { num.idx <- lavmodel@num.idx[[g]] if (length(num.idx) > 0L) { txt.group <- if (lavdata@ngroups > 1L) gettextf("in group %s", g) else "" eigvals <- eigen(THETA[[g]][num.idx, num.idx, drop = FALSE], symmetric = TRUE, only.values = TRUE )$values if (any(eigvals < -1 * .Machine$double.eps^(3 / 4))) { lav_msg_warn(gettextf( "the covariance matrix of the residuals of the observed variables (theta) is not positive definite %s; use lavInspect(fit, \"theta\") to investigate.", txt.group)) result.ok <- FALSE } } } } result.ok } lavaan/R/lav_bvreg.R0000644000176200001440000003753714627656440014034 0ustar liggesusers# the weighted bivariate linear regression model # YR 14 March 2020 (replacing the old lav_pearson.R + lav_binorm.R routines) # # - bivariate standard normal # - pearson correlation # - bivariate linear regression # - using sampling weights wt # density of a bivariate __standard__ normal lav_dbinorm <- dbinorm <- function(u, v, rho, force.zero = FALSE) { # dirty hack to handle extreme large values for rho # note that u, v, and rho are vectorized! RHO.limit <- 0.9999 abs.rho <- abs(rho) idx <- which(abs.rho > RHO.limit) if (length(idx) > 0L) { rho[idx] <- sign(rho[idx]) * RHO.limit } R <- 1 - rho * rho out <- 1 / (2 * pi * sqrt(R)) * exp(-0.5 * (u * u - 2 * rho * u * v + v * v) / R) # if abs(u) or abs(v) are very large (say, >10), set result equal # to exactly zero idx <- which(abs(u) > 10 | abs(v) > 10) if (length(idx) > 0L && force.zero) { out[idx] <- 0 } out } # partial derivative - rho lav_dbinorm_drho <- function(u, v, rho) { R <- 1 - rho * rho dbinorm(u, v, rho) * (u * v * R - rho * (u * u - 2 * rho * u * v + v * v) + rho * R) / (R * R) } # partial derivative - u lav_dbinorm_du <- function(u, v, rho) { R <- 1 - rho * rho -dbinorm(u, v, rho) * (u - rho * v) / R } # partial derivative - v lav_dbinorm_dv <- function(u, v, rho) { R <- 1 - rho * rho -dbinorm(u, v, rho) * (v - rho * u) / R } # CDF of bivariate standard normal # function pbinorm(upper.x, upper.y, rho) # partial derivative pbinorm - upper.x lav_pbinorm_dupperx <- function(upper.x, upper.y, rho = 0.0) { R <- 1 - rho * rho dnorm(upper.x) * pnorm((upper.y - rho * upper.x) / sqrt(R)) } lav_pbinorm_duppery <- function(upper.x, upper.y, rho = 0.0) { R <- 1 - rho * rho dnorm(upper.y) * pnorm((upper.x - rho * upper.y) / sqrt(R)) } lav_pbinorm_drho <- function(upper.x, upper.y, rho = 0.0) { dbinorm(upper.x, upper.y, rho) } # switch between pbivnorm, mnormt, ... pbinorm <- function(upper.x = NULL, upper.y = NULL, rho = 0.0, lower.x = -Inf, lower.y = -Inf, check = FALSE) { pbinorm2( upper.x = upper.x, upper.y = upper.y, rho = rho, lower.x = lower.x, lower.y = lower.y, check = check ) } # using vectorized version (a la pbivnorm) pbinorm2 <- function(upper.x = NULL, upper.y = NULL, rho = 0.0, lower.x = -Inf, lower.y = -Inf, check = FALSE) { N <- length(upper.x) stopifnot(length(upper.y) == N) if (N > 1L) { if (length(rho) == 1L) { rho <- rep(rho, N) } if (length(lower.x) == 1L) { lower.x <- rep(lower.x, N) } if (length(lower.y) == 1L) { lower.y <- rep(lower.y, N) } } upper.only <- all(lower.x == -Inf & lower.y == -Inf) if (upper.only) { upper.x[upper.x == +Inf] <- exp(10) # better pnorm?
upper.y[upper.y == +Inf] <- exp(10) upper.x[upper.x == -Inf] <- -exp(10) upper.y[upper.y == -Inf] <- -exp(10) res <- pbivnorm(upper.x, upper.y, rho = rho) } else { # pbivnorm does not handle -Inf well... lower.x[lower.x == -Inf] <- -exp(10) lower.y[lower.y == -Inf] <- -exp(10) res <- pbivnorm(upper.x, upper.y, rho = rho) - pbivnorm(lower.x, upper.y, rho = rho) - pbivnorm(upper.x, lower.y, rho = rho) + pbivnorm(lower.x, lower.y, rho = rho) } res } # pearson correlation # if no missing, solution is just cor(Y1,Y2) or cor(e1,e2) # but if missing, two-step solution is NOT the same as cor(Y1,Y2) or cor(e1,e2) lav_bvreg_cor_twostep_fit <- function(Y1, Y2, eXo = NULL, wt = NULL, fit.y1 = NULL, fit.y2 = NULL, Y1.name = NULL, Y2.name = NULL, optim.method = "nlminb1", # optim.method = "none", optim.scale = 1, init.theta = NULL, control = list()) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvreg_fit(y = Y2, X = eXo, wt = wt) } # create cache environment cache <- lav_bvreg_init_cache(fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt) # the complete case is trivial if (!anyNA(fit.y1$y) && !anyNA(fit.y2$y)) { return(cache$theta[1L]) } # optim.method minObjective <- lav_bvreg_min_objective minGradient <- lav_bvreg_min_gradient minHessian <- lav_bvreg_min_hessian if (optim.method == "nlminb" || optim.method == "nlminb2") { # nothing to do } else if (optim.method == "nlminb0") { minGradient <- minHessian <- NULL } else if (optim.method == "nlminb1") { minHessian <- NULL } else if (optim.method == "none") { return(cache$theta[1L]) } # optimize if (is.null(control$trace)) { control$trace <- ifelse(lav_verbose(), 1, 0) } # init theta? if (!is.null(init.theta)) { start.x <- init.theta } else { start.x <- cache$theta } # try 1 optim <- nlminb( start = start.x, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control, scale = optim.scale, lower = -0.999, upper = +0.999, cache = cache ) # try 2 (scale = 10) if (optim$convergence != 0L) { optim <- nlminb( start = start.x, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control, scale = 10, lower = -0.999, upper = +0.999, cache = cache ) } # try 3 (start = 0, step.min = 0.1) if (optim$convergence != 0L) { control$step.min <- 0.1 minGradient <- lav_bvreg_min_gradient # try again, with different starting value optim <- nlminb( start = 0, objective = minObjective, gradient = minGradient, hessian = NULL, control = control, scale = optim.scale, lower = -0.999, upper = +0.999, cache = cache ) } # check convergence if (optim$convergence != 0L) { if (!is.null(Y1.name) && !is.null(Y2.name)) { lav_msg_warn(gettextf( "estimation of the pearson correlation did not converge for variables %1$s and %2$s.", Y1.name, Y2.name)) } else { lav_msg_warn(gettext( "estimation of the pearson correlation(s) did not always converge")) } # use init (as we always did in < 0.6-6; this is also what Mplus does) rho <- start.x } else { # store result rho <- optim$par } rho } # Y1 = linear # Y2 = linear lav_bvreg_init_cache <- function(fit.y1 = NULL, fit.y2 = NULL, wt = NULL, scores = FALSE, parent = parent.frame()) { # data Y1 <- fit.y1$y Y2 <- fit.y2$y eXo <- fit.y1$X # Y1 Y1c <- Y1 - fit.y1$yhat evar.y1 <- fit.y1$theta[fit.y1$var.idx] sd.y1 <- sqrt(evar.y1) eta.y1 <- fit.y1$yhat # Y2 Y2c <- Y2 - fit.y2$yhat evar.y2 <- fit.y2$theta[fit.y2$var.idx] sd.y2 <- sqrt(evar.y2) eta.y2 <- fit.y2$yhat # exo?
if (is.null(eXo)) { nexo <- 0L } else { nexo <- ncol(eXo) } # nobs if (is.null(wt)) { N <- length(Y1) } else { N <- sum(wt) } # starting value if (fit.y1$nexo > 0L) { E1 <- Y1 - fit.y1$yhat E2 <- Y2 - fit.y2$yhat if (is.null(wt)) { rho.init <- cor(E1, E2, use = "pairwise.complete.obs") } else { tmp <- na.omit(cbind(E1, E2, wt)) rho.init <- cov.wt(tmp[, 1:2], wt = tmp[, 3], cor = TRUE)$cor[2, 1] } } else { if (is.null(wt)) { rho.init <- cor(Y1, Y2, use = "pairwise.complete.obs") } else { tmp <- na.omit(cbind(Y1, Y2, wt)) rho.init <- cov.wt(tmp[, 1:2], wt = tmp[, 3], cor = TRUE)$cor[2, 1] } } # sanity check if (is.na(rho.init) || abs(rho.init) >= 1.0) { rho.init <- 0.0 } # parameter vector theta <- rho.init # only # different cache if scores or not if (scores) { out <- list2env( list( nexo = nexo, theta = theta, N = N, Y1c = Y1c, Y2c = Y2c, eXo = eXo, evar.y1 = evar.y1, sd.y1 = sd.y1, eta.y1 = eta.y1, evar.y2 = evar.y2, sd.y2 = sd.y2, eta.y2 = eta.y2 ), parent = parent ) } else { out <- list2env( list( nexo = nexo, theta = theta, N = N, Y1c = Y1c, Y2c = Y2c, evar.y1 = evar.y1, sd.y1 = sd.y1, eta.y1 = eta.y1, evar.y2 = evar.y2, sd.y2 = sd.y2, eta.y2 = eta.y2 ), parent = parent ) } out } # casewise likelihoods, unweighted! lav_bvreg_lik_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] cov.y12 <- rho * sqrt(evar.y1) * sqrt(evar.y2) sigma <- matrix(c(evar.y1, cov.y12, cov.y12, evar.y2), 2L, 2L) lik <- exp(lav_mvnorm_loglik_data( Y = cbind(Y1c, Y2c), wt = NULL, Mu = c(0, 0), Sigma = sigma, casewise = TRUE )) # catch very small values lik.toosmall.idx <- which(lik < sqrt(.Machine$double.eps)) lik[lik.toosmall.idx] <- as.numeric(NA) return(lik) }) } lav_bvreg_logl_cache <- function(cache = NULL) { with(cache, { lik <- lav_bvreg_lik_cache(cache) # unweighted! 
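# note: casewise lik values below sqrt(.Machine$double.eps) were set to NA in
# lav_bvreg_lik_cache(); the na.rm = TRUE below silently drops those cases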
if (!is.null(wt)) { logl <- sum(wt * log(lik), na.rm = TRUE) } else { logl <- sum(log(lik), na.rm = TRUE) } return(logl) }) } lav_bvreg_gradient_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] R <- (1 - rho * rho) sd.y1.y2 <- sd.y1 * sd.y2 t1 <- (Y1c * Y2c) / sd.y1.y2 t2 <- (Y1c * Y1c) / evar.y1 - (2 * rho * t1) + (Y2c * Y2c) / evar.y2 dx <- (rho + t1 - t2 * rho / R) / R # to be consistent with (log)lik_cache if (length(lik.toosmall.idx) > 0L) { dx[lik.toosmall.idx] <- as.numeric(NA) } if (is.null(wt)) { dx.rho <- sum(dx, na.rm = TRUE) } else { dx.rho <- sum(wt * dx, na.rm = TRUE) } return(dx.rho) }) } lav_bvreg_hessian_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] rho2 <- rho * rho R2 <- R * R R3 <- R * R * R h <- 1 / R - (2 * rho2 * t2) / R3 + 2 * rho2 * (1 - t2 / R) / R2 + 4 * rho * t1 / R2 - t2 / R2 # to be consistent with (log)lik_cache if (length(lik.toosmall.idx) > 0L) { h[lik.toosmall.idx] <- as.numeric(NA) } if (is.null(wt)) { H <- sum(h, na.rm = TRUE) } else { H <- sum(wt * h, na.rm = TRUE) } dim(H) <- c(1L, 1L) # for nlminb return(H) }) } # compute total (log)likelihood, for specific 'x' (nlminb) lav_bvreg_min_objective <- function(x, cache = NULL) { cache$theta <- x -1 * lav_bvreg_logl_cache(cache = cache) / cache$N } # compute gradient, for specific 'x' (nlminb) lav_bvreg_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_bvreg_logl_cache(cache = cache) } -1 * lav_bvreg_gradient_cache(cache = cache) / cache$N } # compute hessian, for specific 'x' (nlminb) lav_bvreg_min_hessian <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { tmp <- lav_bvreg_logl_cache(cache = cache) tmp <- lav_bvreg_gradient_cache(cache = cache) } -1 * lav_bvreg_hessian_cache(cache = cache) / cache$N } # casewise scores - cache # FIXME: should we also set 'lik.toosmall.idx' cases to NA? 
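# (the dx.* terms below are the casewise first derivatives of the bivariate
# normal log-likelihood wrt the means, residual variances, slopes and rho;
# sampling weights, when present, simply multiply each case's score)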
lav_bvreg_cor_scores_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] R <- (1 - rho * rho) # mu.y1 dx.mu.y1 <- (2 * Y1c / evar.y1 - 2 * rho * Y2c / (sd.y1 * sd.y2)) / (2 * R) if (!is.null(wt)) { dx.mu.y1 <- wt * dx.mu.y1 } # mu.y2 dx.mu.y2 <- -(2 * rho * Y1c / (sd.y1 * sd.y2) - 2 * Y2c / evar.y2) / (2 * R) if (!is.null(wt)) { dx.mu.y2 <- wt * dx.mu.y2 } # evar.y1 dx.var.y1 <- -(0.5 / evar.y1 - ((Y1c * Y1c) / (evar.y1 * evar.y1) - rho * Y1c * Y2c / (evar.y1 * sd.y1 * sd.y2)) / (2 * R)) if (!is.null(wt)) { dx.var.y1 <- wt * dx.var.y1 } # var.y2 dx.var.y2 <- -(0.5 / evar.y2 + (rho * Y1c * Y2c / (evar.y2 * sd.y1 * sd.y2) - (Y2c * Y2c) / (evar.y2 * evar.y2)) / (2 * R)) if (!is.null(wt)) { dx.var.y2 <- wt * dx.var.y2 } # sl.y1 dx.sl.y1 <- NULL if (nexo > 0L) { dx.sl.y1 <- dx.mu.y1 * eXo # weights already included in dx.mu.y1 } # sl.y2 dx.sl.y2 <- NULL if (nexo > 0L) { dx.sl.y2 <- dx.mu.y2 * eXo # weights already included in dx.mu.y2 } # rho z <- (Y1c * Y1c) / evar.y1 - 2 * rho * Y1c * Y2c / (sd.y1 * sd.y2) + (Y2c * Y2c) / evar.y2 dx.rho <- rho / R + (Y1c * Y2c / (sd.y1 * sd.y2 * R) - z * rho / (R * R)) if (!is.null(wt)) { dx.rho <- wt * dx.rho } out <- list( dx.mu.y1 = dx.mu.y1, dx.var.y1 = dx.var.y1, dx.mu.y2 = dx.mu.y2, dx.var.y2 = dx.var.y2, dx.sl.y1 = dx.sl.y1, dx.sl.y2 = dx.sl.y2, dx.rho = dx.rho ) return(out) }) } # casewise scores # # Y1 = linear # Y2 = linear lav_bvreg_cor_scores <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, evar.y1 = NULL, beta.y1 = NULL, evar.y2 = NULL, beta.y2 = NULL) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvreg_fit(y = Y2, X = eXo, wt = wt) } # user specified parameters if (!is.null(evar.y1) || !is.null(beta.y1)) { fit.y1 <- lav_uvreg_update_fit( fit.y = fit.y1, evar.new = evar.y1, beta.new = beta.y1 ) } if (!is.null(evar.y2) || !is.null(beta.y2)) { fit.y2 <- lav_uvreg_update_fit( fit.y = fit.y2, evar.new = evar.y2, beta.new = beta.y2 ) } # create cache environment cache <- lav_bvreg_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho SC <- lav_bvreg_cor_scores_cache(cache = cache) SC } # logl - no cache lav_bvreg_logl <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, evar.y1 = NULL, beta.y1 = NULL, evar.y2 = NULL, beta.y2 = NULL) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvreg_fit(y = Y2, X = eXo, wt = wt) } # user specified parameters if (!is.null(evar.y1) || !is.null(beta.y1)) { fit.y1 <- lav_uvreg_update_fit( fit.y = fit.y1, evar.new = evar.y1, beta.new = beta.y1 ) } if (!is.null(evar.y2) || !is.null(beta.y2)) { fit.y2 <- lav_uvreg_update_fit( fit.y = fit.y2, evar.new = evar.y2, beta.new = beta.y2 ) } # create cache environment cache <- lav_bvreg_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho lav_bvreg_logl_cache(cache = cache) } # lik - no cache lav_bvreg_lik <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, evar.y1 = NULL, beta.y1 = NULL, evar.y2 = NULL, beta.y2 = NULL, .log = FALSE) { if (is.null(fit.y1)) { fit.y1 <- lav_uvreg_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvreg_fit(y = Y2, X = eXo, wt = wt) } # user specified parameters if (!is.null(evar.y1) || !is.null(beta.y1)) { fit.y1 <- lav_uvreg_update_fit( fit.y = fit.y1, evar.new = evar.y1, beta.new = beta.y1 ) } if 
(!is.null(evar.y2) || !is.null(beta.y2)) { fit.y2 <- lav_uvreg_update_fit( fit.y = fit.y2, evar.new = evar.y2, beta.new = beta.y2 ) } # create cache environment cache <- lav_bvreg_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho lik <- lav_bvreg_lik_cache(cache = cache) if (.log) { lik <- log(lik) } if (!is.null(wt)) { if (.log) { lik <- wt * lik } else { tmp <- wt * log(lik) lik <- exp(tmp) } } lik } lavaan/R/lav_lavaan_step17_lavaan.R0000644000176200001440000000660314627656441016715 0ustar liggesuserslav_lavaan_step17_lavaan <- function(lavmc = NULL, timing = NULL, lavoptions = NULL, lavpartable = NULL, lavdata = NULL, lavsamplestats = NULL, lavmodel = NULL, lavcache = NULL, lavfit = NULL, lavboot = NULL, lavoptim = NULL, lavimplied = NULL, lavloglik = NULL, lavvcov = NULL, lavtest = NULL, lavh1 = NULL, lavbaseline = NULL, start.time0 = NULL) { # # # # # # # # # # # # 17. lavaan # # # # # # # # # # # # # stop timer # create lavaan object # if lavmodel@nefa > 0 # compute standardizedSolution and store in ParTable slot in lavaan object # if post-checking demanded and converged, execute # lavInspect(lavaan, "post.check") # timing$total <- (proc.time()[3] - start.time0) timing$start.time <- NULL lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_remove_cache(lavpartable) lavaan <- new("lavaan", # type_of_slot - where created or modified ? # ------------ ------------------------- - version = packageDescription("lavaan", fields = "Version"), call = lavmc, # match.call - ldw_adapt_match_call timing = timing, # list - ldw_add_timing Options = lavoptions, # list - options (2) / data (3) / partable (4) ParTable = lavpartable, # list - partable/bounds/start/model/estoptim/vcovboot/rotation pta = lavpta, # list - lav_partable_attributes Data = lavdata, # S4 class - data (3) SampleStats = lavsamplestats, # S4 class - samplestats (5) Model = lavmodel, # S4 class - model (9) / estoptim (11) / vcovboot (13) Cache = lavcache, # list - cache (10) Fit = lavfit, # S4 class - lav_model_fit (14bis) boot = lavboot, # list - vcovboot (13) optim = lavoptim, # list - estoptim (11) implied = lavimplied, # list - lav_model_implied (12) loglik = lavloglik, # list - lav_model_loglik (12) vcov = lavvcov, # list - vcovboot (13) test = lavtest, # list - test (14) h1 = lavh1, # list - h1 (6) baseline = lavbaseline, # list - baseline (15) internal = list(), # empty list external = list() # empty list ) # if model.type = "efa", add standardized solution to partable if ((.hasSlot(lavmodel, "nefa")) && (lavmodel@nefa > 0L)) { if (lav_verbose()) { cat("computing standardized solution ... 
") } std <- standardizedSolution(lavaan, remove.eq = FALSE, remove.ineq = FALSE, remove.def = FALSE ) if (lav_verbose()) { cat(" done.\n") } lavaan@ParTable$est.std <- std$est.std lavaan@ParTable$se.std <- std$se } # post-fitting check of parameters if (!is.null(lavoptions$check.post) && lavoptions$check.post && lavTech(lavaan, "converged")) { if (lav_verbose()) { cat("post check ...") } lavInspect(lavaan, "post.check") if (lav_verbose()) { cat(" done.\n") } } lavaan } lavaan/R/lav_data_update.R0000644000176200001440000001171214627656441015176 0ustar liggesusers# update lavdata object # - new dataset (lav_data_update) # - only subset of data (lav_data_update_subset) (for sam()) # YR - 18 Jan 2021 (so we don't need to export lav_data_*_patterns functions) # - 28 May 2023 lav_data_update_subset() # update lavdata object with new dataset # - assuming everything else stays the same # - optionally, also provide boot.idx (per group) to adapt internal slots lav_data_update <- function(lavdata = NULL, newX = NULL, BOOT.idx = NULL, lavoptions = NULL) { stopifnot(length(newX) == lavdata@ngroups) stopifnot(!is.null(lavoptions)) newdata <- lavdata # replace data 'X' slot for each group for (g in 1:lavdata@ngroups) { # replace raw data newdata@X[[g]] <- newX[[g]] # Mp + nobs if (lavoptions$missing != "listwise") { newdata@Mp[[g]] <- lav_data_missing_patterns(newX[[g]], sort.freq = FALSE, coverage = FALSE ) newdata@nobs[[g]] <- (nrow(newdata@X[[g]]) - length(newdata@Mp[[g]]$empty.idx)) } # Rp if (length(lavdata@ov.names.x[[g]]) == 0L && all(lavdata@ov.names[[g]] %in% lavdata@ov$name[lavdata@ov$type == "ordered"])) { newdata@Rp[[g]] <- lav_data_resp_patterns(newX[[g]]) } # Lp if (lavdata@nlevels > 1L) { # CHECKME! # extract cluster variable(s), for this group clus <- matrix(0, nrow(newX[[g]]), lavdata@nlevels - 1L) for (l in 2:lavdata@nlevels) { clus[, (l - 1L)] <- lavdata@Lp[[g]]$cluster.idx[[l]] } newdata@Lp[[g]] <- lav_data_cluster_patterns( Y = newX[[g]], clus = clus, cluster = lavdata@cluster, ov.names = lavdata@ov.names[[g]], ov.names.l = lavdata@ov.names.l[[g]] ) } } # if boot.idx if provided, also adapt eXo and WT if (!is.null(BOOT.idx)) { boot.idx <- BOOT.idx[[g]] # eXo if (!is.null(lavdata@eXo[[g]])) { newdata@eXo[[g]] <- lavdata@eXo[[g]][boot.idx, , drop = FALSE] } # sampling weights if (!is.null(lavdata@weights[[g]])) { newdata@weights[[g]] <- lavdata@weights[[g]][boot.idx] } } # g # return update data object newdata } # update lavdata, keeping only a subset of the observed variables # (assuming everything else stays the same) lav_data_update_subset <- function(lavdata = NULL, ov.names = NULL) { stopifnot(length(ov.names) == length(lavdata@ov.names)) newdata <- lavdata # replace ov.names newdata@ov.names <- ov.names # ordered? 
if (length(lavdata@ordered) > 0L) { newdata@ordered <- lavdata@ordered[lavdata@ordered %in% ov.names] } # replace/update slots for each group for (g in 1:lavdata@ngroups) { # sanity check: if (all(lavdata@ov.names[[g]] %in% ov.names[[g]])) { # nothing to do next } # replace ov.names.x if (length(lavdata@ov.names.x[[g]]) > 0L) { newdata@ov.names.x[[g]] <- lavdata@ov.names.x[[g]][lavdata@ov.names.x[[g]] %in% ov.names[[g]]] } # replace ov.names.l if (newdata@nlevels > 1L) { for (l in 1:newdata@nlevels) { newdata@ov.names.l[[g]][[l]] <- lavdata@ov.names.l[[g]][[l]][lavdata@ov.names.l[[g]][[l]] %in% ov.names[[g]]] } } # ov table keep.idx <- which(lavdata@ov$name %in% unlist(ov.names)) newdata@ov <- lapply(lavdata@ov, "[", keep.idx) # replace raw data newdata@X[[g]] <- lavdata@X[[g]][, lavdata@ov.names[[g]] %in% ov.names[[g]], drop = FALSE] # eXo if (length(newdata@ov.names.x[[g]]) == 0L) { newdata@eXo[g] <- list(NULL) } else { newdata@eXo[[g]] <- lavdata@eXo[[g]][, lavdata@ov.names.x[[g]] %in% ov.names[[g]], drop = FALSE] } # Mp + nobs if (lavdata@missing != "listwise") { newdata@Mp[[g]] <- lav_data_missing_patterns(newdata@X[[g]], sort.freq = FALSE, coverage = FALSE ) newdata@nobs[[g]] <- (nrow(newdata@X[[g]]) - length(newdata@Mp[[g]]$empty.idx)) } # Rp if (length(newdata@ordered) == 0L) { # nothing to do } else if (length(newdata@ov.names.x[[g]]) == 0L && all(newdata@ov.names[[g]] %in% newdata@ov$name[newdata@ov$type == "ordered"])) { newdata@Rp[[g]] <- lav_data_resp_patterns(newdata@X[[g]]) } # Lp if (length(newdata@cluster) > 0L) { # extract cluster variable(s), for this group clus <- matrix(0, nrow(newdata@X[[g]]), lavdata@nlevels - 1L) for (l in 2:lavdata@nlevels) { clus[, (l - 1L)] <- lavdata@Lp[[g]]$cluster.idx[[l]] } if (newdata@nlevels > 1L) { multilevel <- TRUE } else { multilevel <- FALSE } OV.NAMES <- unique(c(ov.names[[g]], newdata@ov.names.x[[g]])) newdata@Lp[[g]] <- lav_data_cluster_patterns( Y = newdata@X[[g]], clus = clus, cluster = newdata@cluster, multilevel = multilevel, ov.names = OV.NAMES, ov.names.x = newdata@ov.names.x[[g]], ov.names.l = newdata@ov.names.l[[g]] ) } } # g # return updated data object newdata } lavaan/R/lav_optim_gn.R0000644000176200001440000002047514627656441014545 0ustar liggesusers# Gauss-Newton style optimization # # Initial version needed for DLS - model based # YR - 19 Jan 2021 # # TODO: # - what to do if the function value goes up? # - handle general (nonlinear) equality constraints # - handle general (nonlinear) inequality constraints # - better approach for simple bounds # ...
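# In compact form, a single (unconstrained) Gauss-Newton update is
#   theta.new = theta + solve(U, Q), with
#   Q = t(Delta) %*% W %*% (wls.obs - wls.est) and U = t(Delta) %*% W %*% Delta,
# where Delta = d sigma(theta) / d theta', wls.obs holds the observed (WLS)
# statistics, wls.est the model-implied ones, and W the expected (h1)
# information; the code below adds step-halving, simple bounds, and linear
# equality constraints (via Lagrange multipliers appended to the system).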
# YR - 04 Nov 2023: add huber = TRUE option to get 'outlier-robust' estimates # (see Yuan and Zhong 2008, where they call this IRLS_r) # objective function, plus 'extra' information # needed for a Gauss Newton step lav_objective_GN <- function(x, lavsamplestats = NULL, lavmodel = NULL, lavoptions = NULL, lavdata = NULL, extra = FALSE, lambda = NULL) { # evaluate objective function lavmodel <- lav_model_set_parameters(lavmodel = lavmodel, x = x) obj <- lav_model_objective( lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats ) attributes(obj) <- NULL # monitoring obj only if (!extra) { # handle linear equality constraints if (lavmodel@eq.constraints) { hx <- lavmodel@ceq.function(x) obj <- obj + max(abs(lambda)) * sum(abs(hx)) } return(list(obj = obj, U.invQ = NULL, lambda = lambda)) } # model implied statistics lavimplied <- lav_model_implied(lavmodel = lavmodel) wls.est <- lav_model_wls_est(lavmodel = lavmodel, lavimplied = lavimplied) # observed statistics wls.obs <- lavsamplestats@WLS.obs # always use expected information A1 <- lav_model_h1_information_expected( lavobject = NULL, lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavimplied = lavimplied, lavh1 = NULL, lavcache = NULL ) # Delta Delta <- computeDelta(lavmodel = lavmodel) # first group g <- 1L if (lavmodel@estimator == "DWLS") { PRE.g <- t(Delta[[g]] * A1[[g]]) } else { PRE.g <- t(Delta[[g]]) %*% A1[[g]] } Q.g <- PRE.g %*% (wls.obs[[g]] - wls.est[[g]]) U.g <- PRE.g %*% Delta[[g]] # additional groups (if any) if (lavsamplestats@ngroups > 1L) { fg <- lavsamplestats@nobs[[1]] / lavsamplestats@ntotal Q <- fg * Q.g U <- fg * U.g for (g in 2:lavsamplestats@ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal if (lavmodel@estimator == "DWLS") { PRE.g <- t(Delta[[g]] * A1[[g]]) } else { PRE.g <- t(Delta[[g]]) %*% A1[[g]] } Q.g <- PRE.g %*% (wls.obs[[g]] - wls.est[[g]]) U.g <- PRE.g %*% Delta[[g]] Q <- Q + fg * Q.g U <- U + fg * U.g } } else { Q <- Q.g U <- U.g } # handle equality constraints # this can be made more efficient; see Jamshidian & Bentler 1993 # where instead of inverting a p+r matrix, they use a p-r matrix # (if the eq constraints are linear) if (lavmodel@eq.constraints) { hx <- lavmodel@ceq.function(x) npar <- nrow(U) H <- lavmodel@con.jac U <- U + crossprod(H) U <- rbind( cbind(U, t(H)), cbind(H, matrix(0, nrow(H), nrow(H))) ) Q <- rbind(Q, matrix(-hx, nrow(H), 1)) } # compute step # note, we could use U + k*I for a given scalar 'k' (Levenberg, 1944) # or U + k*(diag(U) (Marquardt, 1963) U.invQ <- drop(solve(U, Q)) if (lavmodel@eq.constraints) { # merit function lambda <- U.invQ[-seq_len(npar)] obj <- obj + max(abs(lambda)) * sum(abs(hx)) } else { lambda <- NULL } list(obj = obj, U.invQ = U.invQ, lambda = lambda) } lav_optim_gn <- function(lavmodel = NULL, lavsamplestats = NULL, lavpartable = NULL, lavdata = NULL, lavoptions = NULL) { # no support (yet) for nonlinear constraints nonlinear.idx <- c( lavmodel@ceq.nonlinear.idx, lavmodel@cin.nonlinear.idx ) if (length(nonlinear.idx) > 0L) { lav_msg_stop(gettext( "nonlinear constraints not supported (yet) with optim.method = \"GN\".")) } # no support (yet) for inequality constraints if (!is.null(body(lavmodel@cin.function))) { lav_msg_stop(gettext( "inequality constraints not supported (yet) with optim.method = \"GN\".")) } # extract current set of free parameters x <- lav_model_get_parameters(lavmodel) npar <- length(x) # extract bounds (if any) lb <- ub <- NULL if (!is.null(lavpartable) && 
!is.null(lavpartable$lower)) { lb <- lavpartable$lower[lavpartable$free > 0] stopifnot(length(x) == length(lb)) lb.idx <- which(x < lb) if (length(lb.idx) > 0L) { x[lb.idx] <- lb[lb.idx] } } if (!is.null(lavpartable) && !is.null(lavpartable$upper)) { ub <- lavpartable$upper[lavpartable$free > 0] stopifnot(length(x) == length(ub)) ub.idx <- which(x > ub) if (length(ub.idx) > 0L) { x[ub.idx] <- ub[ub.idx] } } # options iter.max <- lavoptions$optim.gn.iter.max tol.x <- lavoptions$optim.gn.tol.x stephalf.max <- as.integer(lavoptions$optim.gn.stephalf.max) if (stephalf.max < 0L) { stephalf.max <- 0L } # initialize iter <- 0 alpha <- 1.0 old.x <- x # start Gauss-Newton steps for (iter in seq_len(iter.max)) { old.out <- lav_objective_GN( x = old.x, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavmodel = lavmodel, extra = TRUE ) old.obj <- old.out$obj U.invQ <- old.out$U.invQ # only the first time if (lav_verbose() && iter == 1L) { cat("iteration = ", sprintf("%2d", iter - 1L), ": objective = ", sprintf("%11.9f", old.obj), "\n", sep = "" ) } # update alpha <- 1.0 step <- U.invQ[seq_len(npar)] # TODO: if step-halving fails, we could also # allow the steps to be negative for (h in 1:max(1L, stephalf.max)) { new.x <- old.x + (alpha * step) # apply simple bounds (if any) if (!is.null(lb)) { lb.idx <- which(new.x < lb) if (length(lb.idx) > 0L) { new.x[lb.idx] <- lb[lb.idx] } } if (!is.null(ub)) { ub.idx <- which(new.x > ub) if (length(ub.idx) > 0L) { new.x[ub.idx] <- ub[ub.idx] } } new.obj <- lav_objective_GN( x = new.x, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavmodel = lavmodel, extra = FALSE, lambda = old.out$lambda )$obj if (is.finite(new.obj) && new.obj < old.obj) { break } else if (stephalf.max == 0L) { # no step-halving! break } else { # step-halving alpha <- alpha / 2.0 # if(verbose) { # cat(" -- step halving -- : alpha = ", alpha, "\n") # } } } # TODO - if this fails, we need to recover somehow # negative steps: if (stephalf.max != 0L && h == stephalf.max) { if (lav_verbose()) { cat(" -- step halving failed; function value may increase.\n") } # forcing step with alpha = 1 new.x <- old.x + (1 * step) } rms.x <- sqrt(mean((old.x - new.x) * (old.x - new.x))) # verbose? if (lav_verbose()) { cat("iteration = ", sprintf("%2d", iter), ": objective = ", sprintf("%11.9f", new.obj), " alpha = ", sprintf("%6.5f", alpha), " rms.x = ", sprintf("%9.9f", rms.x), "\n", sep = "" ) # print(new.x) } # check for convergence if (rms.x < tol.x) { old.x <- new.x old.obj <- new.obj if (lav_verbose()) { cat("Gauss-Newton algorithm converged: rms.x = ", sprintf("%12.12f", rms.x), " < ", sprintf("%12.12f", tol.x), "\n", sep = "" ) } break } else { old.x <- new.x old.obj <- new.obj } } # iter x <- new.x # one last evaluation, to get fx.group attribute lavmodel <- lav_model_set_parameters(lavmodel = lavmodel, x = x) fx <- lav_model_objective( lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats ) # add attributes if (iter < iter.max) { attr(x, "converged") <- TRUE attr(x, "warn.txt") <- "" } else { attr(x, "converged") <- FALSE attr(x, "warn.txt") <- paste("maximum number of iterations (", iter.max, ") ", "was reached without convergence.\n", sep = "" ) } attr(x, "iterations") <- iter attr(x, "control") <- list( iter.max = iter.max, tol.x = tol.x ) attr(x, "fx") <- fx x } lavaan/R/lav_test_Wald.R0000644000176200001440000000526014627656441014652 0ustar liggesusers# classic Wald test # # NOTE: does not handle redundant constraints yet!
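# The statistic is W = r(theta)' [J V J']^{-1} r(theta), where r() collects the
# restriction functions implied by the constraints, J = d r / d theta', and V
# is the vcov of the free parameters; under H0, W follows a chi-square
# distribution with df = the number of restrictions.
#
# Example (illustration only; model and labels are hypothetical, not run):
#   HS.model <- ' visual =~ x1 + b1*x2 + b2*x3 '
#   fit <- cfa(HS.model, data = HolzingerSwineford1939)
#   lavTestWald(fit, constraints = "b1 == b2")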
# lavTestWald <- function(object, constraints = NULL, verbose = FALSE) { if (!missing(verbose)) { current.verbose <- lav_verbose() if (lav_verbose(verbose)) on.exit(lav_verbose(current.verbose), TRUE) } if (object@optim$npar > 0L && !object@optim$converged) { lav_msg_stop(gettext("model did not converge")) } if (is.null(constraints) || all(nchar(constraints) == 0L)) { lav_msg_stop(gettext("constraints are empty")) } # extract slots lavoptions <- object@Options lavmodel <- object@Model lavpartable <- data.frame(object@ParTable) # remove == constraints from parTable eq.idx <- which(lavpartable$op == "==") if (length(eq.idx) > 0L) { lavpartable <- lavpartable[-eq.idx, ] } partable <- as.list(lavpartable) # parse constraints FLAT <- lavParseModelString(constraints, parser = lavoptions$parser) CON <- attr(FLAT, "constraints") LIST <- list() if (length(CON) > 0L) { lhs <- unlist(lapply(CON, "[[", "lhs")) op <- unlist(lapply(CON, "[[", "op")) rhs <- unlist(lapply(CON, "[[", "rhs")) LIST$lhs <- c(LIST$lhs, lhs) LIST$op <- c(LIST$op, op) LIST$rhs <- c(LIST$rhs, rhs) } else { lav_msg_stop(gettext( "no equality constraints found in constraints argument")) } # theta = free parameters only theta <- lav_model_get_parameters(lavmodel) # build constraint function ceq.function <- lav_partable_constraints_ceq( partable = partable, con = LIST, debug = FALSE ) # compute jacobian restrictions JAC <- try(lav_func_jacobian_complex(func = ceq.function, x = theta), silent = TRUE ) if (inherits(JAC, "try-error")) { # eg. pnorm() JAC <- lav_func_jacobian_simple(func = ceq.function, x = theta) } if (lav_verbose()) { cat("Restriction matrix (jacobian):\n") print(JAC) cat("\n") } # linear restriction theta.r <- ceq.function(theta) if (lav_verbose()) { cat("Restricted theta values:\n") print(theta.r) cat("\n") } # get VCOV # VCOV <- vcov(object, labels = FALSE) # avoid S4 dispatch VCOV <- lav_object_inspect_vcov(object, standardized = FALSE, free.only = TRUE, add.labels = FALSE, add.class = FALSE, remove.duplicated = FALSE ) # restricted vcov VCOV.r <- JAC %*% VCOV %*% t(JAC) # fixme: what if VCOV.r is singular? 
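# (one possible remedy -- not used here -- would be a generalized inverse,
# e.g. MASS::ginv(), at the cost of silently ignoring redundant constraints)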
# Wald test statistic Wald <- as.numeric(t(theta.r) %*% solve(VCOV.r) %*% theta.r) # df Wald.df <- nrow(JAC) # p-value based on chisq Wald.pvalue <- 1 - pchisq(Wald, df = Wald.df) # prepare output out <- list( stat = Wald, df = Wald.df, p.value = Wald.pvalue, se = lavoptions$se ) out } lavaan/R/lav_partable_random.R0000644000176200001440000001541014627656441016054 0ustar liggesusers# create random starting values starting from a parameter table # - using the lower/upper bounds and runif() for factor loadings # and variances # - using runif(,-1,+1) for correlations; rescale using variances # - check if Sigma.hat is PD; if not, try again # # YR 26 Feb 2024 lav_partable_random <- function(lavpartable = NULL, # needed if we still need to compute bounds: lavh1 = NULL, lavdata = NULL, lavsamplestats = NULL, lavoptions = NULL) { # check if we have bounds; if not, add them lavpta <- lav_partable_attributes(lavpartable) if (is.null(lavpartable$lower) || is.null(lavpartable$upper)) { lavoptions2 <- lavoptions lavoptions2$bounds <- "standard" lavoptions2$optim.bounds <- list( lower = c( "ov.var", "lv.var", "loadings", "covariances" ), upper = c( "ov.var", "lv.var", "loadings", "covariances" ), lower.factor = c(1.0, 1.0, 1.0, 0.999), upper.factor = c(1.0, 1.0, 1.0, 0.999), min.reliability.marker = 0.1, min.var.lv.endo = 0.005 ) lavpartable <- lav_partable_add_bounds( partable = lavpartable, lavh1 = lavh1, lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions2 ) } # replace -Inf/Inf by -1/1 * .Machine$double.eps (for runif) inf.idx <- which(lavpartable$lower < -1e+16) if (length(inf.idx) > 0L) { lavpartable$lower[inf.idx] <- -1e+16 } inf.idx <- which(lavpartable$upper > 1e+16) if (length(inf.idx) > 0L) { lavpartable$upper[inf.idx] <- 1e+16 } # empty lavpartable$start? 
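# Note on the random draws generated further below: variances are drawn
# uniformly within their (lower, upper) bounds, while covariances are
# generated as correlations and then rescaled by the corresponding standard
# deviations. A minimal sketch of that rescaling step (hypothetical values):
#
#     var1 <- runif(1, min = 0.5, max = 1.5)  # a (residual) variance
#     var2 <- runif(1, min = 0.5, max = 1.5)
#     r <- runif(1, min = -0.5, max = 0.5)    # a random correlation
#     cov12 <- r * sqrt(var1) * sqrt(var2)    # covariance on the raw scale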
if (is.null(lavpartable$start)) { START <- numeric(length(lavpartable$lhs)) # set loadings to 0.7 loadings.idx <- which(lavpartable$free > 0L & lavpartable$op == "=~") if (length(loadings.idx) > 0L) { START[loadings.idx] <- 0.7 } # set (only) variances to 1 var.idx <- which(lavpartable$free > 0L & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs) if (length(var.idx) > 0L) { START[var.idx] <- 1 } lavpartable$start <- START } # initial values START <- lavpartable$start nblocks <- lav_partable_nblocks(lavpartable) block.values <- lav_partable_block_values(lavpartable) for (b in 1:nblocks) { ov.names <- lavpta$vnames$ov[[b]] lv.names <- lavpta$vnames$lv[[b]] ov.ind.names <- lavpta$vnames$ov.ind[[b]] # start with the lv (residual) variances lv.var.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs %in% lv.names & lavpartable$rhs %in% lv.names & lavpartable$lhs == lavpartable$rhs) if (length(lv.var.idx) > 0L) { for (i in lv.var.idx) { if (lavpartable$free[i] > 0L && (lavpartable$lower[i] < lavpartable$upper[i])) { START[i] <- runif( n = 1L, min = lavpartable$lower[i], max = lavpartable$upper[i] ) } } } # first, we generate lv correlations, and then rescale to covariances lv.cov.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs %in% lv.names & lavpartable$rhs %in% lv.names & lavpartable$lhs != lavpartable$rhs) if (length(lv.cov.idx) > 0L) { for (i in lv.cov.idx) { if (lavpartable$free[i] > 0L && (lavpartable$lower[i] < lavpartable$upper[i])) { cor.val <- runif(n = 1L, -0.5, +0.5) var1.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$lhs[i] & lavpartable$lhs == lavpartable$rhs) var2.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs[i] & lavpartable$lhs == lavpartable$rhs) START[i] <- cor.val * sqrt(START[var1.idx]) * sqrt(START[var2.idx]) } } } # next, (residual) ov variances ov.var.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names & lavpartable$rhs %in% ov.names & lavpartable$lhs == lavpartable$rhs) if (length(ov.var.idx) > 0L) { for (i in ov.var.idx) { if (lavpartable$free[i] > 0L && (lavpartable$lower[i] < lavpartable$upper[i])) { START[i] <- runif( n = 1L, min = lavpartable$lower[i], max = lavpartable$upper[i] ) } } } # (residual) ov covariances (if any) ov.cov.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names & lavpartable$rhs %in% ov.names & lavpartable$lhs != lavpartable$rhs) if (length(ov.cov.idx) > 0L) { for (i in ov.cov.idx) { if (lavpartable$free[i] > 0L && (lavpartable$lower[i] < lavpartable$upper[i])) { cor.val <- runif(n = 1L, -0.5, +0.5) var1.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$lhs[i] & lavpartable$lhs == lavpartable$rhs) var2.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs[i] & lavpartable$lhs == lavpartable$rhs) START[i] <- cor.val * sqrt(START[var1.idx]) * sqrt(START[var2.idx]) } } } # finally, the lambda values, keeping in mind that # lambda_p^(u) = sqrt( upper(res.var.indicators_p) / # lower(var.factor) ) lambda.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "=~" & lavpartable$lhs %in% lv.names & lavpartable$rhs %in% ov.ind.names) if (length(lambda.idx)) { for (i in lambda.idx) { if 
(lavpartable$free[i] > 0L && (lavpartable$lower[i] < lavpartable$upper[i])) { varov.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs[i] & lavpartable$lhs == lavpartable$rhs) varlv.idx <- which(lavpartable$block == block.values[b] & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$lhs[i] & lavpartable$lhs == lavpartable$rhs) lambda.u <- sqrt(START[varov.idx] / START[varlv.idx]) START[i] <- runif(n = 1, -lambda.u, lambda.u) } } } } # sanity check; needed? current.warn <- lav_warn() if (lav_warn(TRUE)) on.exit(lav_warn(current.warn), TRUE) START <- lav_start_check_cov( lavpartable = lavpartable, start = START ) START } lavaan/R/lav_test_diff.R0000644000176200001440000004435514627656441014703 0ustar liggesusers# various ways to compute a (scaled) difference chi-square test statistic # - 0.6-13: fix multiple-group UG^2 bug in Satorra.2000 (reported by # Gronneberg, Foldnes and Moss) when Satterthwaite = TRUE and # ngroups > 1L (use old.approach = TRUE to get the old result) lav_test_diff_Satorra2000 <- function(m1, m0, H1 = TRUE, A.method = "delta", A = NULL, Satterthwaite = FALSE, scaled.shifted = FALSE, old.approach = FALSE) { if (scaled.shifted) { Satterthwaite <- TRUE } # extract information from m1 and m2 T1 <- m1@test[[1]]$stat r1 <- m1@test[[1]]$df T0 <- m0@test[[1]]$stat r0 <- m0@test[[1]]$df # m = difference between the df's m <- r0 - r1 # check for identical df setting if (m == 0L) { return(list( T.delta = (T0 - T1), scaling.factor = as.numeric(NA), df.delta = m, a = as.numeric(NA), b = as.numeric(NA) )) } # bail out here, if m == 0 (but we should catch this earlier) # if(m < 1L) { # txt <- paste("Can not compute (scaled) difference test when ", # "the degrees of freedom (df) are the same for both ", # "models:\n", # "Df model 1 = ", r1, ", and Df model 2 = ", r0, "\n", # sep = "") # stop(lav_txt2message(txt, header = "lavaan ERROR:")) # } Gamma <- lavTech(m1, "Gamma") # the same for m1 and m0 # check for NULL if (is.null(Gamma)) { lav_msg_stop(gettext( "can not compute Gamma matrix; perhaps missing = \"ml\"?")) } if (H1) { WLS.V <- lavTech(m1, "WLS.V") PI <- computeDelta(m1@Model) P <- lavTech(m1, "information") # needed? (yes, if H1 already has eq constraints) P.inv <- lav_model_information_augment_invert(m1@Model, information = P, inverted = TRUE ) # compute 'A' matrix # NOTE: order of parameters may change between H1 and H0, so be # careful! if (is.null(A)) { A <- lav_test_diff_A(m1, m0, method = A.method, reference = "H1") # take into account equality constraints m1 if (A.method == "delta") { if (m1@Model@eq.constraints) { A <- A %*% t(m1@Model@eq.constraints.K) } else if (.hasSlot(m1@Model, "ceq.simple.only") && m1@Model@ceq.simple.only) { A <- A %*% t(m1@Model@ceq.simple.K) } } if (lav_debug()) print(A) } } else { lav_msg_stop(gettext("not ready yet")) WLS.V <- lavTech(m0, "WLS.V") PI <- computeDelta(m0@Model) P <- lavTech(m0, "information") # needed? P.inv <- lav_model_information_augment_invert(m0@Model, information = P, inverted = TRUE ) # compute 'A' matrix # NOTE: order of parameters may change between H1 and H0, so be # careful! 
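# For readers: 'A' is the jacobian of the constraint function a(theta) that
# turns H1 into H0 (Satorra, 2000). E.g., with three free parameters and
# the single restriction theta1 == theta2, a(theta) = theta1 - theta2, so
# (hypothetical sketch, not part of the computation below):
#
#     af <- function(x) x[1] - x[2]
#     A <- lav_func_jacobian_complex(func = af, x = c(0.4, 0.4, 1.0))
#     # A is matrix(c(1, -1, 0), 1, 3)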
if (is.null(A)) { # m1, m0 OR m0, m1 (works for delta, but not for exact) A <- lav_test_diff_A(m1, m0, method = A.method, reference = "H0") # take into account equality constraints m1 if (m0@Model@eq.constraints) { A <- A %*% t(m0@Model@eq.constraints.K) } else if (.hasSlot(m0@Model, "ceq.simple.only") && m0@Model@ceq.simple.only) { A <- A %*% t(m0@Model@ceq.simple.K) } if (lav_debug()) print(A) } } # compute tr UG per group ngroups <- m1@SampleStats@ngroups UG.group <- vector("list", length = ngroups) # safety check: A %*% P.inv %*% t(A) should NOT contain all-zero # rows/columns # FIXME: is this really needed? As we use ginv later on APA <- A %*% P.inv %*% t(A) cSums <- colSums(APA) rSums <- rowSums(APA) empty.idx <- which(abs(cSums) < .Machine$double.eps^0.5 & abs(rSums) < .Machine$double.eps^0.5) if (length(empty.idx) > 0) { A <- A[-empty.idx, , drop = FALSE] } # PAAPAAP PAAPAAP <- P.inv %*% t(A) %*% MASS::ginv(A %*% P.inv %*% t(A)) %*% A %*% P.inv # compute scaling factor fg <- unlist(m1@SampleStats@nobs) / m1@SampleStats@ntotal # this is what we did <0.6-13 if (old.approach) { trace.UGamma <- numeric(ngroups) trace.UGamma2 <- numeric(ngroups) for (g in 1:ngroups) { UG.group <- WLS.V[[g]] %*% Gamma[[g]] %*% WLS.V[[g]] %*% PI[[g]] %*% PAAPAAP %*% t(PI[[g]]) trace.UGamma[g] <- sum(diag(UG.group)) if (Satterthwaite) { trace.UGamma2[g] <- sum(diag(UG.group %*% UG.group)) } } trace.UGamma <- sum(fg * trace.UGamma) if (Satterthwaite) { trace.UGamma2 <- sum(fg * trace.UGamma2) } } else { # for trace.UGamma, we can compute the trace per group # as in Satorra (2000) eq. 23 trace.UGamma <- numeric(ngroups) for (g in 1:ngroups) { UG.group <- WLS.V[[g]] %*% Gamma[[g]] %*% WLS.V[[g]] %*% PI[[g]] %*% PAAPAAP %*% t(PI[[g]]) trace.UGamma[g] <- sum(diag(UG.group)) } trace.UGamma <- sum(fg * trace.UGamma) # but for trace.UGamma2, we can no longer compute the trace per group trace.UGamma2 <- as.numeric(NA) if (Satterthwaite) { # global approach (not group-specific) Gamma.f <- Gamma for (g in seq_along(Gamma)) { Gamma.f[[g]] <- fg[g] * Gamma[[g]] } Gamma.all <- lav_matrix_bdiag(Gamma.f) V.all <- lav_matrix_bdiag(WLS.V) PI.all <- do.call(rbind, PI) U.all <- V.all %*% PI.all %*% PAAPAAP %*% t(PI.all) %*% V.all UG.all <- U.all %*% Gamma.all UG.all2 <- UG.all %*% UG.all trace.UGamma2 <- sum(diag(UG.all2)) } } if (Satterthwaite && !scaled.shifted) { cd <- trace.UGamma2 / trace.UGamma df.delta <- trace.UGamma^2 / trace.UGamma2 T.delta <- (T0 - T1) / cd a <- as.numeric(NA) b <- as.numeric(NA) } else if (Satterthwaite && scaled.shifted) { a <- sqrt(m / trace.UGamma2) # b <- m - sqrt(m * trace.UGamma^2 / trace.UGamma2) b <- m - a * trace.UGamma df.delta <- m T.delta <- (T0 - T1) * a + b cd <- as.numeric(NA) } else { cd <- 1 / m * trace.UGamma df.delta <- m T.delta <- (T0 - T1) / cd a <- as.numeric(NA) b <- as.numeric(NA) } list( T.delta = T.delta, scaling.factor = cd, df.delta = df.delta, trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, a = a, b = b ) } lav_test_diff_SatorraBentler2001 <- function(m1, m0, test = 2) { # extract information from m1 and m2 T1 <- m1@test[[1]]$stat r1 <- m1@test[[1]]$df c1 <- m1@test[[test]]$scaling.factor if (r1 == 0) { # saturated model c1 <- 1 } T0 <- m0@test[[1]]$stat r0 <- m0@test[[1]]$df c0 <- m0@test[[test]]$scaling.factor # m = difference between the df's m <- r0 - r1 # check for identical df setting if (m == 0L) { return(list( T.delta = (T0 - T1), scaling.factor = as.numeric(NA), df.delta = m )) } # compute c_d cd <- (r0 * c0 - r1 * c1) / m # warn if cd is negative 
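# Worked example of the computation above, with hypothetical values
# T0 = 60, r0 = 12, c0 = 1.20 and T1 = 30, r1 = 10, c1 = 1.10:
#
#     m <- 12 - 10                        # 2
#     cd <- (12 * 1.20 - 10 * 1.10) / 2   # 1.70
#     T.delta <- (60 - 30) / 1.70         # ~17.65, df = 2
#
# (a negative cd, possible in small samples, is caught by the check below)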
if (cd < 0) { lav_msg_warn(gettext("scaling factor is negative")) cd <- as.numeric(NA) } # compute scaled difference test T.delta <- (T0 - T1) / cd list(T.delta = T.delta, scaling.factor = cd, df.delta = m) } lav_test_diff_SatorraBentler2010 <- function(m1, m0, test = 2, H1 = FALSE) { ### FIXME: check if models are nested at the parameter level!!! # extract information from m1 and m2 T1 <- m1@test[[1]]$stat r1 <- m1@test[[1]]$df c1 <- m1@test[[test]]$scaling.factor if (r1 == 0) { # saturated model c1 <- 1 } T0 <- m0@test[[1]]$stat r0 <- m0@test[[1]]$df c0 <- m0@test[[test]]$scaling.factor if (r0 == 0) { # should never happen c0 <- 1 } # m = difference between the df's m <- r0 - r1 # check for identical df setting if (m == 0L) { return(list( T.delta = (T0 - T1), scaling.factor = as.numeric(NA), df.delta = m )) } # generate `M10' model if (H1) { # M0 with M1 parameters M01 <- lav_test_diff_m10(m0, m1, test = TRUE) c01 <- M01@test[[test]]$scaling.factor # check if vcov is positive definite (new in 0.6) # if not, we may get negative values eigvals <- eigen(lavTech(M01, "information"), symmetric = TRUE, only.values = TRUE )$values if (any(eigvals < -1 * .Machine$double.eps^(3 / 4))) { lav_msg_warn(gettext( "information matrix of the M01 model is not positive definite." )) # " As a result, the scale-factor can not be computed.") # cd <- as.numeric(NA) } # else { # compute c_d # cd.01 <- (r0 * c01 - r1 * c0) / m ??? cd <- (r0 * c0 - r1 * c01) / m # } } else { # M1 with M0 parameters (as in Satorra & Bentler 2010) M10 <- lav_test_diff_m10(m1, m0, test = TRUE) c10 <- M10@test[[test]]$scaling.factor # check if vcov is positive definite (new in 0.6) # if not, we may get negative values eigvals <- eigen(lavTech(M10, "information"), symmetric = TRUE, only.values = TRUE )$values if (any(eigvals < -1 * .Machine$double.eps^(3 / 4))) { lav_msg_warn(gettext( "information matrix of the M10 model is not positive definite." )) # " As a result, the scale-factor can not be computed.") # cd <- as.numeric(NA) } # else { # compute c_d cd <- (r0 * c0 - r1 * c10) / m # } } # compute scaled difference test T.delta <- (T0 - T1) / cd list( T.delta = T.delta, scaling.factor = cd, df.delta = m, T.delta.unscaled = (T0 - T1) ) } # create a new model 'm10', where we use model 'm1', but we # inject it with the values of 'm0' lav_test_diff_m10 <- function(m1, m0, test = FALSE) { # switch of verbose/se/test Options <- m1@Options # switch of optim.gradient check Options$check.gradient <- FALSE # should we compute se/test statistics? if (!test) { Options$se <- "none" Options$test <- "none" } PT.M0 <- lav_partable_set_cache(m0@ParTable, m0@pta) PT.M1 <- lav_partable_set_cache(m1@ParTable, m1@pta) # `extend' PT.M1 partable to include all `fixed-to-zero parameters' PT.M1.FULL <- lav_partable_full( partable = PT.M1, free = TRUE, start = TRUE ) PT.M1.extended <- lav_partable_merge(PT.M1, PT.M1.FULL, remove.duplicated = TRUE, warn = FALSE ) # remove most columns PT.M1.extended$start <- NULL # new in 0.6-4! 
(otherwise, they are used) PT.M1.extended$est <- NULL PT.M1.extended$se <- NULL # in addition, use 'NA' for free parameters in ustart column free.par.idx <- which(PT.M1.extended$free > 0L) PT.M1.extended$ustart[free.par.idx] <- as.numeric(NA) # `extend' PT.M0 partable to include all `fixed-to-zero parameters' PT.M0.FULL <- lav_partable_full( partable = PT.M0, free = TRUE, start = TRUE ) PT.M0.extended <- lav_partable_merge(PT.M0, PT.M0.FULL, remove.duplicated = TRUE, warn = FALSE ) # remove most columns, but not 'est' PT.M0.extended$ustart <- NULL PT.M0.extended$start <- NULL PT.M0.extended$se <- NULL # FIXME: # - check if H0 does not contain additional parameters... Options$optim.method <- "none" Options$optim.force.converged <- TRUE Options$baseline <- FALSE Options$h1 <- TRUE # needed after all (yuan.benter.mplus) Options$start <- PT.M0.extended # new in 0.6! m10 <- lavaan( model = PT.M1.extended, slotOptions = Options, slotSampleStats = m1@SampleStats, slotData = m1@Data, slotCache = m1@Cache, verbose = FALSE ) m10 } # compute the `A' matrix: the jacobian of the constraint function a(\delta) # (see Satorra 2000) # # # lav_test_diff_A <- function(m1, m0, method = "delta", reference = "H1") { # FIXME!!!! if (method == "exact") { if (reference == "H1") { af <- lav_test_diff_af_h1(m1 = m1, m0 = m0) xx <- m1@optim$x } else { # evaluate under H0 lav_msg_stop(gettext("not ready yet")) # af <- .test_compute_partable_A_diff_h0(m1 = m1, m0 = m0) xx <- m0@optim$x } A <- try(lav_func_jacobian_complex(func = af, x = xx), silent = TRUE) if (inherits(A, "try-error")) { A <- lav_func_jacobian_simple(func = af, x = xx) } } else if (method == "delta") { # use a numeric approximation of `A' Delta1.list <- computeDelta(m1@Model) Delta0.list <- computeDelta(m0@Model) Delta1 <- do.call(rbind, Delta1.list) Delta0 <- do.call(rbind, Delta0.list) # take into account equality constraints m0 if (m0@Model@eq.constraints) { Delta0 <- Delta0 %*% m0@Model@eq.constraints.K } else if (.hasSlot(m0@Model, "ceq.simple.only") && m0@Model@ceq.simple.only) { Delta0 <- Delta0 %*% t(m0@Model@ceq.simple.K) } # take into account equality constraints m1 if (m1@Model@eq.constraints) { Delta1 <- Delta1 %*% m1@Model@eq.constraints.K } else if (.hasSlot(m1@Model, "ceq.simple.only") && m1@Model@ceq.simple.only) { Delta1 <- Delta1 %*% t(m1@Model@ceq.simple.K) } # H <- solve(t(Delta1) %*% Delta1) %*% t(Delta1) %*% Delta0 H <- MASS::ginv(Delta1) %*% Delta0 A <- t(lav_matrix_orthogonal_complement(H)) } A } # for each parameter in H1 (m1), see if we have somehow constrained # this parameter under H0 (m0) # # since we work 'under H0', we need to use the labels/constraints/def # as they appear in H0. Unfortunately, the order of the parameters, and # even the (p)labels may be different in the two models... # # Therefore, we will attempt to: # - change the 'order' of the 'free' column in m0, so that they map to # to the 'x' that we will provide from H1 # - the plabels used in "==" constraints must be renamed, if necessary # lav_test_diff_af_h1 <- function(m1, m0) { PT.M0 <- lav_partable_set_cache(parTable(m0), m0@pta) PT.M1 <- lav_partable_set_cache(parTable(m1), m1@pta) # select .p*. 
parameters only M0.p.idx <- which(grepl("\\.p", PT.M0$plabel)) np0 <- length(M0.p.idx) M1.p.idx <- which(grepl("\\.p", PT.M1$plabel)) np1 <- length(M1.p.idx) # check if parameter space is the same if (np0 != np1) { lav_msg_stop(gettext( "unconstrained parameter set is not the same in m0 and m1")) } # split partable in 'parameter' and 'constraints' section PT.M0.part1 <- PT.M0[M0.p.idx, ] PT.M0.part2 <- PT.M0[-M0.p.idx, ] PT.M1.part1 <- PT.M1[M1.p.idx, ] PT.M1.part2 <- PT.M1[-M1.p.idx, ] # figure out relationship between m0 and m1 p1.id <- lav_partable_map_id_p1_in_p2(PT.M0.part1, PT.M1.part1) p0.free.idx <- which(PT.M0.part1$free > 0) # change 'free' order in m0 # NOTE: this only works all the free parameters in h0 are also free # in h1 (and if not, they will become fixed in h0) PT.M0.part1$free[p0.free.idx] <- PT.M1.part1$free[PT.M0.part1$id[p1.id][p0.free.idx]] # paste back PT.M0 <- rbind(PT.M0.part1, PT.M0.part2) PT.M1 <- rbind(PT.M1.part1, PT.M1.part2) # `extend' PT.M1 partable to include all `fixed-to-zero parameters' PT.M1.FULL <- lav_partable_full( partable = PT.M1, free = TRUE, start = TRUE ) PT.M1.extended <- lav_partable_merge(PT.M1, PT.M1.FULL, remove.duplicated = TRUE, warn = FALSE ) # `extend' PT.M0 partable to include all `fixed-to-zero parameters' PT.M0.FULL <- lav_partable_full( partable = PT.M0, free = TRUE, start = TRUE ) PT.M0.extended <- lav_partable_merge(PT.M0, PT.M0.FULL, remove.duplicated = TRUE, warn = FALSE ) p1 <- PT.M1.extended np1 <- length(p1$lhs) p0 <- PT.M0.extended np0 <- length(p0$lhs) con.function <- function() NULL formals(con.function) <- alist(.x. = , ... = ) BODY.txt <- paste("{\nout <- numeric(0L)\n", sep = "") # first handle def + == constraints # but FIRST, remove == constraints that also appear in H1!!! # remove equivalent eq constraints from p0 P0 <- p0 p0.eq.idx <- which(p0$op == "==") p1.eq.idx <- which(p1$op == "==") p0.remove.idx <- integer(0L) if (length(p0.eq.idx) > 0L) { for (i in seq_along(p0.eq.idx)) { # e0 in p0 e0 <- p0.eq.idx[i] lhs <- p0$lhs[e0] rhs <- p0$rhs[e0] # do we have an equivalent constraint in H1? # NOTE!! the (p)labels may differ # SO, we will use an 'empirical' approach: if we fill in (random) # values, and work out the constraint, do we get identical values? 
# if yes, constraint is equivalent, and we should NOT add it here if (length(p1.eq.idx) > 0) { # generate random parameter values xx1 <- rnorm(length(M1.p.idx)) xx0 <- xx1[p1.id] con.h0.value <- m0@Model@ceq.function(xx0)[i] con.h1.values <- m1@Model@ceq.function(xx1) if (con.h0.value %in% con.h1.values) { p0.remove.idx <- c(p0.remove.idx, e0) } } } } if (length(p0.remove.idx) > 0L) { P0 <- P0[-p0.remove.idx, ] } # only for the UNIQUE equality constraints in H0, generate syntax DEFCON.txt <- lav_partable_constraints_ceq(P0, txtOnly = TRUE) BODY.txt <- paste(BODY.txt, DEFCON.txt, "\n", sep = "") # for each parameter in p1, we 'check' is it is fixed to a constant in p0 ncon <- length(which(P0$op == "==")) for (i in seq_len(np1)) { # p in p1 lhs <- p1$lhs[i] op <- p1$op[i] rhs <- p1$rhs[i] group <- p1$group[i] # ignore '==', '<', '>' and ':=' for now if (op == "==" || op == ">" || op == "<" || op == ":=") next # search for corresponding parameter in p0 p0.idx <- which(p0$lhs == lhs & p0$op == op & p0$rhs == rhs & p0$group == group) if (length(p0.idx) == 0L) { lav_msg_stop( gettextf("parameter in H1 not found in H0: %s", paste(lhs, op, rhs, "(group = ", group, ")", sep = " ") )) } # 4 possibilities: p is free/fixed in p1, p is free/fixed in p0 if (p1$free[i] == 0L) { if (p0$free[p0.idx] == 0L) { # match, nothing to do } else { lav_msg_warn( gettextf("fixed parameter in H1 is free in H0: %s", paste("\"", lhs, " ", op, " ", rhs, "\" (group = ", group, ")", sep = "" ) )) } } else { if (p0$free[p0.idx] == 0L) { # match, this is a contrained parameter in H0 ncon <- ncon + 1L BODY.txt <- paste(BODY.txt, "out[", ncon, "] = .x.[", p1$free[i], "] - ", p0$ustart[p0.idx], "\n", sep = "" ) next } else { # match, nothing to do } } } # wrap function BODY.txt <- paste(BODY.txt, "return(out)\n}\n", sep = "") body(con.function) <- parse(file = "", text = BODY.txt) con.function } lavaan/R/lav_sam_step2_se.R0000644000176200001440000001156014627656441015310 0ustar liggesusers# compute two-step standard errors for SAM models lav_sam_step2_se <- function(FIT = NULL, JOINT = NULL, STEP1 = NULL, STEP2 = NULL, local.options = list()) { # current approach for se = "twostep": # - create 'global' model, only to get the 'joint' information matrix # - partition information matrix (step 1, step 2) # - apply two-step correction for second step # - 'insert' these corrected SEs (and vcov) in JOINT out <- list() Sigma.11 <- STEP1$Sigma.11 step1.free.idx <- STEP1$step1.free.idx step2.free.idx <- STEP2$step2.free.idx lavoptions <- FIT@Options nlevels <- FIT@pta$nlevels FIT.PA <- STEP2$FIT.PA extra.id <- STEP2$extra.id # catch empty step2.free.idx if (length(step2.free.idx) == 0L) { # no (free) structural parameters at all! out <- list( V1 = matrix(0, 0, 0), V2 = matrix(0, 0, 0), VCOV = matrix(0, 0, 0) ) return(out) } if (!lavoptions$se %in% c("none", "standard", "naive", "twostep", "twostep2")) { lav_msg_warn(gettext( "unknown se= argument: \"%s\". Switching to twostep.", lavoptions$se )) } if (lavoptions$se == "none") { return(out) } if (lav_verbose()) { cat("Computing ", lavoptions$se, " standard errors ... ", sep = "") } INFO <- lavInspect(JOINT, "information") I.12 <- INFO[step1.free.idx, step2.free.idx] I.22 <- INFO[step2.free.idx, step2.free.idx] I.21 <- INFO[step2.free.idx, step1.free.idx] # V2 if (nlevels > 1L) { # FIXME: not ok for multigroup multilevel N <- FIT@Data@Lp[[1]]$nclusters[[2]] # first group only } else { N <- nobs(FIT) } # do we have 'extra' free parameter in FIT.PA that are not free in JOINT? 
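# For reference, the "twostep" correction computed further below follows the
# usual two-stage result: with I.22 the step-2 block of the joint information
# matrix, I.21 (= t(I.12)) the cross block, and Sigma.11 the step-1 sampling
# variability,
#
#     V2 = (1/N) * I.22^{-1}                         (naive step-2 part)
#     V1 = I.22^{-1} I.21 Sigma.11 I.12 I.22^{-1}    (step-1 propagation)
#     VCOV = V2 + V1
#
# A minimal numeric sketch with hypothetical scalar (1x1) blocks:
#
#     I.22 <- 4; I.21 <- 1; Sigma.11 <- 0.02; N <- 200
#     V2 <- (1 / N) * (1 / I.22)                             # 0.00125
#     V1 <- (1 / I.22) * I.21 * Sigma.11 * I.21 * (1 / I.22) # 0.00125
#     V2 + V1                                                # 0.0025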
step2.rm.idx <- integer(0L) if (length(extra.id) > 0L) { id.idx <- which(FIT.PA@ParTable$id %in% extra.id & FIT.PA@ParTable$free > 0L) step2.rm.idx <- FIT.PA@ParTable$free[id.idx] } # invert augmented information, for I.22 block only # new in 0.6-16 (otherwise, eq constraints in struc part are ignored) if (lavoptions$se != "naive") { I.22.inv <- lav_model_information_augment_invert( lavmodel = FIT.PA@Model, information = I.22, inverted = TRUE, rm.idx = step2.rm.idx ) if (inherits(I.22.inv, "try-error")) { # hm, not good if (lavoptions$se != "naive") { lav_msg_warn(gettext( "problem inverting information matrix (I.22); -> switching to naive standard errors!" )) lavoptions$se <- "naive" } } } # se is not "naive", but based on I.22 # method below has the advantage that we can use a 'robust' vcov # for the joint model; # but does not work if we have equality constraints in the MM! # -> D will be singular # A <- JOINT@vcov$vcov[ step2.free.idx, step2.free.idx] # B <- JOINT@vcov$vcov[ step2.free.idx, -step2.free.idx] # C <- JOINT@vcov$vcov[-step2.free.idx, step2.free.idx] # D <- JOINT@vcov$vcov[-step2.free.idx, -step2.free.idx] # I.22.inv <- A - B %*% solve(D) %*% C # se = "standard" if (lavoptions$se == "standard") { VCOV <- 1 / N * I.22.inv out$VCOV <- VCOV # se = "naive" } else if (lavoptions$se == "naive") { if (is.null(FIT.PA@vcov$vcov)) { FIT.PA@Options$se <- "standard" VCOV.naive <- lavTech(FIT.PA, "vcov") } else { VCOV.naive <- FIT.PA@vcov$vcov } if (length(step2.rm.idx) > 0L) { VCOV.naive <- VCOV.naive[-step2.rm.idx, -step2.rm.idx] } out$VCOV <- VCOV.naive # se = "twostep" or "twostep2" } else if (lavoptions$se %in% c("twostep", "twostep2")) { V2 <- 1 / N * I.22.inv # not the same as FIT.PA@vcov$vcov!! if (lavoptions$se == "twostep" ) { V1 <- I.22.inv %*% I.21 %*% Sigma.11 %*% I.12 %*% I.22.inv } else { stop("not ready yet") # R.21 <- crossprod(J2, J1)/nrow(J2) # V1 <- I.22.inv %*% (I.21 %*% Sigma.11 %*% t(I.21) - # I.21 %*% Sigma.11 %*% t(R.21) - # R.21 %*% Sigma.11 %*% t(I.21)) %*% I.22.inv } # V for second step if (!is.null(local.options$alpha.correction) && local.options$alpha.correction > 0) { alpha.N1 <- local.options$alpha.correction / (N - 1) if (alpha.N1 > 1.0) { alpha.N1 <- 1.0 } else if (alpha.N1 < 0.0) { alpha.N1 <- 0.0 } if (is.null(FIT.PA@vcov$vcov)) { FIT.PA@Options$se <- "standard" VCOV.naive <- lavTech(FIT.PA, "vcov") } else { VCOV.naive <- FIT.PA@vcov$vcov } if (length(step2.rm.idx) > 0L) { VCOV.naive <- VCOV.naive[-step2.rm.idx, -step2.rm.idx] } VCOV.corrected <- V2 + V1 VCOV <- alpha.N1 * VCOV.naive + (1 - alpha.N1) * VCOV.corrected } else { VCOV <- V2 + V1 } # store in out out$V2 <- V2 out$V1 <- V1 out$VCOV <- VCOV } if (lav_verbose()) { cat("done.\n") } out } lavaan/R/00class.R0000644000176200001440000002317514627656441013334 0ustar liggesusers# class definitions # # initial version: YR 25/03/2009 # added ModelSyntax: YR 02/08/2010 # deleted ModelSyntax: YR 01/11/2010 (using flattened model syntax now) # ldw 20/11/2023: replace 'representation()' by 'slots=' setClass("lavData", slots = c( data.type = "character", # "full", "moment" or "none" group = "character", # group variable ngroups = "integer", # number of groups group.label = "character", # group labels block.label = "character", # block labels cluster = "character", # cluster variable(s) nlevels = "integer", # number of levels level.label = "character", # level labels std.ov = "logical", # standardize observed variables? 
nobs = "list", # effective number of observations norig = "list", # original number of observations ov.names = "list", # variable names (per group) ov.names.x = "list", # exo variable names (per group) ov.names.l = "list", # names per level # ov.types = "list", # variable types (per group) # ov.idx = "list", # column indices (all observed variables) ordered = "character", # ordered variables weights = "list", # sampling weights (per group) sampling.weights = "character", # sampling weights variable ov = "list", # variable table case.idx = "list", # case indices per group missing = "character", # "listwise" or not? Mp = "list", # if not complete, missing patterns # we need this here, to get nobs right! Rp = "list", # response patterns (categorical only) Lp = "list", # level patterns eXo = "list", # local copy exo only X = "list" # local copy ) ) setClass("lavSampleStats", # sample moments slots = c( var = "list", # observed variances (per group) cov = "list", # observed var/cov matrix (per group) mean = "list", # observed mean vector (per group) th = "list", # thresholds for non-numeric var (per group) th.idx = "list", # th index (0 for numeric) th.names = "list", # threshold names res.cov = "list", # residual var/cov matrix (if conditional.x) res.var = "list", # residual variances res.th = "list", # residual thresholds res.th.nox = "list", # residual thresholds ignoring x res.slopes = "list", # slopes exo (if conditional.x) res.int = "list", # intercepts (if conditional.x) mean.x = "list", # mean exo cov.x = "list", # variance/covariance exo bifreq = "list", # bivariate frequency tables group.w = "list", # group weight nobs = "list", # effective number of obs (per group) ntotal = "numeric", # total number of obs (all groups) ngroups = "integer", # number of groups x.idx = "list", # x.idx if fixed.x = TRUE icov = "list", # inverse of observed cov (per group) cov.log.det = "list", # log det of observed cov (per group) res.icov = "list", res.cov.log.det = "list", # ridge.constant = "numeric", # ridge constant (per group) # ridge.constant.x = "numeric", # ridge constant (per group) for eXo ridge = "numeric", WLS.obs = "list", # all relevant observed stats in a vector WLS.V = "list", # weight matrix for GLS/WLS WLS.VD = "list", # diagonal of weight matrix only NACOV = "list", # N times the asymptotic covariance matrix NACOV.user = "logical", # user-specified NACOV? missing.flag = "logical", # missing patterns? missing = "list", # missingness information missing.h1 = "list", # h1 model YLp = "list", # cluster/level information zero.cell.tables = "list" # bivariate tables with empty cells ) ) setClass("lavModel", # MATRIX representation of the sem model slots = c( GLIST = "list", # list of all model matrices (for all groups) dimNames = "list", # dim names for the model matrices isSymmetric = "logical", # model matrix symmetric? mmSize = "integer", # model matrix size (unique only) representation = "character", # stub, until we define more classes modprop = "list", # model properties meanstructure = "logical", correlation = "logical", categorical = "logical", multilevel = "logical", group.w.free = "logical", link = "character", nblocks = "integer", ngroups = "integer", # only for rsem!! 
(which uses rsem:::computeDelta) nefa = "integer", nmat = "integer", nvar = "integer", num.idx = "list", th.idx = "list", nx.free = "integer", nx.unco = "integer", nx.user = "integer", m.free.idx = "list", x.free.idx = "list", # m.unco.idx = "list", # always the same as m.free.idx x.unco.idx = "list", m.user.idx = "list", x.user.idx = "list", x.def.idx = "integer", x.ceq.idx = "integer", x.cin.idx = "integer", x.free.var.idx = "integer", ceq.simple.only = "logical", ceq.simple.K = "matrix", eq.constraints = "logical", eq.constraints.K = "matrix", eq.constraints.k0 = "numeric", def.function = "function", ceq.function = "function", ceq.jacobian = "function", ceq.JAC = "matrix", ceq.rhs = "numeric", ceq.linear.idx = "integer", ceq.nonlinear.idx = "integer", cin.function = "function", cin.jacobian = "function", cin.JAC = "matrix", cin.rhs = "numeric", cin.linear.idx = "integer", cin.nonlinear.idx = "integer", ceq.efa.JAC = "matrix", con.jac = "matrix", con.lambda = "numeric", nexo = "integer", conditional.x = "logical", fixed.x = "logical", parameterization = "character", ov.x.dummy.ov.idx = "list", ov.x.dummy.lv.idx = "list", ov.y.dummy.ov.idx = "list", ov.y.dummy.lv.idx = "list", ov.efa.idx = "list", lv.efa.idx = "list", rv.ov = "list", rv.lv = "list", H = "list", lv.order = "list", estimator = "character", estimator.args = "list" ) ) setClass("Fit", slots = c( npar = "integer", # number of free parameters # ndat = "integer", # df = "integer", x = "numeric", # x partrace = "matrix", # parameter trace start = "numeric", # starting values (only for other packages) est = "numeric", # estimated values (only for other packages) se = "numeric", # standard errors fx = "numeric", fx.group = "numeric", logl = "numeric", logl.group = "numeric", iterations = "integer", # number of iterations converged = "logical", control = "list", Sigma.hat = "list", Mu.hat = "list", TH = "list", test = "list" ) ) setClass("lavaan", slots = c( version = "character", # lavaan version call = "call", # matched call timing = "list", # timing information Options = "list", # lavOptions ParTable = "list", # parameter table user-specified model pta = "list", # parameter table attributes Data = "lavData", # full data SampleStats = "lavSampleStats", # sample statistics Model = "lavModel", # internal matrix representation Cache = "list", # housekeeping stuff Fit = "Fit", # fitted results boot = "list", # bootstrap results optim = "list", # optimizer results loglik = "list", # loglik values and info implied = "list", # model implied moments vcov = "list", # vcov test = "list", # test h1 = "list", # unrestricted model results baseline = "list", # baseline model results internal = "list", # optional slot, for internal use external = "list" # optional slot, for add-on packages ) ) setClass("lavaanList", slots = c( version = "character", # lavaan version call = "call", # matched call Options = "list", # lavOptions ParTable = "list", pta = "list", Data = "lavData", # from first dataset (ngroups!) 
Model = "lavModel", # based on first dataset meta = "list", timingList = "list", ParTableList = "list", DataList = "list", SampleStatsList = "list", CacheList = "list", vcovList = "list", testList = "list", optimList = "list", impliedList = "list", h1List = "list", loglikList = "list", baselineList = "list", internalList = "list", funList = "list", external = "list" # optional slot, for add-on packages ) ) lavaan/R/lav_partable_constraints.R0000644000176200001440000003576714627656441017164 0ustar liggesusers# build def function from partable lav_partable_constraints_def <- function(partable, con = NULL, debug = FALSE, txtOnly = FALSE) { if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } # empty function def.function <- function() NULL # if 'con', merge partable + con if (!is.null(con)) { partable$lhs <- c(partable$lhs, con$lhs) partable$op <- c(partable$op, con$op) partable$rhs <- c(partable$rhs, con$rhs) } # get := definitions def.idx <- which(partable$op == ":=") # catch empty def if (length(def.idx) == 0L) { if (txtOnly) { return(character(0L)) } else { return(def.function) } } # create function formals(def.function) <- alist(.x. = , ... = ) if (txtOnly) { BODY.txt <- "" } else { BODY.txt <- paste("{\n# parameter definitions\n\n") } lhs.names <- partable$lhs[def.idx] def.labels <- all.vars(parse(file = "", text = partable$rhs[def.idx])) # remove the ones in lhs.names idx <- which(def.labels %in% lhs.names) if (length(idx) > 0L) def.labels <- def.labels[-idx] # get corresponding 'x' indices def.x.idx <- partable$free[match(def.labels, partable$label)] if (any(is.na(def.x.idx))) { lav_msg_stop(gettext( "unknown label(s) in variable definition(s):"), lav_msg_view(def.labels[which(is.na(def.x.idx))], "none") ) } if (any(def.x.idx == 0)) { lav_msg_stop(gettext( "non-free parameter(s) in variable definition(s):"), lav_msg_view(def.labels[which(def.x.idx == 0)], "none") ) } def.x.lab <- paste(".x.[", def.x.idx, "]", sep = "") # put both the labels the function BODY if (length(def.x.idx) > 0L) { BODY.txt <- paste(BODY.txt, "# parameter labels\n", paste(def.labels, " <- ", def.x.lab, collapse = "\n"), "\n", sep = "" ) } # write the definitions literally BODY.txt <- paste(BODY.txt, "\n# parameter definitions\n", sep = "") for (i in 1:length(def.idx)) { BODY.txt <- paste(BODY.txt, lhs.names[i], " <- ", partable$rhs[def.idx[i]], "\n", sep = "" ) } if (txtOnly) { return(BODY.txt) } # put the results in 'out' BODY.txt <- paste(BODY.txt, "\nout <- ", paste("c(", paste(lhs.names, collapse = ","), ")\n", sep = ""), sep = "" ) # what to do with NA values? -> return +Inf??? BODY.txt <- paste(BODY.txt, "out[is.na(out)] <- Inf\n", sep = "") BODY.txt <- paste(BODY.txt, "names(out) <- ", paste("c(\"", paste(lhs.names, collapse = "\",\""), "\")\n", sep = ""), sep = "" ) BODY.txt <- paste(BODY.txt, "return(out)\n}\n", sep = "") body(def.function) <- parse(file = "", text = BODY.txt) if (lav_debug()) { cat("def.function = \n") print(def.function) cat("\n") } def.function } # build ceq function from partable # non-trivial equality constraints (linear or nonlinear) # convert to 'ceq(x)' function where 'x' is the (free) parameter vector # and ceq(x) returns the evaluated equality constraints # # eg. 
if b1 + b2 == 2 (and b1 correspond to, say, x[10] and x[17]) # ceq <- function(x) { # out <- rep(NA, 1) # b1 = x[10]; b2 = x[17] # out[1] <- b1 + b2 - 2 # } lav_partable_constraints_ceq <- function(partable, con = NULL, debug = FALSE, txtOnly = FALSE) { if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } # empty function ceq.function <- function() NULL # if 'con', merge partable + con if (!is.null(con)) { partable$lhs <- c(partable$lhs, con$lhs) partable$op <- c(partable$op, con$op) partable$rhs <- c(partable$rhs, con$rhs) } # get equality constraints eq.idx <- which(partable$op == "==") # catch empty ceq if (length(eq.idx) == 0L) { if (txtOnly) { return(character(0L)) } else { return(ceq.function) } } # create function formals(ceq.function) <- alist(.x. = , ... = ) if (txtOnly) { BODY.txt <- "" } else { BODY.txt <- paste("{\nout <- rep(NA, ", length(eq.idx), ")\n", sep = "") } # first come the variable definitions DEF.txt <- lav_partable_constraints_def(partable, txtOnly = TRUE) def.idx <- which(partable$op == ":=") BODY.txt <- paste(BODY.txt, DEF.txt, "\n", sep = "") # extract labels lhs.labels <- all.vars(parse(file = "", text = partable$lhs[eq.idx])) rhs.labels <- all.vars(parse(file = "", text = partable$rhs[eq.idx])) eq.labels <- unique(c(lhs.labels, rhs.labels)) # remove def.names from eq.labels if (length(def.idx) > 0L) { def.names <- as.character(partable$lhs[def.idx]) d.idx <- which(eq.labels %in% def.names) if (length(d.idx) > 0) eq.labels <- eq.labels[-d.idx] } eq.x.idx <- rep(as.integer(NA), length(eq.labels)) # get user-labels ids ulab.idx <- which(eq.labels %in% partable$label) if (length(ulab.idx) > 0L) { eq.x.idx[ulab.idx] <- partable$free[match( eq.labels[ulab.idx], partable$label )] } # get plabels ids plab.idx <- which(eq.labels %in% partable$plabel) if (length(plab.idx) > 0L) { eq.x.idx[plab.idx] <- partable$free[match( eq.labels[plab.idx], partable$plabel )] } # check if we have found the label if (any(is.na(eq.x.idx))) { lav_msg_stop(gettext("unknown label(s) in equality constraint(s):"), lav_msg_view(eq.labels[which(is.na(eq.x.idx))], "none") ) } # check if they are all 'free' if (any(eq.x.idx == 0)) { fixed.eq.idx <- which(eq.x.idx == 0) # FIXME: what should we do here? we used to stop with an error # from 0.5.18, we give a warning, and replace the non-free label # with its fixed value in ustart # warning("lavaan WARNING: non-free parameter(s) in equality constraint(s): ", # paste(eq.labels[fixed.eq.idx], collapse=" ")) fixed.lab.lhs <- eq.labels[fixed.eq.idx] fixed.lab.rhs <- numeric(length(fixed.lab.lhs)) for (i in 1:length(fixed.lab.lhs)) { # first try label idx <- match(fixed.lab.lhs[i], partable$label) # then try plabel if (is.na(idx)) { idx <- match(fixed.lab.lhs[i], partable$plabel) } if (is.na(idx)) { # hm, not found? fill in zero, or NA? 
} else { fixed.lab.rhs[i] <- partable$ustart[idx] } } BODY.txt <- paste(BODY.txt, "# non-free parameter labels\n", paste(fixed.lab.lhs, "<-", fixed.lab.rhs, collapse = "\n"), "\n", sep = "" ) eq.x.idx <- eq.x.idx[-fixed.eq.idx] eq.labels <- eq.labels[-fixed.eq.idx] } # put the labels the function BODY eq.x.lab <- paste(".x.[", eq.x.idx, "]", sep = "") if (length(eq.x.idx) > 0L) { BODY.txt <- paste(BODY.txt, "# parameter labels\n", paste(eq.labels, "<-", eq.x.lab, collapse = "\n"), "\n", sep = "" ) } # write the equality constraints literally BODY.txt <- paste(BODY.txt, "\n# equality constraints\n", sep = "") for (i in 1:length(eq.idx)) { lhs <- partable$lhs[eq.idx[i]] rhs <- partable$rhs[eq.idx[i]] if (rhs == "0") { eq.string <- lhs } else { eq.string <- paste(lhs, " - (", rhs, ")", sep = "") } BODY.txt <- paste(BODY.txt, "out[", i, "] <- ", eq.string, "\n", sep = "") } if (txtOnly) { return(BODY.txt) } # put the results in 'out' # BODY.txt <- paste(BODY.txt, "\nout <- ", # paste("c(", paste(lhs.names, collapse=","),")\n", sep=""), sep="") # what to do with NA values? -> return +Inf??? BODY.txt <- paste(BODY.txt, "\n", "out[is.na(out)] <- Inf\n", sep = "") BODY.txt <- paste(BODY.txt, "return(out)\n}\n", sep = "") body(ceq.function) <- parse(file = "", text = BODY.txt) if (lav_debug()) { cat("ceq.function = \n") print(ceq.function) cat("\n") } ceq.function } # build ciq function from partable # non-trivial inequality constraints (linear or nonlinear) # convert to 'cin(x)' function where 'x' is the (free) parameter vector # and cin(x) returns the evaluated inequality constraints # # eg. if b1 + b2 > 2 (and b1 correspond to, say, x[10] and x[17]) # cin <- function(x) { # out <- rep(NA, 1) # b1 = x[10]; b2 = x[17] # out[1] <- b1 + b2 - 2 # } # # NOTE: very similar, but not identitical to ceq, because we need to take # care of the difference between '<' and '>' lav_partable_constraints_ciq <- function(partable, con = NULL, debug = FALSE, txtOnly = FALSE) { if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } # empty function cin.function <- function() NULL # if 'con', merge partable + con if (!is.null(con)) { partable$lhs <- c(partable$lhs, con$lhs) partable$op <- c(partable$op, con$op) partable$rhs <- c(partable$rhs, con$rhs) } # get inequality constraints ineq.idx <- which(partable$op == ">" | partable$op == "<") # catch empty ciq if (length(ineq.idx) == 0L) { if (txtOnly) { return(character(0L)) } else { return(cin.function) } } # create function formals(cin.function) <- alist(.x. = , ... 
= ) if (txtOnly) { BODY.txt <- "" } else { BODY.txt <- paste("{\nout <- rep(NA, ", length(ineq.idx), ")\n", sep = "") } # first come the variable definitions DEF.txt <- lav_partable_constraints_def(partable, txtOnly = TRUE) def.idx <- which(partable$op == ":=") BODY.txt <- paste(BODY.txt, DEF.txt, "\n", sep = "") # extract labels lhs.labels <- all.vars(parse(file = "", text = partable$lhs[ineq.idx])) rhs.labels <- all.vars(parse(file = "", text = partable$rhs[ineq.idx])) ineq.labels <- unique(c(lhs.labels, rhs.labels)) # remove def.names from ineq.labels if (length(def.idx) > 0L) { def.names <- as.character(partable$lhs[def.idx]) d.idx <- which(ineq.labels %in% def.names) if (length(d.idx) > 0) ineq.labels <- ineq.labels[-d.idx] } ineq.x.idx <- rep(as.integer(NA), length(ineq.labels)) # get user-labels ids ulab.idx <- which(ineq.labels %in% partable$label) if (length(ulab.idx) > 0L) { ineq.x.idx[ulab.idx] <- partable$free[match( ineq.labels[ulab.idx], partable$label )] } # get plabels ids plab.idx <- which(ineq.labels %in% partable$plabel) if (length(plab.idx) > 0L) { ineq.x.idx[plab.idx] <- partable$free[match( ineq.labels[plab.idx], partable$plabel )] } # check if we have found the label if (any(is.na(ineq.x.idx))) { lav_msg_stop(gettext("unknown label(s) in inequality constraint(s):"), lav_msg_view(ineq.labels[which(is.na(ineq.x.idx))], "none") ) } # check if they are all 'free' if (any(ineq.x.idx == 0)) { fixed.ineq.idx <- which(ineq.x.idx == 0) # FIXME: what should we do here? we used to stop with an error # from 0.5.18, we give a warning, and replace the non-free label # with its fixed value in ustart lav_msg_warn(gettext("non-free parameter(s) in inequality constraint(s):"), lav_msg_view(ineq.labels[fixed.ineq.idx],"none") ) fixed.lab.lhs <- ineq.labels[fixed.ineq.idx] fixed.lab.rhs <- partable$ustart[match(fixed.lab.lhs, partable$label)] BODY.txt <- paste(BODY.txt, "# non-free parameter labels\n", paste(fixed.lab.lhs, "<-", fixed.lab.rhs, collapse = "\n"), "\n", sep = "" ) ineq.x.idx <- ineq.x.idx[-fixed.ineq.idx] ineq.labels <- ineq.labels[-fixed.ineq.idx] } # put the labels the function BODY ineq.x.lab <- paste(".x.[", ineq.x.idx, "]", sep = "") if (length(ineq.x.idx) > 0L) { BODY.txt <- paste(BODY.txt, "# parameter labels\n", paste(ineq.labels, "<-", ineq.x.lab, collapse = "\n"), "\n", sep = "" ) } # write the constraints literally BODY.txt <- paste(BODY.txt, "\n# inequality constraints\n", sep = "") for (i in 1:length(ineq.idx)) { lhs <- partable$lhs[ineq.idx[i]] op <- partable$op[ineq.idx[i]] rhs <- partable$rhs[ineq.idx[i]] # note,this is different from ==, because we have < AND > if (rhs == "0" && op == ">") { ineq.string <- lhs } else if (rhs == "0" && op == "<") { ineq.string <- paste(rhs, " - (", lhs, ")", sep = "") } else if (rhs != "0" && op == ">") { ineq.string <- paste(lhs, " - (", rhs, ")", sep = "") } else if (rhs != "0" && op == "<") { ineq.string <- paste(rhs, " - (", lhs, ")", sep = "") } BODY.txt <- paste(BODY.txt, "out[", i, "] <- ", ineq.string, "\n", sep = "") } if (txtOnly) { return(BODY.txt) } # put the results in 'out' # BODY.txt <- paste(BODY.txt, "\nout <- ", # paste("c(", paste(lhs.names, collapse=","),")\n", sep=""), sep="") # what to do with NA values? -> return +Inf??? 
BODY.txt <- paste(BODY.txt, "\n", "out[is.na(out)] <- Inf\n", sep = "") BODY.txt <- paste(BODY.txt, "return(out)\n}\n", sep = "") body(cin.function) <- parse(file = "", text = BODY.txt) if (lav_debug()) { cat("cin.function = \n") print(cin.function) cat("\n") } cin.function } # return a named vector of the 'free' indices, for the labels that # are used in a constrained (or optionally a definition) # (always 0 for definitions) lav_partable_constraints_label_id <- function(partable, con = NULL, def = TRUE) { # if 'con', merge partable + con if (!is.null(con)) { partable$lhs <- c(partable$lhs, con$lhs) partable$op <- c(partable$op, con$op) partable$rhs <- c(partable$rhs, con$rhs) } # get constraints if (def) { con.idx <- which(partable$op %in% c("==", "<", ">", ":=")) } else { con.idx <- which(partable$op %in% c("==", "<", ">")) } # catch empty con if (length(con.idx) == 0L) { return(integer(0L)) } def.idx <- which(partable$op == ":=") # extract labels lhs.labels <- all.vars(parse(file = "", text = partable$lhs[con.idx])) rhs.labels <- all.vars(parse(file = "", text = partable$rhs[con.idx])) con.labels <- unique(c(lhs.labels, rhs.labels)) # remove def.names from con.labels (unless def = TRUE) if (!def && length(def.idx) > 0L) { def.names <- as.character(partable$lhs[def.idx]) d.idx <- which(con.labels %in% def.names) if (length(d.idx) > 0) { con.labels <- con.labels[-d.idx] } } con.x.idx <- rep(as.integer(NA), length(con.labels)) # get user-labels ids ulab.idx <- which(con.labels %in% partable$label) if (length(ulab.idx) > 0L) { con.x.idx[ulab.idx] <- partable$free[match( con.labels[ulab.idx], partable$label )] } # get plabels ids plab.idx <- which(con.labels %in% partable$plabel) if (length(plab.idx) > 0L) { con.x.idx[plab.idx] <- partable$free[match( con.labels[plab.idx], partable$plabel )] } # check if we have found the label if (any(is.na(con.x.idx))) { lav_msg_warn(gettext("unknown label(s) in equality constraint(s):"), lav_msg_view(con.labels[which(is.na(con.x.idx))], "none") ) } # return named integer vector names(con.x.idx) <- con.labels con.x.idx } lavaan/R/lav_fit_rmsea.R0000644000176200001440000003356714627656441014710 0ustar liggesusers# functions related to the RMSEA index of approximate fit # lower-level functions: no checking of input: just compute the number(s): # - lav_fit_rmsea # - lav_fit_rmsea_ci # - lav_fit_rmsea_closefit # - lav_fit_rmsea_notclosefit (TODO) # higher-level functions: # - lav_fit_rmsea_lavobject # Y.R. 19 July 2022 # we assume X2 = N * F.val # lambda = (X2 - df) is the non-centrality parameter # RMSEA: sqrt( (X2 - df)/(N * df) ) # = sqrt( lambda/(N * df) ) # = sqrt( ((N*F.val) - df)/(N * df) ) # = sqrt( (N.F.val)/(N * df) - df/(N * df) ) # = sqrt( F.val/df - 1/N ) # = sqrt( (X2/N)/df - 1/N ) # 'scaled' RMSEA: X2 is replaced by X2-SB (or any other 'scaled' statistic) # robust RMSEA: sqrt( (X2/N)/df - c.hat/N ) # note: # - robust RMSEA == scaled RMSEA * sqrt(c.hat) # - robust RMSEA CI == scaled RMSEA CI * sqrt(c.hat) # robust RMSEA for MLMV (ie. scaled.shifted): # - c == a * (df - b) / df # - robust RMSEA MLM == robust RMSEA MLMV # - robust RMSEA MLMV == scaled RMSEA MLMV * sqrt(a) # - robust RMSEA CI MLMV == scaled RMSEA CI MLMV * sqrt(a) # References: # Steiger, J. H., & Lind, J. C. (1980, May). Statistically based tests for the # number of common factors. Paper presented at the annual meeting of the # Psychometric Society, Iowa City, IA. # confidence interval: # Browne, M. W., & Cudeck, R. (1993). Alternative ways of assessing model fit. # In K. A. 
Bollen & J. S. Long (Eds.), Testing structural equation models (pp. # 136-162). Newbury Park, CA: Sage. # problems with low df # Kenny, D. A., Kaniskan, B., & McCoach, D. B. (2015). The performance of RMSEA # in models with small degrees of freedom. Sociological Methods & Research, 44, # 486-507. # robust version MLM # Patricia E. Brosseau-Liard , Victoria Savalei & Libo Li (2012) An # Investigation of the Sample Performance of Two Nonnormality Corrections for # RMSEA, Multivariate Behavioral Research, 47:6, 904-930, DOI: # 10.1080/00273171.2012.715252 # robust version MLMV (scaled.shifted) # Savalei, V. (2018). On the computation of the RMSEA and CFI from the # mean-and-variance corrected test statistic with nonnormal data in SEM. # Multivariate behavioral research, 53(3), 419-429. # categorical data: # Savalei, V. (2021). Improving fit indices in structural equation modeling with # categorical data. Multivariate Behavioral Research, 56(3), 390-407. doi: # 10.1080/00273171.2020.1717922 # missing = "fiml": # Zhang, X., & Savalei, V. (2022). New computations for RMSEA and CFI following # FIML and TS estimation with missing data. Psychological Methods. # always using N (if a user needs N-1, just replace N by N-1) # vectorized! lav_fit_rmsea <- function(X2 = NULL, df = NULL, N = NULL, F.val = NULL, G = 1L, c.hat = 1.0) { # did we get a sample size? if (missing(N) && !missing(F.val)) { # population version RMSEA <- sqrt(F.val / df) } else { nel <- length(X2) if (nel == 0) { return(as.numeric(NA)) } RMSEA <- ifelse(df > 0, # 'standard' way to compute RMSEA RMSEA <- sqrt(pmax((X2 / N) / df - c.hat / N, rep(0, nel))) * sqrt(G), 0 ) # if df == 0 } RMSEA } # note: for 'robust' version, X2 should be SB-X2 lav_fit_rmsea_ci <- function(X2 = NULL, df = NULL, N = NULL, G = 1L, c.hat = 1, level = 0.90) { if (missing(N) || missing(X2) || missing(df) || !is.finite(X2) || !is.finite(df) || !is.finite(N)) { return(list( rmsea.ci.lower = as.numeric(NA), rmsea.ci.upper = as.numeric(NA) )) } if (!is.finite(level) || level < 0 || level > 1.0) { lav_msg_warn(gettextf( "invalid level value [%s] set to default 0.90.", level)) level <- 0.90 } upper.perc <- (1 - (1 - level) / 2) lower.perc <- (1 - level) / 2 # internal function lower.lambda <- function(lambda) { (pchisq(X2, df = df, ncp = lambda) - upper.perc) } upper.lambda <- function(lambda) { (pchisq(X2, df = df, ncp = lambda) - lower.perc) } # lower bound if (df < 1 || lower.lambda(0) < 0.0) { rmsea.ci.lower <- 0 } else { lambda.l <- try(uniroot(f = lower.lambda, lower = 0, upper = X2)$root, silent = TRUE ) if (inherits(lambda.l, "try-error")) { lambda.l <- as.numeric(NA) } # lower bound rmsea.ci.lower <- sqrt((c.hat * lambda.l) / (N * df)) # multiple groups? -> correction if (G > 1L) { rmsea.ci.lower <- rmsea.ci.lower * sqrt(G) } } # upper bound N.RMSEA <- max(N, X2 * 4) if (df < 1 || upper.lambda(N.RMSEA) > 0 || upper.lambda(0) < 0) { rmsea.ci.upper <- 0 } else { lambda.u <- try( uniroot( f = upper.lambda, lower = 0, upper = N.RMSEA )$root, silent = TRUE ) if (inherits(lambda.u, "try-error")) { lambda.u <- NA } # upper bound rmsea.ci.upper <- sqrt((c.hat * lambda.u) / (N * df)) # multiple groups? 
-> correction if (G > 1L) { rmsea.ci.upper <- rmsea.ci.upper * sqrt(G) } } list( rmsea.ci.lower = rmsea.ci.lower, rmsea.ci.upper = rmsea.ci.upper ) } # H_0: RMSEA <= rmsea.h0 lav_fit_rmsea_closefit <- function(X2 = NULL, df = NULL, N = NULL, G = 1L, c.hat = 1, rmsea.h0 = 0.05) { if (missing(N) || missing(X2) || missing(df) || !is.finite(X2) || !is.finite(df) || !is.finite(N)) { return(as.numeric(NA)) } rmsea.pvalue <- as.numeric(NA) if (df > 0) { # see Dudgeon 2004, eq 16 for the 'G' correction ncp <- (N * df * 1 / c.hat * rmsea.h0^2) / G rmsea.pvalue <- 1 - pchisq(X2, df = df, ncp = ncp) } rmsea.pvalue } # MacCallum, R. C., Browne, M. W., & Sugawara, H. M. (1996). # H_0: RMSEA >= rmsea.h0 lav_fit_rmsea_notclosefit <- function(X2 = NULL, df = NULL, N = NULL, G = 1L, c.hat = 1, rmsea.h0 = 0.05) { if (missing(N) || missing(X2) || missing(df) || !is.finite(X2) || !is.finite(df) || !is.finite(N)) { return(as.numeric(NA)) } rmsea.pvalue <- as.numeric(NA) if (df > 0) { # see Dudgeon 2004, eq 16 for the 'G' correction ncp <- (N * df * 1 / c.hat * rmsea.h0^2) / G rmsea.pvalue <- pchisq(X2, df = df, ncp = ncp) } rmsea.pvalue } lav_fit_rmsea_lavobject <- function(lavobject = NULL, fit.measures = "rmsea", standard.test = "standard", scaled.test = "none", ci.level = 0.90, close.h0 = 0.05, notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE) { # check lavobject stopifnot(inherits(lavobject, "lavaan")) # check for categorical categorical.flag <- lavobject@Model@categorical # tests TEST <- lavobject@test test.names <- sapply(lavobject@test, "[[", "test") if (test.names[1] == "none" || standard.test == "none") { return(list()) } test.idx <- which(test.names == standard.test)[1] if (length(test.idx) == 0L) { return(list()) } scaled.flag <- FALSE if (!scaled.test %in% c("none", "standard", "default")) { scaled.idx <- which(test.names == scaled.test) if (length(scaled.idx) > 0L) { scaled.idx <- scaled.idx[1] # only the first one scaled.flag <- TRUE } } # robust? robust.flag <- FALSE if (robust && scaled.flag && scaled.test %in% c( "satorra.bentler", "yuan.bentler.mplus", "yuan.bentler", "scaled.shifted" )) { robust.flag <- TRUE } # FIML? fiml.flag <- FALSE if (robust && lavobject@Options$missing %in% c("ml", "ml.x")) { fiml.flag <- robust.flag <- TRUE # check if we can compute corrected values if (scaled.flag) { version <- "V3" } else { version <- "V6" } fiml <- try(lav_fit_fiml_corrected(lavobject, version = version), silent = TRUE ) if (inherits(fiml, "try-error")) { lav_msg_warn(gettext("computation of robust RMSEA failed.")) fiml <- list( XX3 = as.numeric(NA), df3 = as.numeric(NA), c.hat3 = as.numeric(NA), XX3.scaled = as.numeric(NA) ) } else if (anyNA(c(fiml$XX3, fiml$df3, fiml$c.hat3, fiml$XX3.scaled))) { lav_msg_warn(gettext( "computation of robust RMSEA resulted in NA values.")) } } # supported fit measures in this function fit.rmsea <- c( "rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "rmsea.ci.level", "rmsea.pvalue", "rmsea.close.h0", "rmsea.notclose.pvalue", "rmsea.notclose.h0" ) if (scaled.flag) { fit.rmsea <- c( fit.rmsea, "rmsea.scaled", "rmsea.ci.lower.scaled", "rmsea.ci.upper.scaled", "rmsea.pvalue.scaled", "rmsea.notclose.pvalue.scaled" ) } if (robust.flag) { fit.rmsea <- c( fit.rmsea, "rmsea.robust", "rmsea.ci.lower.robust", "rmsea.ci.upper.robust", "rmsea.pvalue.robust", "rmsea.notclose.pvalue.robust" ) } # which one do we need? 
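# Quick numeric sketch of lav_fit_rmsea() as defined earlier in this file
# (hypothetical values):
#
#     lav_fit_rmsea(X2 = 45, df = 20, N = 300)
#     # sqrt((45/300)/20 - 1/300) = sqrt(0.0075 - 0.00333) ~ 0.0645
#
# the robust version simply replaces 1/N by c.hat/N (and the CI functions
# above rescale the noncentrality-based bounds by c.hat accordingly).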
if (missing(fit.measures)) { # default set fit.measures <- fit.rmsea } else { # remove any not-RMSEA related index from fit.measures rm.idx <- which(!fit.measures %in% fit.rmsea) if (length(rm.idx) > 0L) { fit.measures <- fit.measures[-rm.idx] } if (length(fit.measures) == 0L) { return(list()) } } # basic test statistics X2 <- TEST[[test.idx]]$stat df <- TEST[[test.idx]]$df G <- lavobject@Data@ngroups # number of groups N <- lav_utils_get_ntotal(lavobject = lavobject) # N vs N-1 # scaled X2/df values if (scaled.flag) { if (scaled.test == "scaled.shifted") { XX2 <- TEST[[scaled.idx]]$stat df2 <- df } else { XX2 <- X2 df2 <- sum(TEST[[scaled.idx]]$trace.UGamma) if (!is.finite(df2) || df2 == 0) { df2 <- as.numeric(NA) } } } # robust ingredients if (robust.flag) { if (categorical.flag) { out <- try(lav_fit_catml_dwls(lavobject, check.pd = cat.check.pd), silent = TRUE ) if (inherits(out, "try-error")) { XX3 <- df3 <- c.hat3 <- XX3.scaled <- as.numeric(NA) } else { XX3 <- out$XX3 df3 <- out$df3 c.hat3 <- c.hat <- out$c.hat3 XX3.scaled <- out$XX3.scaled } } else if (fiml.flag) { XX3 <- fiml$XX3 df3 <- fiml$df3 c.hat3 <- c.hat <- fiml$c.hat3 XX3.scaled <- fiml$XX3.scaled } else { XX3 <- X2 df3 <- df c.hat <- TEST[[scaled.idx]]$scaling.factor if (scaled.test == "scaled.shifted") { # compute c.hat from a and b a <- TEST[[scaled.idx]]$scaling.factor b <- TEST[[scaled.idx]]$shift.parameter c.hat3 <- a * (df - b) / df } else { c.hat3 <- c.hat } XX3.scaled <- TEST[[scaled.idx]]$stat } } # output container indices <- list() # what do we need? rmsea.val.flag <- rmsea.ci.flag <- rmsea.pvalue.flag <- FALSE if (any(c("rmsea", "rmsea.scaled", "rmsea.robust") %in% fit.measures)) { rmsea.val.flag <- TRUE } if (any(c( "rmsea.ci.lower", "rmsea.ci.upper", "rmsea.ci.level", "rmsea.ci.lower.scaled", "rmsea.ci.upper.scaled", "rmsea.ci.lower.robust", "rmsea.ci.upper.robust" ) %in% fit.measures)) { rmsea.ci.flag <- TRUE } if (any(c( "rmsea.pvalue", "rmsea.pvalue.scaled", "rmsea.pvalue.robust", "rmsea.notclose.pvalue", "rmsea.notclose.pvalue.scaled", "rmsea.notclose.pvalue.robust", "rmsea.close.h0", "rmsea.notclose.h0" ) %in% fit.measures)) { rmsea.pvalue.flag <- TRUE } # 1. RMSEA if (rmsea.val.flag) { indices["rmsea"] <- lav_fit_rmsea(X2 = X2, df = df, N = N, G = G) if (scaled.flag) { indices["rmsea.scaled"] <- lav_fit_rmsea( X2 = XX2, df = df2, N = N, G = G ) } if (robust.flag) { indices["rmsea.robust"] <- lav_fit_rmsea(X2 = XX3, df = df3, N = N, c.hat = c.hat3, G = G) } } # 2. RMSEA CI if (rmsea.ci.flag) { indices["rmsea.ci.level"] <- ci.level ci <- lav_fit_rmsea_ci(X2 = X2, df = df, N = N, G = G, level = ci.level) indices["rmsea.ci.lower"] <- ci$rmsea.ci.lower indices["rmsea.ci.upper"] <- ci$rmsea.ci.upper if (scaled.flag) { ci.scaled <- lav_fit_rmsea_ci( X2 = XX2, df = df2, N = N, G = G, level = ci.level ) indices["rmsea.ci.lower.scaled"] <- ci.scaled$rmsea.ci.lower indices["rmsea.ci.upper.scaled"] <- ci.scaled$rmsea.ci.upper } if (robust.flag) { # note: input is scaled test statistic! ci.robust <- lav_fit_rmsea_ci( X2 = XX3.scaled, df = df3, N = N, G = G, c.hat = c.hat, level = ci.level ) indices["rmsea.ci.lower.robust"] <- ci.robust$rmsea.ci.lower indices["rmsea.ci.upper.robust"] <- ci.robust$rmsea.ci.upper } } # 3. 
RMSEA pvalue if (rmsea.pvalue.flag) { indices["rmsea.close.h0"] <- close.h0 indices["rmsea.notclose.h0"] <- notclose.h0 indices["rmsea.pvalue"] <- lav_fit_rmsea_closefit( X2 = X2, df = df, N = N, G = G, rmsea.h0 = close.h0 ) indices["rmsea.notclose.pvalue"] <- lav_fit_rmsea_notclosefit( X2 = X2, df = df, N = N, G = G, rmsea.h0 = notclose.h0 ) if (scaled.flag) { indices["rmsea.pvalue.scaled"] <- lav_fit_rmsea_closefit( X2 = XX2, df = df2, N = N, G = G, rmsea.h0 = close.h0 ) indices["rmsea.notclose.pvalue.scaled"] <- lav_fit_rmsea_notclosefit( X2 = XX2, df = df2, N = N, G = G, rmsea.h0 = notclose.h0 ) } if (robust.flag) { indices["rmsea.pvalue.robust"] <- # new in 0.6-13 lav_fit_rmsea_closefit( X2 = XX3.scaled, df = df3, N = N, G = G, c.hat = c.hat, rmsea.h0 = close.h0 ) indices["rmsea.notclose.pvalue.robust"] <- # new in 0.6-13 lav_fit_rmsea_notclosefit( X2 = XX3.scaled, df = df3, N = N, G = G, c.hat = c.hat, rmsea.h0 = notclose.h0 ) } } # return only those that were requested indices[fit.measures] } lavaan/R/lav_simulate.R0000644000176200001440000002301014627656441014540 0ustar liggesusers# new version of lavSimulateData (replaced simulateData) # from lavaan 0.6-1 # YR 23 March 2018 # # - calls lavaan directly to get model-implied statistics # - allows for groups with different sets of variables # - lavSimulateData <- function(model = NULL, cmd.pop = "sem", ..., # data properties sample.nobs = 1000L, cluster.idx = NULL, # control empirical = FALSE, # output add.labels = TRUE, return.fit = FALSE, output = "data.frame") { # dotdotdot dotdotdot <- list(...) dotdotdot.orig <- dotdotdot # remove/override some options dotdotdot$verbose <- FALSE dotdotdot$debug <- FALSE dotdotdot$data <- NULL dotdotdot$sample.cov <- NULL # add sample.nobs/group.label to lavaan call dotdotdot$sample.nobs <- sample.nobs # always use meanstructure = TRUE dotdotdot$meanstructure <- TRUE # remove 'ordered' argument: we will first pretend we generate # continuous data only dotdotdot$ordered <- NULL # 'fit' population model fit.pop <- do.call(cmd.pop, args = c(list(model = model), dotdotdot)) # categorical? if (fit.pop@Model@categorical) { # refit, as if continuous only dotdotdot$ordered <- NULL fit.con <- do.call(cmd.pop, args = c(list(model = model), dotdotdot)) # restore dotdotdot$ordered <- dotdotdot.orig$ordered } else { fit.con <- fit.pop } # extract model implied statistics and data slot lavimplied <- fit.con@implied # take continuous mean/cov lavdata <- fit.pop@Data lavmodel <- fit.pop@Model lavpartable <- fit.pop@ParTable lavoptions <- fit.pop@Options # number of groups/levels ngroups <- lav_partable_ngroups(lavpartable) nblocks <- lav_partable_nblocks(lavpartable) # check sample.nobs argument if (lavdata@nlevels > 1L) { # multilevel if (is.null(cluster.idx)) { # default? 
-> 1000 per block if (is.null(sample.nobs)) { sample.nobs <- rep.int( c( 1000L, rep.int(100L, lavdata@nlevels - 1L) ), times = ngroups ) } else { # we assume sample.nobs only contains a single number sample.nobs <- rep.int( c( sample.nobs, rep.int(100L, lavdata@nlevels - 1L) ), times = ngroups ) } } else { # we got a cluster.idx argument if (!is.list(cluster.idx)) { cluster.idx <- rep(list(cluster.idx), ngroups) } if (!is.null(sample.nobs) && (length(sample.nobs) > 1L || sample.nobs != 1000L)) { lav_msg_warn(gettext( "sample.nobs will be ignored if cluster.idx is provided")) } sample.nobs <- numeric(nblocks) for (g in seq_len(ngroups)) { gg <- (g - 1) * lavdata@nlevels + 1L sample.nobs[gg] <- length(cluster.idx[[g]]) sample.nobs[gg + 1] <- length(unique(cluster.idx[[g]])) } } } else { # single level if (length(sample.nobs) == ngroups) { # nothing to do } else if (ngroups > 1L && length(sample.nobs) == 1L) { sample.nobs <- rep.int(sample.nobs, ngroups) } else { lav_msg_stop(gettextf( "ngroups = %1$s but sample.nobs has length = %2$s", ngroups, length(sample.nobs))) } } # check if ov.names are the same for each group if (ngroups > 1L) { N1 <- lavdata@ov.names[[1]] if (!all(sapply( lavdata@ov.names, function(x) all(x %in% N1) ))) { if (output == "data.frame") { output <- "matrix" lav_msg_warn(gettext( "groups do not contain the same set of variables; changing output= argument to \"matrix\"")) } } } # prepare data containers X <- vector("list", length = nblocks) # generate data per BLOCK for (b in seq_len(nblocks)) { if (lavoptions$conditional.x) { lav_msg_stop(gettext("conditional.x is not ready yet")) } else { COV <- lavimplied$cov[[b]] MU <- lavimplied$mean[[b]] } # if empirical = TRUE, rescale by N/(N-1), so that estimator=ML # returns exact results if (empirical) { # check if sample.nobs is large enough if (sample.nobs[b] < NCOL(COV)) { lav_msg_stop(gettextf( "empirical = TRUE requires sample.nobs = %1$s to be larger than the number of variables = %2$s in block = %3$s", sample.nobs[b], NCOL(COV), b )) } if (lavdata@nlevels > 1L && (b %% lavdata@nlevels == 1L)) { COV <- COV * sample.nobs[b] / (sample.nobs[b] - sample.nobs[b + 1]) } else { COV <- COV * sample.nobs[b] / (sample.nobs[b] - 1) } } # generate normal data tmp <- try( MASS::mvrnorm( n = sample.nobs[b], mu = MU, Sigma = COV, empirical = empirical ), silent = TRUE ) if (inherits(tmp, "try-error")) { # something went wrong; most likely: non-positive COV? ev <- eigen(COV, symmetric = TRUE, only.values = TRUE)$values if (any(ev < 0)) { lav_msg_stop(gettextf( "model-implied covariance matrix is not positive-definite in block = %1$s; smallest eigen value = %2$s; change the model parameters.", b, round(min(ev), 5))) } else { lav_msg_stop(gettextf("data generation failed for block = %s", b)) } } else { X[[b]] <- unname(tmp) } } # block if (output == "block") { return(X) } # if multilevel, make a copy, and create X[[g]] per group if (lavdata@nlevels > 1L) { X.block <- X X <- vector("list", length = ngroups) } # assemble data per group group.values <- lav_partable_group_values(lavpartable) for (g in 1:ngroups) { # multilevel? if (lavdata@nlevels > 1L) { # which block? 
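# (blocks are stacked per group, levels within groups: with nlevels = 2,
#  group 1 occupies blocks 1 (within) and 2 (between), group 2 occupies
#  blocks 3 and 4, and so on; hence the within-level block of group g is
#  bb = (g - 1) * nlevels + 1)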
bb <- (g - 1) * lavdata@nlevels + 1L Lp <- lavdata@Lp[[g]] p.tilde <- length(lavdata@ov.names[[g]]) tmp1 <- matrix(0, nrow(X.block[[bb]]), p.tilde + 1L) # one extra for tmp2 <- matrix(0, nrow(X.block[[bb]]), p.tilde + 1L) # the clus id # level 1 # if(empirical) { if (FALSE) { # force the within-cluster means to be zero (for both.idx vars) Y2 <- unname(as.matrix(aggregate(X.block[[bb]], # NOTE: cluster.idx becomes a factor # should be 111122223333... by = list(cluster.idx[[g]]), FUN = mean, na.rm = TRUE )[, -1])) # don't touch within-only variables w.idx <- match(Lp$within.idx[[2]], Lp$ov.idx[[1]]) Y2[, w.idx] <- 0 # center cluster-wise Y1c <- X.block[[bb]] - Y2[cluster.idx[[g]], , drop = FALSE] # this destroys the within covariance matrix sigma.sqrt <- lav_matrix_symmetric_sqrt(lavimplied$cov[[bb]]) NY <- NROW(Y1c) S <- cov(Y1c) * (NY - 1) / NY S.inv <- solve(S) S.inv.sqrt <- lav_matrix_symmetric_sqrt(S.inv) # transform X.block[[bb]] <- Y1c %*% S.inv.sqrt %*% sigma.sqrt } tmp1[, Lp$ov.idx[[1]]] <- X.block[[bb]] # level 2 tmp2[, Lp$ov.idx[[2]]] <- X.block[[bb + 1L]][cluster.idx[[g]], , drop = FALSE ] # final X[[g]] <- tmp1 + tmp2 # cluster id X[[g]][, p.tilde + 1L] <- cluster.idx[[g]] } # add variable names? if (add.labels) { if (lavdata@nlevels > 1L) { colnames(X[[g]]) <- c(lavdata@ov.names[[g]], "cluster") } else { colnames(X[[g]]) <- lavdata@ov.names[[g]] } } # any categorical variables? ov.ord <- lavNames(fit.pop, "ov.ord", group = group.values[g]) if (is.list(ov.ord)) { # multilvel -> use within level only ov.ord <- ov.ord[[1L]] } if (length(ov.ord) > 0L) { ov.names <- lavdata@ov.names[[g]] # which block? bb <- (g - 1) * lavdata@nlevels + 1L # th/names TH.VAL <- as.numeric(fit.pop@implied$th[[bb]]) if (length(lavmodel@num.idx[[bb]]) > 0L) { NUM.idx <- which(lavmodel@th.idx[[bb]] == 0) TH.VAL <- TH.VAL[-NUM.idx] } th.names <- fit.pop@pta$vnames$th[[bb]] TH.NAMES <- sapply(strsplit(th.names, split = "|", fixed = TRUE ), "[[", 1L) # use thresholds to cut for (o in ov.ord) { o.idx <- which(o == ov.names) th.idx <- which(o == TH.NAMES) th.val <- c(-Inf, sort(TH.VAL[th.idx]), +Inf) # center (because model-implied 'mean' may be nonzero) tmp <- X[[g]][, o.idx] tmp <- tmp - mean(tmp, na.rm = TRUE) X[[g]][, o.idx] <- cut(tmp, th.val, labels = FALSE) } } } # output if (output == "matrix") { if (ngroups == 1L) { out <- X[[1L]] } else { out <- X } } else if (output == "data.frame") { if (ngroups == 1L) { # convert to data.frame out <- as.data.frame(X[[1L]], stringsAsFactors = FALSE) } else if (ngroups > 1L) { # rbind out <- do.call("rbind", X) # add group column group <- rep.int(1:ngroups, times = sapply(X, NROW)) out <- cbind(out, group) # convert to data.frame out <- as.data.frame(out, stringsAsFactors = FALSE) } } else if (output == "cov") { if (ngroups == 1L) { out <- cov(X[[1L]]) } else { out <- lapply(X, cov) } } else { lav_msg_stop(gettextf("unknown option for argument output: %s", output)) } if (return.fit) { attr(out, "fit") <- fit.pop } out } lavaan/R/lav_bvord.R0000644000176200001440000005632214627656441014045 0ustar liggesusers# the weighted bivariate ordinal model # YR 19 Feb 2020 (replacing the old lav_polychor.R routines) # # - polychoric (and tetrachoric) correlations # - bivariate ordinal regression # - using sampling weights wt # two-way frequency table # only works if Y = 1,2,3,... 
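# sketch of the expected input/output (toy data, invented for illustration):
#   Y1 <- c(1, 1, 2, 2, 2, 3); Y2 <- c(1, 2, 1, 2, 2, 2)
#   lav_bvord_freq(Y1, Y2)
#   # -> matrix(c(1, 1, 0, 1, 2, 1), 3, 2): rows = categories of Y1,
#   #    columns = categories of Y2, cell (i, j) = (weighted) count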
lav_bvord_freq <- function(Y1, Y2, wt = NULL) { max.y1 <- max(Y1, na.rm = TRUE) max.y2 <- max(Y2, na.rm = TRUE) bin <- Y1 - 1L bin <- bin + max.y1 * (Y2 - 1L) bin <- bin + 1L if (is.null(wt)) { bin <- bin[!is.na(bin)] out <- array(tabulate(bin, nbins = max.y1 * max.y2), dim = c(max.y1, max.y2) ) } else { if (anyNA(Y1) || anyNA(Y2)) { wt[is.na(Y1) | is.na(Y2)] <- 0 bin[is.na(bin)] <- 0 } y.ncat <- max.y1 * max.y2 y.freq <- numeric(y.ncat) for (cat in seq_len(y.ncat)) { y.freq[cat] <- sum(wt[bin == cat]) } out <- array(y.freq, dim = c(max.y1, max.y2)) } out } # polychoric correlation # # zero.add is a vector: first element is for 2x2 tables only, second element # for general tables # zero.keep.margins is only used for 2x2 tables # lav_bvord_cor_twostep_fit <- function(Y1, Y2, eXo = NULL, wt = NULL, fit.y1 = NULL, fit.y2 = NULL, freq = NULL, zero.add = c(0.5, 0.0), zero.keep.margins = TRUE, zero.cell.warn = FALSE, zero.cell.flag = FALSE, optim.method = "nlminb2", optim.scale = 1.0, init.theta = NULL, control = list(step.min = 0.1), # 0.6-7 Y1.name = NULL, Y2.name = NULL) { if (is.null(fit.y1)) { fit.y1 <- lav_uvord_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # create cache environment cache <- lav_bvord_init_cache(fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt) # empty cells or not empty.cells <- FALSE # check for zero cells (if not exo), and catch some special cases if (cache$nexo == 0L) { freq <- cache$freq nr <- nrow(freq) nc <- ncol(freq) # check for empty cells if (any(freq == 0L)) { empty.cells <- TRUE if (zero.cell.warn) { if (!is.null(Y1.name) && !is.null(Y2.name)) { lav_msg_warn(gettextf( "empty cell(s) in bivariate table of %1$s x %2$s", Y1.name, Y2.name)) } else { lav_msg_warn(gettext("empty cell(s) in bivariate table")) } } } # treat 2x2 tables if (nr == 2L && nc == 2L) { idx <- which(freq == 0L) # catch 2 empty cells: perfect correlation! if (length(idx) == 2L) { lav_msg_warn(gettext("two empty cells in 2x2 table")) if (freq[1, 1] > 0L) { rho <- 1.0 if (zero.cell.flag) { attr(rho, "zero.cell.flag") <- empty.cells } return(rho) } else { rho <- -1.0 if (zero.cell.flag) { attr(rho, "zero.cell.flag") <- empty.cells } return(rho) } } else if (length(idx) == 1L && zero.add[1] > 0.0) { if (zero.keep.margins) { # add + compensate to preserve margins if (idx == 1L || idx == 4L) { # main diagonal freq[1, 1] <- freq[1, 1] + zero.add[1] freq[2, 2] <- freq[2, 2] + zero.add[1] freq[2, 1] <- freq[2, 1] - zero.add[1] freq[1, 2] <- freq[1, 2] - zero.add[1] } else { freq[1, 1] <- freq[1, 1] - zero.add[1] freq[2, 2] <- freq[2, 2] - zero.add[1] freq[2, 1] <- freq[2, 1] + zero.add[1] freq[1, 2] <- freq[1, 2] + zero.add[1] } } else { freq[idx] <- freq[idx] + zero.add[1] } } # general table } else { if (any(freq == 0L) && zero.add[2] > 0.0) { # general table: just add zero.add to the empty cell(s) freq[freq == 0] <- zero.add[2] } } # update (possibly change) freq table cache$freq <- freq # catch special cases for 2x2 tables if (nr == 2L && nc == 2L) { # 1. a*d == c*d storage.mode(freq) <- "numeric" # to avoid integer overflow if (freq[1, 1] * freq[2, 2] == freq[1, 2] * freq[2, 1]) { rho <- 0.0 if (zero.cell.flag) { attr(rho, "zero.cell.flag") <- empty.cells } return(rho) } # 2. 
equal margins (th1 = th2 = 0) if (cache$th.y1[1] == 0 && cache$th.y2[1] == 0) { # see eg Brown & Benedetti 1977 eq 2 rho <- -cos(2 * pi * freq[1, 1] / sum(freq)) if (zero.cell.flag) { attr(rho, "zero.cell.flag") <- empty.cells } return(rho) } } } # non-exo # optim.method minObjective <- lav_bvord_min_objective minGradient <- lav_bvord_min_gradient minHessian <- lav_bvord_min_hessian if (optim.method == "nlminb" || optim.method == "nlminb2") { # nothing to do } else if (optim.method == "nlminb0") { minGradient <- minHessian <- NULL } else if (optim.method == "nlminb1") { minHessian <- NULL } # optimize if (is.null(control$trace)) { control$trace <- ifelse(lav_verbose(), 1, 0) } # init theta? if (!is.null(init.theta)) { start.x <- init.theta } else { start.x <- cache$theta } # try 1 optim <- nlminb( start = start.x, objective = minObjective, gradient = minGradient, hessian = minHessian, control = control, scale = optim.scale, lower = -0.999, upper = +0.999, cache = cache ) # try 2 if (optim$convergence != 0L) { # try again, with different starting value optim <- nlminb( start = 0, objective = minObjective, gradient = NULL, hessian = NULL, control = control, scale = optim.scale, lower = -0.995, upper = +0.995, cache = cache ) } # check convergence if (optim$convergence != 0L) { if (!is.null(Y1.name) && !is.null(Y2.name)) { lav_msg_warn(gettextf( "estimation polychoric correlation did not converge for variables %1$s and %2$s", Y1.name, Y2.name)) } else { lav_msg_warn(gettext( "estimation polychoric correlation(s) did not always converge")) } rho <- start.x } else { rho <- optim$par } # zero.cell.flag if (zero.cell.flag) { attr(rho, "zero.cell.flag") <- empty.cells } rho } # prepare cache environment lav_bvord_init_cache <- function(fit.y1 = NULL, fit.y2 = NULL, wt = NULL, scores = FALSE, parent = parent.frame()) { # data Y1 <- fit.y1$y Y2 <- fit.y2$y eXo <- fit.y1$X # exo? if (is.null(eXo)) { nexo <- 0L freq <- lav_bvord_freq(Y1 = Y1, Y2 = Y2, wt = wt) th.y1 <- fit.y1$theta[fit.y1$th.idx] th.y2 <- fit.y2$theta[fit.y2$th.idx] nth.y1 <- length(th.y1) nth.y2 <- length(th.y2) pth.y1 <- pnorm(th.y1) pth.y2 <- pnorm(th.y2) upper.y <- rep(th.y2, times = rep.int(nth.y1, nth.y2)) upper.x <- rep(th.y1, times = ceiling(length(upper.y)) / nth.y1) } else { nexo <- ncol(eXo) freq <- NULL fit.y1.z1 <- fit.y1$z1 fit.y2.z1 <- fit.y2$z1 fit.y1.z2 <- fit.y1$z2 fit.y2.z2 <- fit.y2$z2 # take care of missing values if (length(fit.y1$missing.idx) > 0L || length(fit.y2$missing.idx) > 0L) { missing.idx <- unique(c(fit.y1$missing.idx, fit.y2$missing.idx)) fit.y1.z1[missing.idx] <- 0 fit.y2.z1[missing.idx] <- 0 fit.y1.z2[missing.idx] <- 0 fit.y2.z2[missing.idx] <- 0 } else { missing.idx <- integer(0L) } } # nobs if (is.null(wt)) { N <- length(Y1) } else { N <- sum(wt) } # starting value (for both exo and not-exo) # if(is.null(wt)) { rho.init <- cor(Y1, Y2, use = "pairwise.complete.obs") # } # cov.wt does not handle missing values... 
# rho.init <- cov.wt(cbind(Y1, Y2), wt = wt, cor = TRUE)$cor[2,1] if (is.na(rho.init) || abs(rho.init) >= 1.0) { rho.init <- 0.0 } # parameter vector theta <- rho.init # only, for now # different cache if exo or not if (nexo == 0L) { if (scores) { out <- list2env( list( nexo = nexo, theta = theta, N = N, fit.y1.z1 = fit.y1$z1, fit.y1.z2 = fit.y1$z2, fit.y2.z1 = fit.y2$z1, fit.y2.z2 = fit.y2$z2, y1.Y1 = fit.y1$Y1, y1.Y2 = fit.y1$Y2, y2.Y1 = fit.y2$Y1, y2.Y2 = fit.y2$Y2, Y1 = Y1, Y2 = Y2, freq = freq, th.y1 = th.y1, th.y2 = th.y2, nth.y1 = nth.y1, nth.y2 = nth.y2, pth.y1 = pth.y1, pth.y2 = pth.y2, upper.y = upper.y, upper.x = upper.x ), parent = parent ) } else { out <- list2env( list( nexo = nexo, theta = theta, N = N, Y1 = Y1, Y2 = Y2, freq = freq, th.y1 = th.y1, th.y2 = th.y2, nth.y1 = nth.y1, nth.y2 = nth.y2, pth.y1 = pth.y1, pth.y2 = pth.y2, upper.y = upper.y, upper.x = upper.x ), parent = parent ) } } else { if (scores) { out <- list2env( list( nexo = nexo, theta = theta, wt = wt, N = N, eXo = eXo, y1.Y1 = fit.y1$Y1, y1.Y2 = fit.y1$Y2, y2.Y1 = fit.y2$Y1, y2.Y2 = fit.y2$Y2, fit.y1.z1 = fit.y1.z1, fit.y1.z2 = fit.y1.z2, fit.y2.z1 = fit.y2.z1, fit.y2.z2 = fit.y2.z2, missing.idx = missing.idx ), parent = parent ) } else { out <- list2env( list( nexo = nexo, theta = theta, wt = wt, N = N, fit.y1.z1 = fit.y1.z1, fit.y1.z2 = fit.y1.z2, fit.y2.z1 = fit.y2.z1, fit.y2.z2 = fit.y2.z2, missing.idx = missing.idx ), parent = parent ) } } out } # probabilities for each cell, given rho, th.y1 and th.y2 lav_bvord_noexo_pi_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # catch special case: rho = 0.0 if (rho == 0.0) { rowPI <- base::diff(c(0, pth.y1, 1)) colPI <- base::diff(c(0, pth.y2, 1)) PI.ij <- base::outer(rowPI, colPI) return(PI.ij) } BI <- pbivnorm::pbivnorm(x = upper.x, y = upper.y, rho = rho) dim(BI) <- c(nth.y1, nth.y2) BI <- rbind(0, BI, pth.y2, deparse.level = 0L) BI <- cbind(0, BI, c(0, pth.y1, 1), deparse.level = 0L) # get probabilities nr <- nrow(BI) nc <- ncol(BI) PI <- BI[-1L, -1L] - BI[-1L, -nc] - BI[-nr, -1L] + BI[-nr, -nc] # all elements should be strictly positive PI[PI < sqrt(.Machine$double.eps)] <- sqrt(.Machine$double.eps) return(PI) }) } # partial derivative of CDF(th.y1, th.y2, rho) with respect to rho lav_bvord_noexo_phi_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # compute dbinorm for all possible combinations t1 <- rep(th.y1, times = nth.y2) t2 <- rep(th.y2, each = nth.y1) dbiNorm <- matrix(dbinorm(t1, t2, rho), nrow = nth.y1, ncol = nth.y2 ) p1 <- p2 <- p3 <- p4 <- matrix(0, nth.y1 + 1L, nth.y2 + 1L) t1.idx <- seq_len(nth.y1) t2.idx <- seq_len(nth.y2) # p1 is left-upper corner p1[t1.idx, t2.idx] <- dbiNorm # p2 is left-lower corner p2[t1.idx + 1L, t2.idx] <- dbiNorm # p3 is right-upper corner p3[t1.idx, t2.idx + 1L] <- dbiNorm # p3 is right-lower corner p4[t1.idx + 1L, t2.idx + 1L] <- dbiNorm phi <- p1 - p2 - p3 + p4 return(phi) }) } # Olsson 1979 A2 lav_bvord_noexo_gnorm_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # note: Olsson 1979 A2 contains an error!! 
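# (the corrected expression is implemented below; a quick finite-difference
#  sanity check, not run, with guv() as defined in the function underneath:
#    u <- 0.3; v <- -0.7; rho <- 0.4; h <- 1e-6
#    (dbinorm(u, v, rho + h) - dbinorm(u, v, rho - h)) / (2 * h)  # numerical
#    dbinorm(u, v, rho) * guv(u, v, rho)                          # analytical
#  the two should match to several decimal places)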
# derivative of phi_2(y1,y2;rho) with respect to rho equals # phi_2(y1,y2;rho) * guv(y1,y2;rho), where guv() is defined below: guv <- function(u, v, rho) { R <- (1 - rho * rho) (u * v * R - rho * ((u * u) - 2 * rho * u * v + (v * v)) + rho * R) / (R * R) } # compute gnorm for all possible combinations Gnorm <- dbiNorm * matrix(guv(t1, t2, rho), nth.y1, nth.y2) p1 <- p2 <- p3 <- p4 <- matrix(0, nth.y1 + 1L, nth.y2 + 1L) t1.idx <- seq_len(nth.y1) t2.idx <- seq_len(nth.y2) # p1 is left-upper corner p1[t1.idx, t2.idx] <- Gnorm # p2 is left-lower corner p2[t1.idx + 1L, t2.idx] <- Gnorm # p3 is right-upper corner p3[t1.idx, t2.idx + 1L] <- Gnorm # p4 is right-lower corner p4[t1.idx + 1L, t2.idx + 1L] <- Gnorm gnorm <- p1 - p2 - p3 + p4 return(gnorm) }) } # casewise likelihoods, unweighted! lav_bvord_lik_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # no exo if (nexo == 0L) { PI <- lav_bvord_noexo_pi_cache(cache) lik <- PI[cbind(Y1, Y2)] # exo } else { lik <- pbinorm( upper.x = fit.y1.z1, upper.y = fit.y2.z1, lower.x = fit.y1.z2, lower.y = fit.y2.z2, rho = rho ) if (length(missing.idx) > 0L) { lik[missing.idx] <- NA } # catch very small values lik.toosmall.idx <- which(lik < sqrt(.Machine$double.eps)) lik[lik.toosmall.idx] <- as.numeric(NA) } return(lik) }) } lav_bvord_logl_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # no exo if (nexo == 0L) { PI <- lav_bvord_noexo_pi_cache(cache) logl <- sum(freq * log(PI), na.rm = TRUE) # exo } else { lik <- lav_bvord_lik_cache(cache) # unweighted! if (!is.null(wt)) { logl <- sum(wt * log(lik), na.rm = TRUE) } else { logl <- sum(log(lik), na.rm = TRUE) } } return(logl) }) } lav_bvord_gradient_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # no exo if (nexo == 0L) { phi <- lav_bvord_noexo_phi_cache(cache) bad.idx <- which(PI <= sqrt(.Machine$double.eps)) if (length(bad.idx) > 0L) { PI[bad.idx] <- as.numeric(NA) } dx.rho <- sum((freq * phi) / PI, na.rm = TRUE) # exo } else { d1 <- dbinorm(fit.y1.z1, fit.y2.z1, rho) d2 <- dbinorm(fit.y1.z2, fit.y2.z1, rho) d3 <- dbinorm(fit.y1.z1, fit.y2.z2, rho) d4 <- dbinorm(fit.y1.z2, fit.y2.z2, rho) phi <- (d1 - d2 - d3 + d4) # avoid dividing by very tiny numbers (new in 0.6-6) # -> done automatically: lik == NA in this case # bad.idx <- which(lik <= sqrt(.Machine$double.eps)) # if(length(bad.idx) > 0L) { # lik[bad.idx] <- as.numeric(NA) # } dx2 <- phi / lik if (is.null(wt)) { dx.rho <- sum(dx2, na.rm = TRUE) } else { dx.rho <- sum(wt * dx2, na.rm = TRUE) } } return(dx.rho) }) } lav_bvord_hessian_cache <- function(cache = NULL) { with(cache, { rho <- theta[1L] # no exo if (nexo == 0L) { bad.idx <- which(PI <= sqrt(.Machine$double.eps)) if (length(bad.idx) > 0L) { PI[bad.idx] <- as.numeric(NA) } gnorm <- lav_bvord_noexo_gnorm_cache(cache) # H <- sum( freq * (gnorm/PI - (phi*phi)/(PI*PI)), na.rm = TRUE) H <- (sum((freq * gnorm) / PI, na.rm = TRUE) - sum((freq * phi * phi) / (PI * PI), na.rm = TRUE)) dim(H) <- c(1L, 1L) # exo } else { guv <- function(u, v, rho) { R <- (1 - rho * rho) (u * v * R - rho * ((u * u) - 2 * rho * u * v + (v * v)) + rho * R) / (R * R) } gnorm <- ((d1 * guv(fit.y1.z1, fit.y2.z1, rho)) - (d2 * guv(fit.y1.z2, fit.y2.z1, rho)) - (d3 * guv(fit.y1.z1, fit.y2.z2, rho)) + (d4 * guv(fit.y1.z2, fit.y2.z2, rho))) if (is.null(wt)) { H <- sum(gnorm / lik - (phi * phi) / (lik * lik), na.rm = TRUE) } else { H <- sum(wt * (gnorm / lik - (phi * phi) / (lik * lik)), na.rm = TRUE) } dim(H) <- c(1L, 1L) } return(H) }) } # compute total (log)likelihood, for specific 'x'
(nlminb) lav_bvord_min_objective <- function(x, cache = NULL) { cache$theta <- x -1 * lav_bvord_logl_cache(cache = cache) / cache$N } # compute gradient, for specific 'x' (nlminb) lav_bvord_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_bvord_logl_cache(cache = cache) } -1 * lav_bvord_gradient_cache(cache = cache) / cache$N } # compute hessian, for specific 'x' (nlminb) lav_bvord_min_hessian <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_bvord_logl_cache(cache = cache) tmp <- lav_bvord_gradient_cache(cache = cache) } -1 * lav_bvord_hessian_cache(cache = cache) / cache$N } # casewise scores lav_bvord_cor_scores_cache <- function(cache = NULL, na.zero = FALSE, use.weights = TRUE) { with(cache, { rho <- theta[1L] R <- sqrt(1 - rho * rho) # lik lik <- lav_bvord_lik_cache(cache = cache) bad.idx <- which(lik <= sqrt(.Machine$double.eps)) if (length(bad.idx) > 0L) { lik[bad.idx] <- as.numeric(NA) } d.y1.z1 <- dnorm(fit.y1.z1) d.y1.z2 <- dnorm(fit.y1.z2) d.y2.z1 <- dnorm(fit.y2.z1) d.y2.z2 <- dnorm(fit.y2.z2) # th.y1 if (identical(R, 0.0)) { y1.Z1 <- d.y1.z1 * 0.5 y1.Z2 <- d.y1.z2 * 0.5 } else { y1.Z1 <- (d.y1.z1 * pnorm((fit.y2.z1 - rho * fit.y1.z1) / R) - d.y1.z1 * pnorm((fit.y2.z2 - rho * fit.y1.z1) / R)) y1.Z2 <- (d.y1.z2 * pnorm((fit.y2.z1 - rho * fit.y1.z2) / R) - d.y1.z2 * pnorm((fit.y2.z2 - rho * fit.y1.z2) / R)) } dx.th.y1 <- (y1.Y1 * y1.Z1 - y1.Y2 * y1.Z2) / lik if (na.zero) { dx.th.y1[is.na(dx.th.y1)] <- 0 } # th.y2 if (identical(R, 0.0)) { y2.Z1 <- d.y2.z1 * 0.5 y2.Z2 <- d.y2.z2 * 0.5 } else { y2.Z1 <- (d.y2.z1 * pnorm((fit.y1.z1 - rho * fit.y2.z1) / R) - d.y2.z1 * pnorm((fit.y1.z2 - rho * fit.y2.z1) / R)) y2.Z2 <- (d.y2.z2 * pnorm((fit.y1.z1 - rho * fit.y2.z2) / R) - d.y2.z2 * pnorm((fit.y1.z2 - rho * fit.y2.z2) / R)) } dx.th.y2 <- (y2.Y1 * y2.Z1 - y2.Y2 * y2.Z2) / lik if (na.zero) { dx.th.y2[is.na(dx.th.y2)] <- 0 } # slopes dx.sl.y1 <- dx.sl.y2 <- NULL if (nexo > 0L) { # sl.y1 dx.sl.y1 <- (y1.Z2 - y1.Z1) * eXo / lik if (na.zero) { dx.sl.y1[is.na(dx.sl.y1)] <- 0 } # sl.y2 dx.sl.y2 <- (y2.Z2 - y2.Z1) * eXo / lik if (na.zero) { dx.sl.y2[is.na(dx.sl.y2)] <- 0 } } # rho if (nexo == 0L) { phi <- lav_bvord_noexo_phi_cache(cache) dx <- phi[cbind(Y1, Y2)] } else { dx <- (dbinorm(fit.y1.z1, fit.y2.z1, rho) - dbinorm(fit.y1.z2, fit.y2.z1, rho) - dbinorm(fit.y1.z1, fit.y2.z2, rho) + dbinorm(fit.y1.z2, fit.y2.z2, rho)) } dx.rho <- dx / lik if (na.zero) { dx.rho[is.na(dx.rho)] <- 0 } if (!is.null(wt) && use.weights) { dx.th.y1 <- dx.th.y1 * wt dx.th.y2 <- dx.th.y2 * wt if (nexo > 0L) { dx.sl.y1 <- dx.sl.y1 * wt dx.sl.y2 <- dx.sl.y2 * wt } dx.rho <- dx.rho * wt } out <- list( dx.th.y1 = dx.th.y1, dx.th.y2 = dx.th.y2, dx.sl.y1 = dx.sl.y1, dx.sl.y2 = dx.sl.y2, dx.rho = dx.rho ) return(out) }) } # casewise scores - no cache lav_bvord_cor_scores <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, th.y1 = NULL, th.y2 = NULL, sl.y1 = NULL, sl.y2 = NULL, na.zero = FALSE, use.weights = TRUE) { if (is.null(fit.y1)) { fit.y1 <- lav_uvord_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # update z1/z2 if needed (used in pml_deriv1() in lav_model_gradient_pml.R) fit.y1 <- lav_uvord_update_fit( fit.y = fit.y1, th.new = th.y1, sl.new = sl.y1 ) fit.y2 <- lav_uvord_update_fit( fit.y = fit.y2, th.new = th.y2, sl.new = sl.y2 ) # create cache environment cache <- 
lav_bvord_init_cache( fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt, scores = TRUE ) cache$theta <- rho SC <- lav_bvord_cor_scores_cache( cache = cache, na.zero = na.zero, use.weights = use.weights ) SC } # logl - no cache lav_bvord_logl <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, th.y1 = NULL, th.y2 = NULL, sl.y1 = NULL, sl.y2 = NULL) { if (is.null(fit.y1)) { fit.y1 <- lav_uvord_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # update z1/z2 if needed (used in pml_deriv1() in lav_model_gradient_pml.R) fit.y1 <- lav_uvord_update_fit( fit.y = fit.y1, th.new = th.y1, sl.new = sl.y1 ) fit.y2 <- lav_uvord_update_fit( fit.y = fit.y2, th.new = th.y2, sl.new = sl.y2 ) # create cache environment cache <- lav_bvord_init_cache(fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt) cache$theta <- rho lav_bvord_logl_cache(cache = cache) } # lik - no cache lav_bvord_lik <- function(Y1, Y2, eXo = NULL, wt = NULL, rho = NULL, fit.y1 = NULL, fit.y2 = NULL, th.y1 = NULL, th.y2 = NULL, sl.y1 = NULL, sl.y2 = NULL, .log = FALSE) { if (is.null(fit.y1)) { fit.y1 <- lav_uvord_fit(y = Y1, X = eXo, wt = wt) } if (is.null(fit.y2)) { fit.y2 <- lav_uvord_fit(y = Y2, X = eXo, wt = wt) } # update fit.y1/fit.y2 fit.y1 <- lav_uvord_update_fit( fit.y = fit.y1, th.new = th.y1, sl.new = sl.y1 ) fit.y2 <- lav_uvord_update_fit( fit.y = fit.y2, th.new = th.y2, sl.new = sl.y2 ) # create cache environment cache <- lav_bvord_init_cache(fit.y1 = fit.y1, fit.y2 = fit.y2, wt = wt) cache$theta <- rho lik <- lav_bvord_lik_cache(cache = cache) # unweighted if (.log) { lik <- log(lik) } if (!is.null(wt)) { if (.log) { lik <- wt * lik } else { tmp <- wt * log(lik) lik <- exp(tmp) } } lik } # noexo_pi - for backwards compatibility lav_bvord_noexo_pi <- function(rho = NULL, th.y1 = NULL, th.y2 = NULL) { nth.y1 <- length(th.y1) nth.y2 <- length(th.y2) pth.y1 <- pnorm(th.y1) pth.y2 <- pnorm(th.y2) # catch special case: rho = 0.0 if (rho == 0.0) { rowPI <- base::diff(c(0, pth.y1, 1)) colPI <- base::diff(c(0, pth.y2, 1)) PI.ij <- base::outer(rowPI, colPI) return(PI.ij) } # prepare for a single call to pbinorm upper.y <- rep(th.y2, times = rep.int(nth.y1, nth.y2)) upper.x <- rep(th.y1, times = ceiling(length(upper.y)) / nth.y1) # rho <- rep(rho, length(upper.x)) # only one rho here BI <- pbivnorm::pbivnorm(x = upper.x, y = upper.y, rho = rho) dim(BI) <- c(nth.y1, nth.y2) BI <- rbind(0, BI, pth.y2, deparse.level = 0L) BI <- cbind(0, BI, c(0, pth.y1, 1), deparse.level = 0L) # get probabilities nr <- nrow(BI) nc <- ncol(BI) PI <- BI[-1L, -1L] - BI[-1L, -nc] - BI[-nr, -1L] + BI[-nr, -nc] # all elements should be strictly positive PI[PI < sqrt(.Machine$double.eps)] <- sqrt(.Machine$double.eps) PI } lavaan/R/lav_lavaan_step15_baseline.R0000644000176200001440000000334214627656441017230 0ustar liggesuserslav_lavaan_step15_baseline <- function(lavoptions = NULL, lavsamplestats = NULL, lavdata = NULL, lavcache = NULL, lavh1 = NULL, lavpartable = NULL) { # # # # # # # # # # # # # 15. baseline # # (since 0.6-5) # # # # # # # # # # # # if options$do.fit and options$test not "none" and options$baseline = TRUE # try fit.indep <- lav_object_independence(...) 
# if not successful or not converged # ** warning ** # lavbaseline <- list() # else # lavbaseline <- list with partable and test of fit.indep lavbaseline <- list() if (lavoptions$do.fit && !("none" %in% lavoptions$test) && is.logical(lavoptions$baseline) && lavoptions$baseline) { if (lav_verbose()) { cat("lavbaseline ...") } current.verbose <- lav_verbose() lav_verbose(FALSE) fit.indep <- try(lav_object_independence( object = NULL, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavoptions = lavoptions, lavpartable = lavpartable, lavh1 = lavh1 ), silent = TRUE) lav_verbose(current.verbose) if (inherits(fit.indep, "try-error") || !fit.indep@optim$converged) { lav_msg_warn(gettext("estimation of the baseline model failed.")) lavbaseline <- list() if (lav_verbose()) { cat(" FAILED.\n") } } else { # store relevant information lavbaseline <- list( partable = fit.indep@ParTable, test = fit.indep@test ) if (lav_verbose()) { cat(" done.\n") } } } lavbaseline } lavaan/R/lav_cfa_jamesstein.R0000644000176200001440000002204714627656441015701 0ustar liggesusers# James-Stein estimator # # Burghgraeve, E., De Neve, J., & Rosseel, Y. (2021). Estimating structural # equation models using James-Stein type shrinkage estimators. Psychometrika, # 86(1), 96-130. # # YR 08 Feb 2023: - first version in lavaan, cfa only (for now) lav_cfa_jamesstein <- function(S, Y = NULL, # raw data marker.idx = NULL, lambda.nonzero.idx = NULL, theta = NULL, # vector! theta.bounds = TRUE, aggregated = FALSE) { # aggregated? # dimensions nvar <- ncol(S) nfac <- length(marker.idx) stopifnot(length(theta) == nvar) N <- nrow(Y) stopifnot(ncol(Y) == nvar) # overview of lambda structure B <- LAMBDA <- B.nomarker <- matrix(0, nvar, nfac) lambda.marker.idx <- (seq_len(nfac) - 1L) * nvar + marker.idx B[lambda.marker.idx] <- LAMBDA[lambda.marker.idx] <- 1L B[lambda.nonzero.idx] <- B.nomarker[lambda.nonzero.idx] <- 1L # Nu NU <- numeric(nvar) # do we first 'clip' the theta values so they are within standard bounds? # (Question: do we need the 0.01 and 0.99 multipliers?)
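# (the clipping below is equivalent to the vectorized one-liner
#    theta <- pmin(pmax(theta, 0 * diagS), 1 * diagS)
#  written out in long form so the 0/1 multipliers are easy to change)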
diagS <- diag(S) if (theta.bounds) { # lower bound lower.bound <- diagS * 0 # * 0.01 too.small.idx <- which(theta < lower.bound) if (length(too.small.idx) > 0L) { theta[too.small.idx] <- lower.bound[too.small.idx] } # upper bound upper.bound <- diagS * 1 # * 0.99 too.large.idx <- which(theta > upper.bound) if (length(too.large.idx) > 0L) { theta[too.large.idx] <- upper.bound[too.large.idx] } } # compute conditional expectation conditional on the scaling indicator E.JS1 <- lav_cfa_jamesstein_ce( Y = Y, marker.idx = marker.idx, resvars.markers = theta[marker.idx] ) # compute LAMBDA for (f in seq_len(nfac)) { nomarker.idx <- which(B.nomarker[, f] == 1) Y.nomarker.f <- Y[, nomarker.idx, drop = FALSE] # regress no.marker.idx data on E(\eta|Y) fit <- lm(Y.nomarker.f ~ E.JS1[, f, drop = FALSE]) # extract 'lambda' values LAMBDA[nomarker.idx, f] <- drop(coef(fit)[-1, ]) # (optional) extract means # NU[nomarker.idx] <- drop(coef(fit)[1,]) if (aggregated) { # local copy of 'scaling' LAMBDA LAMBDA.scaling <- LAMBDA J <- length(nomarker.idx) for (j in seq_len(J)) { # data without this indicator j.idx <- nomarker.idx[j] no.j.idx <- c(marker.idx[f], nomarker.idx[-j]) Y.agg <- Y[, no.j.idx, drop = FALSE] Y.j <- Y[, j.idx, drop = FALSE] # retrieve estimated values scaling JS lambda.JS.scaling <- LAMBDA.scaling[no.j.idx, f, drop = FALSE] # optimize the weights starting.weights <- rep(1 / J, times = J) w <- optim( par = starting.weights, fn = lav_cfa_jamesstein_rel, data = Y.agg, resvars = theta[no.j.idx] )$par # make sure the weights sum up to 1 w.optim <- w / sum(w) # compute aggregated indicator using the optimal weights y_agg <- t(t(w.optim) %*% t(Y.agg)) # compute error variance of the aggregated indicator var_eps_agg <- drop(t(w.optim) %*% diag(theta[no.j.idx], nrow = length(no.j.idx)) %*% w.optim) # compute conditional expectation using aggregated indicator tmp <- lav_cfa_jamesstein_ce( Y = y_agg, marker.idx = 1L, resvars.markers = var_eps_agg ) CE_agg <- tmp / drop(w.optim %*% lambda.JS.scaling) # compute factor loading fit <- lm(Y.j ~ CE_agg) LAMBDA[j.idx, f] <- drop(coef(fit)[-1]) # (optional) extract means # NU[j.idx] <- drop(coef(fit)[1,]) } # j } # aggregate } # f out <- list(lambda = LAMBDA, nu = NU) } # internal function to be used inside lav_optim_noniter # return 'x', the estimated vector of free parameters lav_cfa_jamesstein_internal <- function(lavobject = NULL, # convenience # internal slot lavmodel = NULL, lavsamplestats = NULL, lavpartable = NULL, lavdata = NULL, lavoptions = NULL, theta.bounds = TRUE) { lavpta <- NULL if (!is.null(lavobject)) { stopifnot(inherits(lavobject, "lavaan")) # extract slots lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavpartable <- lav_partable_set_cache(lavobject@ParTable, lavobject@pta) lavpta <- lavobject@pta lavdata <- lavobject@Data lavoptions <- lavobject@Options } if (is.null(lavpta)) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } # no structural part! if (any(lavpartable$op == "~")) { lav_msg_stop(gettext( "JS(A) estimator only available for CFA models (for now)")) } # no BETA matrix! 
(i.e., no higher-order factors) if (!is.null(lavmodel@GLIST$beta)) { lav_msg_stop(gettext( "JS(A) estimator not available for models that require a BETA matrix")) } # no std.lv = TRUE for now if (lavoptions$std.lv) { lav_msg_stop(gettext("S(A) estimator not available if std.lv = TRUE")) } nblocks <- lav_partable_nblocks(lavpartable) stopifnot(nblocks == 1L) # for now b <- 1L sample.cov <- lavsamplestats@cov[[b]] nvar <- nrow(sample.cov) lv.names <- lavpta$vnames$lv.regular[[b]] nfac <- length(lv.names) marker.idx <- lavpta$vidx$lv.marker[[b]] lambda.idx <- which(names(lavmodel@GLIST) == "lambda") lambda.nonzero.idx <- lavmodel@m.free.idx[[lambda.idx]] # only diagonal THETA for now... theta.idx <- which(names(lavmodel@GLIST) == "theta") # usually '2' m.theta <- lavmodel@m.free.idx[[theta.idx]] nondiag.idx <- m.theta[!m.theta %in% lav_matrix_diag_idx(nvar)] if (length(nondiag.idx) > 0L) { lav_msg_warn(gettext( "this implementation of JS/JSA does not handle correlated residuals yet!" )) } # 1. obtain estimate for (diagonal elements of) THETA # for now we use Spearman per factor B <- matrix(0, nvar, nfac) lambda.marker.idx <- (seq_len(nfac) - 1L) * nvar + marker.idx B[lambda.marker.idx] <- 1L B[lambda.nonzero.idx] <- 1L theta <- numeric(nvar) for (f in seq_len(nfac)) { ov.idx <- which(B[, f] == 1L) S.fac <- sample.cov[ov.idx, ov.idx, drop = FALSE] theta[ov.idx] <- lav_cfa_theta_spearman(S.fac, bounds = "wide") } THETA <- diag(theta, nrow = nvar) # 2. run James-Stein algorithm Y <- lavdata@X[[1]] # raw data aggregated <- FALSE if (lavoptions$estimator == "JSA") { aggregated <- TRUE } out <- lav_cfa_jamesstein( S = sample.cov, Y = Y, marker.idx = marker.idx, lambda.nonzero.idx = lambda.nonzero.idx, theta = theta, # experimental theta.bounds = theta.bounds, # aggregated = aggregated ) LAMBDA <- out$lambda # 3. PSI PSI <- lav_cfa_lambdatheta2psi( lambda = LAMBDA, theta = theta, S = sample.cov, mapping = "ML" ) # store matrices in lavmodel@GLIST lavmodel@GLIST$lambda <- LAMBDA lavmodel@GLIST$theta <- THETA lavmodel@GLIST$psi <- PSI # extract free parameters only x <- lav_model_get_parameters(lavmodel) # apply bounds (if any) if (!is.null(lavpartable$lower)) { lower.x <- lavpartable$lower[lavpartable$free > 0] too.small.idx <- which(x < lower.x) if (length(too.small.idx) > 0L) { x[too.small.idx] <- lower.x[too.small.idx] } } if (!is.null(lavpartable$upper)) { upper.x <- lavpartable$upper[lavpartable$free > 0] too.large.idx <- which(x > upper.x) if (length(too.large.idx) > 0L) { x[too.large.idx] <- upper.x[too.large.idx] } } x } # Conditional expectation (Section 2.1, eq. 
10) lav_cfa_jamesstein_ce <- function(Y = NULL, marker.idx = NULL, resvars.markers = NULL) { Y <- as.matrix(Y) # sample size N <- nrow(Y) N1 <- N - 1 N3 <- N - 3 # markers only Y.marker <- Y[, marker.idx, drop = FALSE] # means and variances MEAN <- colMeans(Y.marker, na.rm = TRUE) VAR <- apply(Y.marker, 2, var, na.rm = TRUE) # 1 - R per maker oneminR <- N3 * resvars.markers / (N1 * VAR) # R per marker R <- 1 - oneminR # create E(\eta | Y) E.eta.cond.Y <- t(t(Y.marker) * R + oneminR * MEAN) E.eta.cond.Y } # Reliability function used to obtain the weights (Section 4, Aggregation) lav_cfa_jamesstein_rel <- function(w = NULL, data = NULL, resvars = NULL) { # construct weight vector w <- matrix(w, ncol = 1) # construct aggregated indicator: y_agg = t(w) %*% y_i y_agg <- t(t(w) %*% t(data)) # calculate variance of aggregated indicator var_y_agg <- var(y_agg) # calculate error variance of the aggregated indicator var_eps_agg <- t(w) %*% diag(resvars) %*% w # reliability function to be maximized rel <- (var_y_agg - var_eps_agg) %*% solve(var_y_agg) # return value return(-rel) } lavaan/R/lav_residuals_casewise.R0000644000176200001440000000233114627656441016576 0ustar liggesusers# casewise residuals lav_residuals_casewise <- function(object, labels = labels) { # check if we have full data if (object@Data@data.type != "full") { lav_msg_stop(gettext("casewise residuals not available if sample statistics were used for fitting the model")) } # check if we have categorical data if (object@Model@categorical) { lav_msg_stop(gettext( "casewise residuals not available if data is categorical")) } G <- object@Data@ngroups ov.names <- object@Data@ov.names X <- object@Data@X if (object@Model@categorical) { # add 'eXo' columns to X X <- lapply(seq_len(object@Data@ngroups), function(g) { ret <- cbind(X[[g]], object@Data@eXo[[g]]) ret }) } M <- lav_predict_yhat(object) # Note: if M has already class lavaan.matrix, print goes crazy # with Error: C stack usage is too close to the limit OUT <- lapply(seq_len(G), function(x) { out <- X[[x]] - M[[x]] class(out) <- c("lavaan.matrix", "matrix") out }) if (labels) { for (g in 1:G) { colnames(OUT[[g]]) <- object@pta$vnames$ov[[g]] } } if (G == 1) { OUT <- OUT[[1]] } else { names(OUT) <- unlist(object@Data@group.label) } OUT } lavaan/R/lav_options_mimic.R0000644000176200001440000000543514627656441015601 0ustar liggesusers# LDW 11/4/24 : overwrite defaults depending on mimic in separate function # lav_options_mimic <- function(opt) { mlr.test <- "yuan.bentler.mplus" # for now if (opt$mimic == "lavaan") { if (is.character(opt$conditional.x)) { # = "default" if (lav_options_estimatorgroup(opt$estimator) == "ML") { opt$conditional.x <- FALSE } } if (opt$fixed.x == "default") { if (any(lav_options_estimatorgroup(opt$estimator) == c("MML", "ML")) && is.character(opt$start) && opt$start != "simple") { # new in 0.6-12 opt$fixed.x <- TRUE } } if (is.character(opt$zero.keep.margins)) { # = "default" opt$zero.keep.margins <- TRUE } } else if (opt$mimic == "Mplus") { if (length(opt$group.equal) == 0L || all(nchar(opt$group.equal) == 0L)) { if (opt$.categorical) { opt$group.equal <- c("loadings", "thresholds") } else { if (is.logical(opt$meanstructure) && !opt$meanstructure) { opt$group.equal <- "loadings" } else { opt$group.equal <- c("loadings", "intercepts") } } } if (opt$missing == "default") { if (!opt$.categorical && any(opt$estimator == c("ml", "mlr"))) { # since version 5? 
opt$missing <- "ml" # check later if this is ok } } if (opt$estimator != "pml") { if (opt$meanstructure == "default") opt$meanstructure <- TRUE } if (opt$estimator == "mlr") mlr.test <- "yuan.bentler.mplus" if (any(lav_options_estimatorgroup(opt$estimator) == c("ML", "REML", "NTRLS", "catML"))) { } if (is.character(opt$conditional.x)) { # = "default" if (lav_options_estimatorgroup(opt$estimator) == "ML") { opt$conditional.x <- FALSE } } if (opt$fixed.x == "default") { if (any(lav_options_estimatorgroup(opt$estimator) == c("MML", "ML")) && is.character(opt$start) && opt$start != "simple") { # new in 0.6-12 opt$fixed.x <- TRUE } } if (is.character(opt$zero.keep.margins)) { # = "default" opt$zero.keep.margins <- TRUE } opt$baseline.conditional.x.free.slopes <- FALSE } else if (opt$mimic == "EQS") { if (opt$estimator == "mlr") mlr.test <- "yuan.bentler" if (any(lav_options_estimatorgroup(opt$estimator) == c("ML", "REML", "NTRLS", "catML"))) { if (opt$likelihood == "default") opt$likelihood <- "wishart" } } else if (opt$mimic == "LISREL") { if (any(lav_options_estimatorgroup(opt$estimator) == c("ML", "REML", "NTRLS", "catML"))) { if (opt$likelihood == "default") opt$likelihood <- "wishart" } } if (opt$estimator == "mlr") { if (opt$test[1] == "default") { opt$test <- mlr.test } else { opt$test <- union(mlr.test, opt$test) } } opt }lavaan/R/lav_residuals.R0000644000176200001440000014400414627656441014717 0ustar liggesusers# residual diagnostics # two types: # 1) residuals for summary statistics # 2) case-wise residuals # this (new) version written around Aug/Sept 2018 for 0.6-3 # - based on obsList (inspect_sampstat) and estList (inspect_implied) # - pre-scaling for type = "cor.bollen" and type = "cor.bentler" # - summary statistics: rmr, srmr, crmr, urmr, usrmr, ucrmr; standard errors, # confidence intervals (for u(cs)rmr), # z-statistics (exact test, close test), p-values # - type = "normalized" is based on lav_model_h1_acov(), and should now work # for all estimators # - type = "standardized" now uses the correct formula, and should work for # for all estimators # - type = "standardized.mplus" uses the simplified Mplus/LISREL version, # often resulting in NAs due to negative var(resid) estimates # (this was "standardized" in lavaan < 0.6.3 # WARNING: only partial support for conditional.x = TRUE!! # - in categorical case: we only compute summary statistics, using cor + th # (no var, slopes, ...) # - twolevel not supported here; see lav_fit_srmr.R, where we convert to # the unconditional setting # - change 0.6-6: we enforce observed.information = "h1" to ensure 'Q' is a # projection matrix (see lav_residuals_acov) # - change 0.6-13: fixed.x = TRUE is ignored (to conform with 'tradition') setMethod( "residuals", "lavaan", function(object, type = "raw", labels = TRUE) { # lowercase type type <- tolower(type) # type = "casewise" if (type %in% c("casewise", "case", "obs", "observations", "ov")) { return(lav_residuals_casewise(object, labels = labels)) } else { out <- lav_residuals( object = object, type = type, h1 = TRUE, add.type = TRUE, rename.cov.cor = FALSE, # should become FALSE! 
# after packages (eg jmv) # have adapted 0.6-3 style add.labels = labels, add.class = TRUE, drop.list.single.group = TRUE ) } out } ) setMethod( "resid", "lavaan", function(object, type = "raw") { residuals(object, type = type, labels = TRUE) } ) # user-visible function lavResiduals <- function(object, type = "cor.bentler", custom.rmr = NULL, se = FALSE, zstat = TRUE, summary = TRUE, h1.acov = "unstructured", add.type = TRUE, add.labels = TRUE, add.class = TRUE, drop.list.single.group = TRUE, maximum.number = length(res.vech), output = "list") { out <- lav_residuals( object = object, type = type, h1 = TRUE, custom.rmr = custom.rmr, se = se, zstat = zstat, summary = summary, summary.options = list( se = TRUE, zstat = TRUE, pvalue = TRUE, unbiased = TRUE, unbiased.se = TRUE, unbiased.ci = TRUE, unbiased.ci.level = 0.90, unbiased.zstat = TRUE, unbiased.test.val = 0.05, unbiased.pvalue = TRUE ), h1.acov = h1.acov, add.type = add.type, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group ) # no pretty printing yet... if (output == "table") { # new in 0.6-18: handle multiple blocks nblocks <- lav_partable_nblocks(object@ParTable) out.list <- vector("list", length = nblocks) for (block in seq_len(nblocks)) { if (nblocks == 1L) { res <- out$cov } else { res <- out[[block]]$cov } # extract only below-diagonal elements res.vech <- lav_matrix_vech(res, diagonal = FALSE) # get names P <- nrow(res) NAMES <- colnames(res) nam <- expand.grid( NAMES, NAMES )[lav_matrix_vech_idx(P, diagonal = FALSE), ] NAMES.vech <- paste(nam[, 1], "~~", nam[, 2], sep = "") # create table TAB <- data.frame( name = NAMES.vech, res = round(res.vech, 3), stringsAsFactors = FALSE ) # sort table idx <- sort.int(abs(TAB$res), decreasing = TRUE, index.return = TRUE )$ix out.sorted <- TAB[idx, ] # show first rows only if (maximum.number == 0L || maximum.number > length(res.vech)) { maximum.number <- length(res.vech) } out.list[[block]] <- out.sorted[seq_len(maximum.number), ] } if (nblocks == 1L) { out <- out.list[[1]] } else { out <- out.list names(out) <- object@Data@block.label } } else { # list -> nothing to do } out } # main function lav_residuals <- function(object, type = "raw", h1 = TRUE, custom.rmr = NULL, se = FALSE, zstat = FALSE, summary = FALSE, summary.options = list( se = TRUE, zstat = TRUE, pvalue = TRUE, unbiased = TRUE, unbiased.se = TRUE, unbiased.ci = TRUE, unbiased.ci.level = 0.90, unbiased.zstat = FALSE, unbiased.test.val = 0.05, unbiased.pvalue = FALSE ), h1.acov = "unstructured", add.type = FALSE, rename.cov.cor = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # type type <- tolower(type)[1] # check type if (!type %in% c( "raw", "cor", "cor.bollen", "cor.bentler", "cor.eqs", "rmr", "srmr", "crmr", "normalized", "standardized", "standardized.mplus" )) { lav_msg_stop(gettext("unknown argument for type:"), dQuote(type)) } # if cor, choose 'default' if (type == "cor") { if (object@Options$mimic == "EQS") { type <- "cor.bentler" } else { type <- "cor.bollen" } } if (type == "cor.eqs") { type <- "cor.bentler" } if (type == "rmr") { type <- "raw" } if (type == "srmr") { type <- "cor.bentler" } if (type == "crmr") { type <- "cor.bollen" } # slots lavdata <- object@Data lavmodel <- object@Model # change options if multilevel (for now) if (lavdata@nlevels > 1L) { zstat <- se <- FALSE summary <- FALSE } # change options if categorical (for now) if (lavmodel@categorical) { # only if conditional.x = FALSE AND no continuous endogenous variables # 
-> only the simple setting where we only have thresholds and # correlations # As soon as we add continuous variables, we get means/variances too, # and we need to decide how WLS.obs/WLS.est/WLS.V will then map to # the output of lavInspect(fit, "implied") and # lavInspect(fit, "sampstat") if (lavmodel@conditional.x || length(unlist(lavmodel@num.idx)) > 0L) { zstat <- se <- FALSE summary <- FALSE summary.options <- list( se = FALSE, zstat = FALSE, pvalue = FALSE, unbiased = FALSE, unbiased.se = FALSE, unbiased.ci = FALSE, unbiased.ci.level = 0.90, unbiased.zstat = FALSE, unbiased.test.val = 0.05, unbiased.pvalue = FALSE ) } } # change options if conditional.x (for now) if (!lavmodel@categorical && lavmodel@conditional.x) { zstat <- se <- FALSE summary <- FALSE summary.options <- list( se = FALSE, zstat = FALSE, pvalue = FALSE, unbiased = FALSE, unbiased.se = FALSE, unbiased.ci = FALSE, unbiased.ci.level = 0.90, unbiased.zstat = FALSE, unbiased.test.val = 0.05, unbiased.pvalue = FALSE ) } # observed and fitted sample statistics obsList <- lav_object_inspect_sampstat(object, h1 = h1, add.labels = add.labels, add.class = add.class, drop.list.single.group = FALSE ) estList <- lav_object_inspect_implied(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = FALSE ) # blocks nblocks <- length(obsList) # pre-scale? if (type %in% c("cor.bentler", "cor.bollen")) { for (b in seq_len(nblocks)) { var.obs <- if (lavmodel@conditional.x) { diag(obsList[[b]][["res.cov"]]) } else { diag(obsList[[b]][["cov"]]) } var.est <- if (lavmodel@conditional.x) { diag(estList[[b]][["res.cov"]]) } else { diag(estList[[b]][["cov"]]) } # rescale obsList obsList[[b]] <- lav_residuals_rescale(x = obsList[[b]], diag.cov = var.obs) # rescale estList if (type == "cor.bentler") { # use obsList estList[[b]] <- lav_residuals_rescale(x = estList[[b]], diag.cov = var.obs) } else if (type == "cor.bollen") { # use estList for COV only estList[[b]] <- lav_residuals_rescale( x = estList[[b]], diag.cov = var.est, diag.cov2 = var.obs ) } } } # compute residuals: (observed - implied) resList <- vector("list", length = nblocks) for (b in seq_len(nblocks)) { resList[[b]] <- lapply(seq_len(length(obsList[[b]])), FUN = function(el) { obsList[[b]][[el]] - estList[[b]][[el]] } ) # always name the elements, even if add.labels = FALSE NAMES <- names(obsList[[b]]) names(resList[[b]]) <- NAMES } # do we need seList? if (se || zstat) { seList <- lav_residuals_se(object, type = type, z.type = "standardized", h1.acov = h1.acov, add.class = add.class, add.labels = add.labels ) } else if (type %in% c("normalized", "standardized", "standardized.mplus")) { seList <- lav_residuals_se(object, type = "raw", z.type = type, h1.acov = h1.acov, add.class = add.class, add.labels = add.labels ) } else { seList <- NULL } # normalize/standardize? 
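# (both variants divide each raw residual by an estimate of its standard
#  error: 'normalized' uses sqrt(diag(ACOV)) of the observed h1 statistics
#  only, while 'standardized' first removes the model-based part via
#  Q %*% ACOV.obs %*% t(Q); see lav_residuals_acov() further below)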
if (type %in% c("normalized", "standardized", "standardized.mplus")) { for (b in seq_len(nblocks)) { if (add.labels) { NAMES <- names(resList[[b]]) } resList[[b]] <- lapply(seq_len(length(resList[[b]])), FUN = function(el) { A <- resList[[b]][[el]] B <- seList[[b]][[el]] near.zero.idx <- which(abs(A) < 1e-05) if (length(near.zero.idx) > 0L) { B[near.zero.idx] <- 1 } A / B } ) if (add.labels) { names(resList[[b]]) <- NAMES } } } # add se resList.orig <- resList if (se) { for (b in seq_len(nblocks)) { NAMES.res <- names(resList[[b]]) NAMES.se <- paste0(NAMES.res, ".se") resList[[b]] <- c(resList[[b]], seList[[b]]) names(resList[[b]]) <- c(NAMES.res, NAMES.se) } } # add zstat if (zstat) { for (b in seq_len(nblocks)) { NAMES.res <- names(resList[[b]]) NAMES.z <- paste0(names(resList.orig[[b]]), ".z") tmp <- lapply(seq_len(length(resList.orig[[b]])), FUN = function(el) { A <- resList.orig[[b]][[el]] B <- seList[[b]][[el]] # NOTE: which threshold should we use? # used to be 1e-05 # changed to 1e-04 in 0.6-4 near.zero.idx <- which(abs(A) < 1e-04) if (length(near.zero.idx) > 0L) { # B[near.zero.idx] <- as.numeric(NA) B[near.zero.idx] <- 1.0 } A / B } ) resList[[b]] <- c(resList[[b]], tmp) names(resList[[b]]) <- c(NAMES.res, NAMES.z) } } # add summary statistics (rms, mabs) if (summary) { args <- c( list( object = object, type = type, h1.acov = h1.acov, add.class = add.class, custom.rmr = custom.rmr ), summary.options ) sumStat <- do.call("lav_residuals_summary", args) for (b in seq_len(nblocks)) { NAMES <- names(resList[[b]]) resList[[b]] <- c(resList[[b]], list(sumStat[[b]][[1]])) # only 1 NAMES <- c(NAMES, "summary") names(resList[[b]]) <- NAMES } } # last: add type if (add.type) { for (b in seq_len(nblocks)) { NAMES <- names(resList[[b]]) resList[[b]] <- c(type, resList[[b]]) NAMES <- c("type", NAMES) names(resList[[b]]) <- NAMES } } # optional: rename 'cov' to 'cor' (if type = "cor") if (rename.cov.cor && type %in% c("cor.bentler", "cor.bollen")) { for (b in seq_len(nblocks)) { NAMES <- names(resList[[b]]) NAMES <- gsub("cov", "cor", NAMES) names(resList[[b]]) <- NAMES } } # output OUT <- resList if (nblocks == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { if (lavdata@nlevels == 1L && length(lavdata@group.label) > 0L) { names(OUT) <- unlist(lavdata@group.label) } else if (lavdata@nlevels > 1L && length(lavdata@group.label) == 0L) { names(OUT) <- lavdata@level.label } } OUT } # return ACOV as list per group lav_residuals_acov <- function(object, type = "raw", z.type = "standardized", h1.acov = "unstructured") { # check type if (z.type %in% c("normalized", "standardized.mplus") && type != "raw") { lav_msg_stop(gettextf( "z.type = %1$s can only be used with type = %2$s", dQuote(z.type), dQuote("raw"))) } # slots lavdata <- object@Data lavmodel <- object@Model lavsamplestats <- object@SampleStats # return list per group ACOV.res <- vector("list", length = lavdata@ngroups) # compute ACOV for observed h1 sample statistics (ACOV == Gamma/N) if (!is.null(lavsamplestats@NACOV[[1]])) { NACOV.obs <- lavsamplestats@NACOV # if this changes, tag @TDJorgensen in commit message ACOV.obs <- lapply(NACOV.obs, function(x) x / lavsamplestats@ntotal) } else { ACOV.obs <- lav_model_h1_acov( lavobject = object, h1.information = h1.acov ) } # shortcut for normalized if (z.type == "normalized") { ACOV.res <- ACOV.obs return(ACOV.res) } else { if (z.type == "standardized") { A1 <- lav_model_h1_information(object) if (lavmodel@estimator == "DWLS" || lavmodel@estimator == "ULS") { # A1 is diagonal matrix A1 <- 
lapply(A1, diag) } if (type %in% c("cor.bentler", "cor.bollen")) { sampstat <- lavTech(object, "sampstat") } } else if (z.type == "standardized.mplus") { VCOV <- lavTech(object, "vcov") } DELTA <- lavTech(object, "delta") } # for each group, compute ACOV for (g in seq_len(lavdata@ngroups)) { # group weight gw <- object@SampleStats@nobs[[g]] / object@SampleStats@ntotal # if this changes, tag @TDJorgensen in commit message if (z.type == "standardized.mplus") { # simplified formula # also used by LISREL? # see https://www.statmodel.com/download/StandardizedResiduals.pdf ACOV.est.g <- DELTA[[g]] %*% VCOV %*% t(DELTA[[g]]) ACOV.res[[g]] <- ACOV.obs[[g]] - ACOV.est.g } else if (z.type == "standardized") { # see Ogasawara (2001) using Bentler & Dijkstra (1985) eq 1.7.4 # NVarCov, but always 'not' robust # # new in 0.6-6: to ensure Q is a projection matrix, we # force observed.information = "h1" # (only needed if information is observed) this.options <- object@Options this.options$observed.information[1] <- "h1" A0.g.inv <- lav_model_information( lavmodel = lavmodel, lavsamplestats = object@SampleStats, lavdata = lavdata, lavcache = object@Cache, lavimplied = object@implied, lavh1 = object@h1, lavoptions = this.options, extra = FALSE, augmented = TRUE, inverted = TRUE, use.ginv = TRUE ) ACOV.est.g <- gw * (DELTA[[g]] %*% A0.g.inv %*% t(DELTA[[g]])) Q <- diag(nrow = nrow(ACOV.est.g)) - ACOV.est.g %*% A1[[g]] ACOV.res[[g]] <- Q %*% ACOV.obs[[g]] %*% t(Q) # correct ACOV.res for type = "cor.bentler" or type = "cor.bollen" if (type == "cor.bentler") { if (lavmodel@categorical) { if (lavmodel@conditional.x || length(unlist(lavmodel@num.idx)) > 0L) { lav_msg_stop(gettext( "SE for cor.bentler not available (yet) if categorical = TRUE, and conditional.x = TRUE OR some endogenous variables are continuous")) } else { # nothing to do, as we already are in correlation metric } } else { # Ogasawara (2001), eq (13), or # Maydeu-Olivares (2017), eq (16) COV <- if (lavmodel@conditional.x) { sampstat[[g]][["res.cov"]] } else { sampstat[[g]][["cov"]] } SS <- 1 / sqrt(diag(COV)) tmp <- lav_matrix_vech(tcrossprod(SS)) G.inv.sqrt <- diag(tmp, nrow = length(tmp)) if (lavmodel@meanstructure) { GG <- lav_matrix_bdiag( diag(SS, nrow = length(SS)), G.inv.sqrt ) } else { GG <- G.inv.sqrt } ACOV.res[[g]] <- GG %*% ACOV.res[[g]] %*% GG } # continuous } else if (type == "cor.bollen") { if (lavmodel@categorical) { if (lavmodel@conditional.x || length(unlist(lavmodel@num.idx)) > 0L) { lav_msg_stop(gettext( "SE for cor.bentler not available (yet) if categorical = TRUE, and conditional.x = TRUE OR some endogenous variables are continuous")) } else { # nothing to do, as we already are in correlation metric } } else { # here we use the Maydeu-Olivares (2017) approach, see eq 17 COV <- if (lavmodel@conditional.x) { sampstat[[g]][["res.cov"]] } else { sampstat[[g]][["cov"]] } F1 <- lav_deriv_cov2corB(COV) if (lavmodel@meanstructure) { SS <- 1 / sqrt(diag(COV)) FF <- lav_matrix_bdiag(diag(SS, nrow = length(SS)), F1) } else { FF <- F1 } ACOV.res[[g]] <- FF %*% ACOV.res[[g]] %*% t(FF) } # continuous } # cor.bollen } # z.type = "standardized" } # g ACOV.res } # return resList with 'se' values for each residual lav_residuals_se <- function(object, type = "raw", z.type = "standardized", h1.acov = "unstructured", add.class = FALSE, add.labels = FALSE) { # slots lavdata <- object@Data lavmodel <- object@Model lavpta <- object@pta # return list per group seList <- vector("list", length = lavdata@ngroups) # get ACOV per group ACOV.res <- 
lav_residuals_acov(
    object = object, type = type, z.type = z.type, h1.acov = h1.acov
  )

  # labels
  if (add.labels) {
    ov.names <- object@pta$vnames$ov
    ov.names.res <- object@pta$vnames$ov.nox
    ov.names.x <- object@pta$vnames$ov.x
  }

  # for each group, compute 'se' values, and fill list
  for (g in seq_len(lavdata@ngroups)) {
    nvar <- object@pta$nvar[[g]] # block or group-based?
    diag.ACOV <- diag(ACOV.res[[g]])
    # take care of negative, or non-finite diag.ACOV elements
    diag.ACOV[!is.finite(diag.ACOV)] <- NA
    diag.ACOV[diag.ACOV < 0] <- NA

    # categorical
    if (lavmodel@categorical) {
      if (lavmodel@conditional.x || length(unlist(lavmodel@num.idx)) > 0L) {
        lav_msg_stop(gettext("not ready yet!"))
      }
      # COR
      nth <- length(lavmodel@th.idx[[g]])
      tmp <- sqrt(diag.ACOV[-(1:nth)])
      cov.se <- lav_matrix_vech_reverse(tmp, diagonal = FALSE)
      # MEAN
      mean.se <- rep(as.numeric(NA), nth)
      # TH
      th.se <- sqrt(diag.ACOV[1:nth])
      if (add.class) {
        class(cov.se) <- c("lavaan.matrix.symmetric", "matrix")
        class(mean.se) <- c("lavaan.vector", "numeric")
        class(th.se) <- c("lavaan.vector", "numeric")
      }
      if (add.labels) {
        rownames(cov.se) <- colnames(cov.se) <- ov.names[[g]]
        names(mean.se) <- ov.names[[g]]
        names(th.se) <- lavpta$vnames$th.mean[[g]]
      }
      seList[[g]] <- list(
        cov.se = cov.se, mean.se = mean.se, th.se = th.se
      )
      # continuous -- single level
    } else if (lavdata@nlevels == 1L) {
      if (lavmodel@conditional.x) {
        lav_msg_stop(gettext("not ready yet"))
      } else {
        if (lavmodel@meanstructure) {
          tmp <- sqrt(diag.ACOV[-(1:nvar)])
          cov.se <- lav_matrix_vech_reverse(tmp)
          mean.se <- sqrt(diag.ACOV[1:nvar])
          if (add.class) {
            class(cov.se) <- c("lavaan.matrix.symmetric", "matrix")
            class(mean.se) <- c("lavaan.vector", "numeric")
          }
          if (add.labels) {
            rownames(cov.se) <- colnames(cov.se) <- ov.names[[g]]
            names(mean.se) <- ov.names[[g]]
          }
          seList[[g]] <- list(cov.se = cov.se, mean.se = mean.se)
        } else {
          cov.se <- lav_matrix_vech_reverse(sqrt(diag.ACOV))
          if (add.class) {
            class(cov.se) <- c("lavaan.matrix.symmetric", "matrix")
          }
          if (add.labels) {
            rownames(cov.se) <- colnames(cov.se) <- ov.names[[g]]
          }
          seList[[g]] <- list(cov.se = cov.se)
        }
      }
      # continuous -- multilevel
    } else if (lavdata@nlevels > 1L) {
      lav_msg_stop(gettext("not ready yet"))
    }
  } # g

  seList
}

# return summary statistics as list per group
lav_residuals_summary <- function(object, type = c("rmr", "srmr", "crmr"),
                                  h1.acov = "unstructured",
                                  custom.rmr = NULL,
                                  se = FALSE, zstat = FALSE, pvalue = FALSE,
                                  unbiased = FALSE, unbiased.se = FALSE,
                                  unbiased.ci = FALSE,
                                  unbiased.ci.level = 0.90,
                                  unbiased.zstat = FALSE,
                                  unbiased.test.val = 0.05,
                                  unbiased.pvalue = FALSE,
                                  add.class = FALSE) {
  # arguments
  if (length(custom.rmr)) {
    if (!is.list(custom.rmr)) lav_msg_stop(gettext("custom.rmr must be a list"))
    ## Each custom (S/C)RMR must have a unique name
    customNAMES <- names(custom.rmr)
    if (is.null(customNAMES)) lav_msg_stop(gettext(
      "custom.rmr list must have names"))
    if (length(unique(customNAMES)) < length(custom.rmr)) {
      lav_msg_stop(gettext(
        "custom.rmr must have a unique name for each summary"))
    }
    ## Each list must contain a list consisting of $cov and/or $mean (no $th yet)
    for (i in seq_along(custom.rmr)) {
      if (!is.list(custom.rmr[[i]])) {
        lav_msg_stop(gettext("Each element in custom.rmr must be a list"))
      }
      if (is.null(names(custom.rmr[[i]]))) {
        lav_msg_stop(gettext("The list in custom.rmr must have names"))
      }
      if (!all(names(custom.rmr[[i]]) %in% c("cov", "mean"))) {
        lav_msg_stop(gettext(
          'Elements in custom.rmr must be named "cov" and/or "mean"'))
      }
      ## below, verify dimensions match rmsList.g
    }
    # FIXME:
    # blocks can have unique models, need another layer of lists
    # between custom summaries and moments
  } else {
    customNAMES <- NULL
  }
  if (pvalue) {
    zstat <- TRUE
  }
  if (zstat) {
    se <- TRUE
  }
  if (unbiased.pvalue) {
    unbiased.zstat <- TRUE
  }
  if (unbiased.zstat) {
    unbiased.se <- TRUE
  }
  if (!all(type %in% c(
    "rmr", "srmr", "crmr", "raw", "cor.bentler", "cor.bollen"
  ))) {
    lav_msg_stop(gettext("unknown type:"), dQuote(type))
  }

  # change type name
  idx <- which(type == "raw")
  if (length(idx) > 0L) {
    type[idx] <- "rmr"
  }
  idx <- which(type == "cor.bentler")
  if (length(idx) > 0L) {
    type[idx] <- "srmr"
  }
  idx <- which(type == "cor.bollen")
  if (length(idx) > 0L) {
    type[idx] <- "crmr"
  }

  # slots
  lavdata <- object@Data
  lavmodel <- object@Model

  # fixed.x/conditional.x
  fixed.x <- lavmodel@fixed.x
  conditional.x <- lavmodel@conditional.x

  rmrFlag <- srmrFlag <- crmrFlag <- FALSE
  if ("rmr" %in% type || "raw" %in% type) {
    # FIXME: recursive call to lav_residuals() if summary = TRUE!!
    rmrList <- lav_residuals(object = object, type = "raw")
    if (se || unbiased) {
      rmrList.se <- lav_residuals_acov(
        object = object, type = "raw",
        z.type = "standardized", h1.acov = "unstructured"
      )
    }
  }
  if ("srmr" %in% type || "cor.bentler" %in% type || "cor" %in% type) {
    srmrList <- lav_residuals(object = object, type = "cor.bentler")
    if (se || unbiased) {
      srmrList.se <- lav_residuals_acov(
        object = object, type = "cor.bentler",
        z.type = "standardized", h1.acov = "unstructured"
      )
    }
  }
  if ("crmr" %in% type || "cor.bollen" %in% type) {
    crmrList <- lav_residuals(object = object, type = "cor.bollen")
    if (se || unbiased) {
      crmrList.se <- lav_residuals_acov(
        object = object, type = "cor.bollen",
        z.type = "standardized", h1.acov = "unstructured"
      )
    }
  }

  # return list per group
  sumStat <- vector("list", length = lavdata@ngroups)

  # for each group, compute ACOV
  for (g in seq_len(lavdata@ngroups)) {
    nvar <- object@pta$nvar[[g]] # block or group-based?
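    # ------------------------------------------------------------------
    # NOTE (added, illustrative only): the blocks below compute rms-type
    # summaries of the residuals. As a minimal, hedged sketch (assuming
    # the HolzingerSwineford1939 example data shipped with lavaan), the
    # basic SRMR for a continuous single-group model can be reproduced
    # by hand:
    #
    #   HS.model <- ' visual  =~ x1 + x2 + x3
    #                 textual =~ x4 + x5 + x6
    #                 speed   =~ x7 + x8 + x9 '
    #   fit <- cfa(HS.model, data = HolzingerSwineford1939)
    #   R <- residuals(fit, type = "cor.bentler")$cov  # residual correlations
    #   e <- lav_matrix_vech(R)                        # lower triangle + diagonal
    #   sqrt(sum(e^2) / length(e))                     # root mean square
    #
    # The exact reported value also depends on the pstar convention used
    # below (p*(p+1)/2 for SRMR vs p*(p-1)/2 for CRMR) and on whether
    # mean residuals are included.
    # ------------------------------------------------------------------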
# categorical single level if (lavdata@nlevels == 1L && lavmodel@categorical) { if ((se || unbiased) && (conditional.x || length(unlist(lavmodel@num.idx)) > 0L)) { lav_msg_stop(gettext("not ready yet")) } else { # remove fixed.x elements: # seems like a good idea, but nobody likes it # nvar.x <- pstar.x <- 0L # if(lavmodel@fixed.x) { # nvar.x <- lavmodel@nexo[g] # pstar.x <- nvar.x * (nvar.x - 1) / 2 # note '-' # } OUT <- vector("list", length(type)) names(OUT) <- type for (typ in seq_len(length(type))) { if (type[typ] == "rmr") { rmsList.g <- rmrList[[g]] if (se || unbiased) { rmsList.se.g <- rmrList.se[[g]] } } else if (type[typ] == "srmr") { rmsList.g <- srmrList[[g]] if (se || unbiased) { rmsList.se.g <- srmrList.se[[g]] } } else if (type[typ] == "crmr") { rmsList.g <- crmrList[[g]] if (se || unbiased) { rmsList.se.g <- crmrList.se[[g]] } } # COR nth <- length(lavmodel@th.idx[[g]]) if (conditional.x) { STATS <- lav_matrix_vech(rmsList.g[["res.cov"]], diagonal = FALSE ) } else { STATS <- lav_matrix_vech(rmsList.g[["cov"]], diagonal = FALSE ) } # should pstar be p*(p+1)/2 or p*(p-1)/2 # we use the first for SRMR and the latter for CRMR if (type[typ] == "crmr") { pstar <- length(STATS) } else { pstar <- length(STATS) + nvar } ACOV <- NULL if (se || unbiased) { ACOV <- rmsList.se.g[-seq_len(nth), -seq_len(nth), drop = FALSE ] } RMS.COR <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) # THRESHOLDS if (conditional.x) { STATS <- rmsList.g[["res.th"]] } else { STATS <- rmsList.g[["th"]] } pstar <- length(STATS) ACOV <- NULL if (se || unbiased) { ACOV <- rmsList.se.g[seq_len(nth), seq_len(nth), drop = FALSE ] } RMS.TH <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) # MEAN # STATS <- rmsList.g[["mean"]] STATS <- numeric(0L) pstar <- length(STATS) ACOV <- NULL if (se || unbiased) { # TODO: extract from rmsList.se.g } RMS.MEAN <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) # VAR (not ready yet) # STATS <- diag(rmsList.g[["cov"]])[lavmodel@num.idx[[g]]] STATS <- numeric(0L) pstar <- length(STATS) ACOV <- NULL if (se || unbiased) { # TODO: extract from rmsList.se.g } RMS.VAR <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) # TOTAL -- FIXME: for conditional.x .... 
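    # ------------------------------------------------------------------
    # NOTE (added, illustrative only): in this categorical branch the
    # summary table assembled below has one column per set of sample
    # statistics (polychoric correlations, thresholds) plus a pooled
    # "total" column. A hedged sketch of the resulting layout (the
    # values shown are made up):
    #
    #   lavResiduals(fit)$summary
    #   #          cor thresholds total
    #   #   srmr 0.031      0.012 0.027
    # ------------------------------------------------------------------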
if (conditional.x) { STATS <- c( lav_matrix_vech(rmsList.g[["res.cov"]], diagonal = FALSE ), rmsList.g[["res.th"]] ) } else { STATS <- c( lav_matrix_vech(rmsList.g[["cov"]], diagonal = FALSE ), rmsList.g[["th"]] ) # rmsList.g[["mean"]], # diag(rmsList.g[["cov"]])[lavmodel@num.idx[[g]]]) } # should pstar be p*(p+1)/2 or p*(p-1)/2 for COV/COR? # we use the first for SRMR and the latter for CRMR if (type[typ] == "crmr") { pstar <- length(STATS) } else { pstar <- length(STATS) + nvar } # if(lavmodel@fixed.x) { # pstar <- pstar - pstar.x # } ACOV <- NULL if (se || unbiased) { ACOV <- rmsList.se.g } RMS.TOTAL <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) TABLE <- as.data.frame(cbind( RMS.COR, RMS.TH, # RMS.MEAN, # RMS.VAR, RMS.TOTAL )) # colnames(TABLE) <- c("cor", "thresholds", "mean", # "var", "total") colnames(TABLE) <- c("cor", "thresholds", "total") if (add.class) { class(TABLE) <- c("lavaan.data.frame", "data.frame") } OUT[[typ]] <- TABLE } # type } # not conditional.x or mixed cat/con # continuous -- single level } else if (lavdata@nlevels == 1L) { if ((se || unbiased) && conditional.x) { lav_msg_stop(gettext("not ready yet")) } else { # nvar.x <- pstar.x <- 0L # if(lavmodel@fixed.x) { # nvar.x <- lavmodel@nexo[g] # pstar.x <- nvar.x * (nvar.x + 1) / 2 # } OUT <- vector("list", length(type)) names(OUT) <- type for (typ in seq_len(length(type))) { if (type[typ] == "rmr") { rmsList.g <- rmrList[[g]] if (se || unbiased) { rmsList.se.g <- rmrList.se[[g]] } } else if (type[typ] == "srmr") { rmsList.g <- srmrList[[g]] if (se || unbiased) { rmsList.se.g <- srmrList.se[[g]] } } else if (type[typ] == "crmr") { rmsList.g <- crmrList[[g]] if (se || unbiased) { rmsList.se.g <- crmrList.se[[g]] } } # COV if (conditional.x) { STATS <- lav_matrix_vech(rmsList.g[["res.cov"]]) } else { STATS <- lav_matrix_vech(rmsList.g[["cov"]]) } # pstar <- ( length(STATS) - pstar.x ) pstar <- length(STATS) if (type[typ] == "crmr") { # pstar <- pstar - ( nvar - nvar.x ) pstar <- pstar - nvar } ACOV <- NULL if (se || unbiased) { ACOV <- if (lavmodel@meanstructure) { rmsList.se.g[-seq_len(nvar), -seq_len(nvar), drop = FALSE ] } else { rmsList.se.g } } RMS.COV <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) # MEAN if (lavmodel@meanstructure) { if (conditional.x) { STATS <- rmsList.g[["res.int"]] } else { STATS <- rmsList.g[["mean"]] } # pstar <- ( length(STATS) - nvar.x ) pstar <- length(STATS) ACOV <- NULL if (se || unbiased) { ACOV <- rmsList.se.g[seq_len(nvar), seq_len(nvar), drop = FALSE ] } RMS.MEAN <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) } # TOTAL if (lavmodel@meanstructure) { if (conditional.x) { STATS <- 
c( rmsList.g[["res.int"]], lav_matrix_vech(rmsList.g[["res.cov"]]) ) } else { STATS <- c( rmsList.g[["mean"]], lav_matrix_vech(rmsList.g[["cov"]]) ) } # pstar <- ( length(STATS) - ( pstar.x + nvar.x) ) pstar <- length(STATS) if (type[typ] == "crmr") { # pstar <- pstar - ( nvar - nvar.x ) pstar <- pstar - nvar } ACOV <- NULL if (se || unbiased) { ACOV <- rmsList.se.g } RMS.TOTAL <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) } # CUSTOM if (length(custom.rmr)) { if (lavmodel@fixed.x && !lavmodel@conditional.x) { ## save exogenous-variable indices, use to remove or set ## FALSE any moments that cannot have nonzero residuals x.idx <- which(rownames(rmsList.g$cov) %in% object@Data@ov.names.x[[g]]) } RMS.CUSTOM.LIST <- vector("list", length(customNAMES)) for (cus in customNAMES) { ## in case there is no meanstructure STATS <- NULL ACOV.idx <- NULL # MEANS? if (lavmodel@meanstructure) { if ("mean" %in% names(custom.rmr[[cus]])) { ## if logical, save numeric indices if (is.logical(custom.rmr[[cus]]$mean)) { ## check length if (length(custom.rmr[[cus]]$mean) != length(rmsList.g[["mean"]])) { lav_msg_stop(gettextf("length(custom.rmr$%s$mean) must match length(lavResiduals(fit)$mean)", cus)) } ACOV.idx <- which(custom.rmr[[cus]]$mean) if (lavmodel@fixed.x && !lavmodel@conditional.x) { ACOV.idx[x.idx] <- FALSE } } else if (!is.numeric(custom.rmr[[cus]]$mean)) { lav_msg_stop(gettextf("custom.rmr$%s$mean must contain logical or numeric indices.", cus)) } else { ACOV.idx <- custom.rmr[[cus]]$mean if (lavmodel@fixed.x && !lavmodel@conditional.x) { ACOV.idx <- setdiff(ACOV.idx, x.idx) } ACOV.idx <- ACOV.idx[!is.na(ACOV.idx)] # necessary? if (max(ACOV.idx) > length(rmsList.g[["mean"]])) { lav_msg_stop(gettextf( "custom.rmr$%1$s$mean[%2$s] is an out-of-bounds index", cus, which.max(ACOV.idx)) ) } } STATS <- rmsList.g[["mean"]][ACOV.idx] } } # (CO)VARIANCES? if ("cov" %in% names(custom.rmr[[cus]])) { ## if numeric, create a logical matrix to obtain ## ACOV.idx and check for x.idx if (is.numeric(custom.rmr[[cus]]$cov)) { cusCOV <- rmsList.g[["cov"]] == "start with all FALSE" ## matrix of row/column indices? 
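    # ------------------------------------------------------------------
    # NOTE (added, illustrative only): a hedged sketch of how custom.rmr
    # is meant to be supplied by the user, based on the checks in this
    # function (the name "visual.block" is arbitrary; a logical matrix
    # selects which residuals enter the custom summary):
    #
    #   sel <- matrix(FALSE, 9, 9)   # same dim as lavResiduals(fit)$cov
    #   sel[1:3, 1:3] <- TRUE        # e.g., residuals among x1-x3 only
    #   lavResiduals(fit, custom.rmr = list(visual.block = list(cov = sel)))
    #
    # Alternatively, a numeric matrix of (row, column) index pairs is
    # accepted; that case is handled directly below.
    # ------------------------------------------------------------------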
if (length(dim(custom.rmr[[cus]]$cov))) { if (max(custom.rmr[[cus]]$cov[, 1:2] > nrow(rmsList.g[["cov"]]))) { lav_msg_stop(gettextf( "numeric indices in custom.rmr$%1$s$cov cannot exceed %2$s", cus, nrow(rmsList.g[["cov"]]))) } for (RR in 1:nrow(custom.rmr[[cus]]$cov)) { cusCOV[ custom.rmr[[cus]]$cov[RR, 1], custom.rmr[[cus]]$cov[RR, 2] ] <- TRUE } } else { ## numeric-vector indices if (max(custom.rmr[[cus]]$cov > length(rmsList.g[["cov"]]))) { lav_msg_stop(gettextf( "numeric indices in custom.rmr$%1$s$cov cannot exceed %2$s", cus, length(rmsList.g[["cov"]]))) } cusCOV[custom.rmr[[cus]]$cov] <- TRUE } ## numeric indices no longer needed, use logical custom.rmr[[cus]]$cov <- cusCOV } else if (!is.logical(custom.rmr[[cus]]$cov)) { lav_msg_stop(gettextf( "custom.rmr$%s$cov must be a logical square matrix or a numeric matrix of (row/column) indices.", cus)) } ## check dimensions if (!all(dim(custom.rmr[[cus]]$cov) == dim(rmsList.g[["cov"]]))) { lav_msg_stop(gettextf( "dim(custom.rmr$%s$cov) must match dim(lavResiduals(fit)$cov)", cus)) } ## users can specify upper.tri or lower.tri indices custom.rmr[[cus]]$cov <- custom.rmr[[cus]]$cov | t(custom.rmr[[cus]]$cov) ## but ACOV refers to lower.tri indices custom.rmr[[cus]]$cov[upper.tri(custom.rmr[[cus]]$cov)] <- FALSE ## diagonal relevant? if (type[typ] == "crmr") diag(custom.rmr[[cus]]$cov) <- FALSE ## extract lower.tri indices vech.idx <- which(lav_matrix_vech(custom.rmr[[cus]]$cov)) ## add residuals to STATS, indices to ACOV.idx STATS <- c(STATS, lav_matrix_vech(rmsList.g[["cov"]])[vech.idx]) ACOV.idx <- c(ACOV.idx, vech.idx) } ## count residuals in summary (x.idx already removed) pstar <- length(STATS) ACOV <- NULL if (se || unbiased) { ACOV <- rmsList.se.g[ACOV.idx, ACOV.idx, drop = FALSE] } RMS.CUSTOM.LIST[[cus]] <- lav_residuals_summary_rms( STATS = STATS, ACOV = ACOV, se = se, zstat = zstat, pvalue = pvalue, unbiased = unbiased, unbiased.se = unbiased.se, unbiased.ci = unbiased.ci, unbiased.ci.level = unbiased.ci.level, unbiased.zstat = unbiased.zstat, unbiased.test.val = unbiased.test.val, unbiased.pvalue = unbiased.pvalue, pstar = pstar, type = type[typ] ) # FIXME: update for categorical } # cus RMS.CUSTOM <- do.call(rbind, RMS.CUSTOM.LIST) } else { RMS.CUSTOM <- NULL } if (lavmodel@meanstructure) { TABLE <- as.data.frame(cbind( RMS.COV, RMS.MEAN, RMS.TOTAL, RMS.CUSTOM )) colnames(TABLE) <- c( "cov", "mean", "total", customNAMES ) } else { TABLE <- as.data.frame(cbind(RMS.COV, RMS.CUSTOM)) colnames(TABLE) <- c("cov", customNAMES) } if (add.class) { class(TABLE) <- c("lavaan.data.frame", "data.frame") } OUT[[typ]] <- TABLE } # type } # continuous, single-level, unconditional # continuous -- multilevel } else if (lavdata@nlevels > 1L) { lav_msg_stop(gettext("not ready yet")) } sumStat[[g]] <- OUT } # g sumStat } lav_residuals_summary_rms <- function(STATS = NULL, ACOV = NULL, se = FALSE, level = 0.90, zstat = FALSE, pvalue = FALSE, unbiased = FALSE, unbiased.se = FALSE, unbiased.ci = FALSE, unbiased.ci.level = 0.90, unbiased.zstat = FALSE, unbiased.test.val = 0.05, unbiased.pvalue = FALSE, pstar = 0, type = "rms") { OUT <- vector("list", length = 0L) # covariance matrix if (length(STATS) > 0L) { rms <- sqrt(sum(STATS * STATS) / pstar) } else { rms <- 0 se <- unbiased <- zstat <- FALSE } # default is NULL rms.se <- rms.z <- rms.pvalue <- NULL urms <- urms.se <- urms.z <- urms.pvalue <- NULL urms.ci.lower <- urms.ci.upper <- NULL if (!unbiased.zstat) { unbiased.test.val <- NULL } if (se || unbiased) { TR2 <- sum(diag(ACOV %*% ACOV)) TR1 <- 
sum(diag(ACOV)) if (se) { rms.avar <- TR2 / (TR1 * 2 * pstar) if (!is.finite(rms.avar) || rms.avar < .Machine$double.eps) { rms.se <- as.numeric(NA) } else { rms.se <- sqrt(rms.avar) } } } if (zstat) { E.rms <- (sqrt(TR1 / pstar) * (4 * TR1 * TR1 - TR2) / (4 * TR1 * TR1)) rms.z <- max((rms - E.rms), 0) / rms.se if (pvalue) { rms.pvalue <- 1 - pnorm(rms.z) } } if (unbiased) { T.cov <- as.numeric(crossprod(STATS)) eVe <- as.numeric(t(STATS) %*% ACOV %*% STATS) k.cov <- 1 - (TR2 + 2 * eVe) / (4 * T.cov * T.cov) urms <- (1 / k.cov * sqrt(max((T.cov - TR1), 0) / pstar)) if (unbiased.se) { urms.avar <- (1 / (k.cov * k.cov) * (TR2 + 2 * eVe) / (2 * pstar * T.cov)) if (!is.finite(urms.avar) || urms.avar < .Machine$double.eps) { urms.se <- as.numeric(NA) } else { urms.se <- sqrt(urms.avar) } if (unbiased.ci) { a <- (1 - unbiased.ci.level) / 2 a <- c(a, 1 - a) fac <- stats::qnorm(a) urms.ci.lower <- urms + urms.se * fac[1] urms.ci.upper <- urms + urms.se * fac[2] } if (unbiased.zstat) { urms.z <- (urms - unbiased.test.val) / urms.se if (unbiased.pvalue) { urms.pvalue <- 1 - pnorm(urms.z) } } } } # labels if (type == "rmr") { OUT <- list( rmr = rms, rmr.se = rms.se, rmr.exactfit.z = rms.z, rmr.exactfit.pvalue = rms.pvalue, urmr = urms, urmr.se = urms.se, urmr.ci.lower = urms.ci.lower, urmr.ci.upper = urms.ci.upper, urmr.closefit.h0.value = unbiased.test.val, urmr.closefit.z = urms.z, urmr.closefit.pvalue = urms.pvalue ) } else if (type == "srmr") { OUT <- list( srmr = rms, srmr.se = rms.se, srmr.exactfit.z = rms.z, srmr.exactfit.pvalue = rms.pvalue, usrmr = urms, usrmr.se = urms.se, usrmr.ci.lower = urms.ci.lower, usrmr.ci.upper = urms.ci.upper, usrmr.closefit.h0.value = unbiased.test.val, usrmr.closefit.z = urms.z, usrmr.closefit.pvalue = urms.pvalue ) } else if (type == "crmr") { OUT <- list( crmr = rms, crmr.se = rms.se, crmr.exactfit.z = rms.z, crmr.exactfit.pvalue = rms.pvalue, ucrmr = urms, ucrmr.se = urms.se, ucrmr.ci.lower = urms.ci.lower, ucrmr.ci.upper = urms.ci.upper, ucrmr.closefit.h0.value = unbiased.test.val, ucrmr.closefit.z = urms.z, ucrmr.closefit.pvalue = urms.pvalue ) } unlist(OUT) } # generate summary statistics for the residuals lav_residuals_summary_old <- function(resList = NULL, add.class = FALSE, add.labels = FALSE) { # per block nblocks <- length(resList) for (b in seq_len(nblocks)) { # create new list, including with summary statistics interleaved x <- vector("list", length = 0L) nel <- length(resList[[b]]) NAMES <- names(resList[[b]]) for (el in seq_len(nel)) { EL <- resList[[b]][[el]] if (!is.null(NAMES)) { NAME <- NAMES[el] } if (is.character(EL)) { new.x <- list(EL) if (add.labels) { names(new.x) <- "type" } x <- c(x, new.x) } else if (is.matrix(EL) && isSymmetric(EL)) { tmp <- na.omit(lav_matrix_vech(EL)) rms <- sqrt(sum(tmp * tmp) / length(tmp)) mabs <- mean(abs(tmp)) tmp2 <- na.omit(lav_matrix_vech(EL, diagonal = FALSE)) rms.nodiag <- sqrt(sum(tmp2 * tmp2) / length(tmp2)) mabs.nodiag <- mean(abs(tmp2)) cov.summary <- c(rms, rms.nodiag, mabs, mabs.nodiag) if (add.labels) { names(cov.summary) <- c("rms", "rms.nodiag", "mabs", "mabs.nodiag") } if (add.class) { class(cov.summary) <- c("lavaan.vector", "numeric") } new.x <- list(EL, cov.summary) if (add.labels && !is.null(NAMES)) { names(new.x) <- c(NAME, paste0(NAME, ".summary")) } x <- c(x, new.x) } else { tmp <- na.omit(EL) rms <- sqrt(sum(tmp * tmp) / length(tmp)) mabs <- mean(abs(tmp)) mean.summary <- c(rms, mabs) if (add.labels) { names(mean.summary) <- c("rms", "mabs") } if (add.class) { class(mean.summary) <- 
c("lavaan.vector", "numeric") } new.x <- list(EL, mean.summary) if (add.labels && !is.null(NAMES)) { names(new.x) <- c(NAME, paste0(NAME, ".summary")) } x <- c(x, new.x) } } # nel # fill in block including summary statistics resList[[b]] <- x } # nblocks resList } # x is a list with sample statistics (eg output of inspect(fit, "sampstat") # y is another (possibly the same) list with sample statistics # # to avoid many 'NAs', we set the scale-factor to 1 # if the to-be-scaled value is < 1e-05 (in absolute value) lav_residuals_rescale <- function(x, diag.cov = NULL, diag.cov2 = NULL) { if (is.null(diag.cov2)) { diag.cov2 <- diag.cov } # make sure we can take the sqrt and invert diag.cov[!is.finite(diag.cov)] <- NA diag.cov[diag.cov < .Machine$double.eps] <- NA scale.cov <- tcrossprod(1 / sqrt(diag.cov)) # for the mean, we use diag.cov2 diag.cov2[!is.finite(diag.cov2)] <- NA diag.cov2[diag.cov2 < .Machine$double.eps] <- NA scale.mean <- 1 / sqrt(diag.cov2) # rescale cov if (!is.null(x[["cov"]])) { # catch (near) zero elements in x$cov near.zero.idx <- which(abs(x[["cov"]]) < 1e-05) scale.cov[near.zero.idx] <- 1 x[["cov"]][] <- x[["cov"]] * scale.cov } if (!is.null(x[["res.cov"]])) { # catch (near) zero elements in x$res.cov near.zero.idx <- which(abs(x[["res.cov"]]) < 1e-05) scale.cov[near.zero.idx] <- 1 x[["res.cov"]][] <- x[["res.cov"]] * scale.cov } # rescale int/mean if (!is.null(x[["res.int"]])) { # catch (near) zero elements in x$res.int near.zero.idx <- which(abs(x[["res.int"]]) < 1e-05) scale.mean[near.zero.idx] <- 1 x[["res.int"]] <- x[["res.int"]] * scale.mean } if (!is.null(x[["mean"]])) { # catch (near) zero elements in x$mean near.zero.idx <- which(abs(x[["mean"]]) < 1e-05) scale.mean[near.zero.idx] <- 1 x[["mean"]] <- x[["mean"]] * scale.mean } # FIXME: do something sensible for th, slopes, ... x } lavaan/R/lav_data.R0000644000176200001440000012052514627656441013637 0ustar liggesusers# # the lavData class describes how the data looks like # - do we have a full data frame, or only sample statistics? # (TODO: allow for patterns + freq, if data is categorical) # - variable type ("numeric", "ordered", ...) # - how many groups, how many observations, ... # - what about missing patterns? # # initial version: YR 14 April 2012 # YR 23 Feb 2017: blocks/levels/groups, but everything is group-based! # FIXME: if nlevels > 1L, and ngroups > 1L, we should check that # group is at the upper-level # YR 08 May 2019: sampling weights normalization -> different options # extract the data we need for this particular model lavData <- function(data = NULL, # data.frame group = NULL, # multiple groups? cluster = NULL, # clusters? 
                    ov.names = NULL, # variables in model
                    ov.names.x = character(0), # exo variables
                    ov.names.l = list(), # names per level
                    ordered = NULL, # ordered variables
                    sampling.weights = NULL, # sampling weights
                    sample.cov = NULL, # sample covariance(s)
                    sample.mean = NULL, # sample mean vector(s)
                    sample.th = NULL, # sample thresholds
                    sample.nobs = NULL, # sample nobs
                    lavoptions = lavOptions(), # lavoptions
                    allow.single.case = FALSE # for newdata in predict
                    ) {
  # get info from lavoptions

  # group.labels
  group.label <- lavoptions$group.label
  if (is.null(group.label)) {
    group.label <- character(0L)
  }

  # level.labels
  level.label <- lavoptions$level.label
  if (is.null(level.label)) {
    level.label <- character(0L)
  }

  # block.labels
  block.label <- character(0L)
  if (length(group.label) > 0L && length(level.label) == 0L) {
    block.label <- group.label
  } else if (length(level.label) > 0L && length(group.label) == 0L) {
    block.label <- level.label
  } else if (length(group.label) > 0L && length(level.label) > 0L) {
    block.label <- paste(rep(group.label, each = length(level.label)),
      rep(level.label, times = length(group.label)),
      sep = "."
    )
  }

  # std.ov?
  std.ov <- lavoptions$std.ov
  if (is.null(std.ov)) {
    std.ov <- FALSE
  }

  # missing?
  missing <- lavoptions$missing
  if (is.null(missing) || missing == "default") {
    missing <- "listwise"
  }

  # warn?
  if (allow.single.case) {
    # eg, in lavPredict
    current.warn <- lav_warn()
    if (lav_warn(FALSE)) on.exit(lav_warn(current.warn), TRUE)
  }

  # four scenarios:
  # 0) data is already a lavData object: do nothing
  # 1) data is full data.frame (or a matrix)
  # 2) data are sample statistics only
  # 3) no data at all

  # 1) full data
  if (!is.null(data)) {
    # catch lavaan/lavData objects
    if (inherits(data, "lavData")) {
      return(data)
    } else if (inherits(data, "lavaan")) {
      return(data@Data)
    }

    # catch matrix
    if (!is.data.frame(data)) {
      # is it a matrix?
      if (is.matrix(data)) {
        if (nrow(data) == ncol(data)) {
          # perhaps it is a covariance matrix?
          if (isSymmetric(data)) {
            lav_msg_warn(
              gettext("data argument looks like a covariance matrix; please use the sample.cov argument instead"))
          }
        }
        # or perhaps it is a data matrix?
        ### FIXME, we should avoid as.data.frame() and handle
        ### data matrices directly
        data <- as.data.frame(data, stringsAsFactors = FALSE)
      } else {
        lav_msg_stop(gettextf(
          "data= argument is not a data.frame, but of class %s",
          sQuote(class(data))))
      }
    }

    # no ov.names?
    if (is.null(ov.names)) {
      ov.names <- names(data)
      # remove group variable, if provided
      if (length(group) > 0L) {
        group.idx <- which(ov.names == group)
        ov.names <- ov.names[-group.idx]
      }
      # remove cluster variable, if provided
      if (length(cluster) > 0L) {
        cluster.idx <- which(ov.names == cluster)
        ov.names <- ov.names[-cluster.idx]
      }
    }

    lavData <- lav_data_full(
      data = data,
      group = group,
      cluster = cluster,
      group.label = group.label,
      level.label = level.label,
      block.label = block.label,
      ov.names = ov.names,
      ordered = ordered,
      sampling.weights = sampling.weights,
      sampling.weights.normalization =
        lavoptions$sampling.weights.normalization,
      ov.names.x = ov.names.x,
      ov.names.l = ov.names.l,
      std.ov = std.ov,
      missing = missing,
      allow.single.case = allow.single.case
    )
    sample.cov <- NULL # not needed, but just in case
  }

  # 2) sample moments
  if (is.null(data) && !is.null(sample.cov)) {
    # for now: no levels!!
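    # ------------------------------------------------------------------
    # NOTE (added, illustrative only): this branch is reached when a
    # model is fitted from summary statistics instead of raw data; a
    # minimal, hedged sketch using the HolzingerSwineford1939 example
    # data:
    #
    #   S <- cov(HolzingerSwineford1939[, paste0("x", 1:9)])
    #   fit <- cfa(HS.model, sample.cov = S, sample.nobs = 301)
    #
    # With multiple groups, sample.cov may be a (named) list of matrices
    # and sample.nobs a vector, as handled below.
    # ------------------------------------------------------------------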
nlevels <- 1L # we also need the number of observations (per group) if (is.null(sample.nobs)) { lav_msg_stop(gettext("please specify number of observations")) } # if a 'group' argument was provided, keep it -- new in 0.6-4 if (is.null(group)) { group <- character(0L) } else if (is.character(group)) { # nothing to do, just store it } else { lav_msg_stop(gettext("group argument should be a string")) } # list? if (is.list(sample.cov)) { # multiple groups, multiple cov matrices if (!is.null(sample.mean)) { stopifnot(length(sample.mean) == length(sample.cov)) } if (!is.null(sample.th)) { stopifnot(length(sample.th) == length(sample.cov)) } # multiple groups, multiple cov matrices ngroups <- length(sample.cov) LABEL <- names(sample.cov) if (is.null(group.label) || length(group.label) == 0L) { if (is.null(LABEL)) { group.label <- paste("Group ", 1:ngroups, sep = "") } else { group.label <- LABEL } } else { if (is.null(LABEL)) { stopifnot(length(group.label) == ngroups) } else { # FIXME!!!! # check if they match } } } else { ngroups <- 1L group.label <- character(0) if (!is.matrix(sample.cov)) { lav_msg_stop(gettext( "sample.cov must be a matrix or a list of matrices")) } sample.cov <- list(sample.cov) } # get ov.names if (is.null(ov.names)) { ov.names <- lapply(sample.cov, row.names) } else if (!is.list(ov.names)) { # duplicate ov.names for each group tmp <- ov.names ov.names <- vector("list", length = ngroups) ov.names[1:ngroups] <- list(tmp) } else { if (length(ov.names) != ngroups) { lav_msg_stop(gettextf( "ov.names assumes %1$s groups; data contains %2$s groups", length(ov.names), ngroups)) } # nothing to do } # handle ov.names.x if (!is.list(ov.names.x)) { tmp <- ov.names.x ov.names.x <- vector("list", length = ngroups) ov.names.x[1:ngroups] <- list(tmp) } else { if (length(ov.names.x) != ngroups) { lav_msg_stop(gettextf( "ov.names.x assumes %1$s groups; data contains %2$s groups", length(ov.names.x), ngroups)) } } ov <- list() ov$name <- unique(unlist(c(ov.names, ov.names.x))) nvar <- length(ov$name) ov$idx <- rep(NA, nvar) ov$nobs <- rep(sum(unlist(sample.nobs)), nvar) ov$type <- rep("numeric", nvar) ov$nlev <- rep(0, nvar) # check for categorical if (!is.null(sample.th)) { th.idx <- attr(sample.th, "th.idx") if (is.list(th.idx)) { th.idx <- th.idx[[1]] ## FIRST group only (assuming same ths!) } if (any(th.idx > 0)) { TAB <- table(th.idx[th.idx > 0]) ord.idx <- as.numeric(names(TAB)) nlev <- as.integer(unname(TAB) + 1) ov$type[ord.idx] <- "ordered" ov$nlev[ord.idx] <- nlev } } # if std.ov = TRUE, give a warning (suggested by Peter Westfall) if (std.ov) { lav_msg_warn(gettext( "std.ov argument is ignored if only sample statistics are provided.")) } # check variances (new in 0.6-7) for (g in seq_len(ngroups)) { VAR <- diag(sample.cov[[g]]) # 1. finite? if (!all(is.finite(VAR))) { lav_msg_stop(gettext( "at least one variance in the sample covariance matrix is not finite.")) } # 2. near zero (or negative)? if (any(VAR < .Machine$double.eps)) { lav_msg_stop( gettext("at least one variance in the sample covariance matrix is (near) zero or negative.")) } # 3. very large? 
max.var <- max(VAR) if (max.var > 1000000) { lav_msg_warn( gettext("some observed variances in the sample covariance matrix are larger than 1000000.")) } } # block.labels block.label <- character(0L) if (length(group.label) > 0L && length(level.label) == 0L) { block.label <- group.label } else if (length(level.label) > 0L && length(group.label) == 0L) { block.label <- level.label } else if (length(group.label) > 0L && length(level.label) > 0L) { block.label <- paste(rep(group.label, each = length(level.label)), rep(level.label, times = length(group.label)), sep = "." ) } # construct lavData object lavData <- new("lavData", data.type = "moment", ngroups = ngroups, group = group, nlevels = 1L, # for now cluster = character(0L), group.label = group.label, level.label = character(0L), block.label = block.label, nobs = as.list(sample.nobs), norig = as.list(sample.nobs), ov.names = ov.names, ov.names.x = ov.names.x, ov.names.l = ov.names.l, ordered = as.character(ordered), weights = vector("list", length = ngroups), sampling.weights = character(0L), ov = ov, std.ov = FALSE, missing = "listwise", case.idx = vector("list", length = ngroups), Mp = vector("list", length = ngroups), Rp = vector("list", length = ngroups), Lp = vector("list", length = ngroups), X = vector("list", length = ngroups), eXo = vector("list", length = ngroups) ) } # 3) data.type = "none": both data and sample.cov are NULL if (is.null(data) && is.null(sample.cov)) { # clustered/multilevel? --> ov.names.l should be filled in if (length(ov.names.l) > 0L) { nlevels <- length(ov.names.l[[1]]) # we assume the same number # of levels in each group! # do we have a cluster argument? if not, create one if (is.null(cluster)) { if (nlevels == 2L) { cluster <- "cluster" } else { cluster <- paste0("cluster", seq_len(nlevels - 1L)) } } # default level.labels if (length(level.label) == 0L) { level.label <- c("within", cluster) } else { # check if length(level.label) = 1 + length(cluster) if (length(level.label) != length(cluster) + 1L) { lav_msg_stop(gettext("length(level.label) != length(cluster) + 1L")) } # nothing to do } } else { nlevels <- 1L cluster <- character(0L) level.label <- character(0L) } # ngroups: ov.names (when group: is used), or sample.nobs if (is.null(ov.names)) { lav_msg_warn(gettext("ov.names is NULL")) ov.names <- character(0L) if (is.null(sample.nobs)) { ngroups <- 1L sample.nobs <- rep(list(0L), ngroups) } else { sample.nobs <- as.list(sample.nobs) ngroups <- length(sample.nobs) } } else if (!is.list(ov.names)) { if (is.null(sample.nobs)) { ngroups <- 1L sample.nobs <- rep(list(0L), ngroups) } else { sample.nobs <- as.list(sample.nobs) ngroups <- length(sample.nobs) } ov.names <- rep(list(ov.names), ngroups) } else if (is.list(ov.names)) { ngroups <- length(ov.names) if (is.null(sample.nobs)) { sample.nobs <- rep(list(0L), ngroups) } else { sample.nobs <- as.list(sample.nobs) if (length(sample.nobs) != ngroups) { lav_msg_stop(gettextf( "length(sample.nobs) = %1$s but syntax implies ngroups = %2$s", length(sample.nobs), ngroups)) } } } # group.label if (ngroups > 1L) { if (is.null(group)) { group <- "group" } group.label <- paste("Group", 1:ngroups, sep = "") } else { group <- character(0L) group.label <- character(0L) } # handle ov.names.x if (!is.list(ov.names.x)) { ov.names.x <- rep(list(ov.names.x), ngroups) } ov <- list() ov$name <- unique(unlist(c(ov.names, ov.names.x))) nvar <- length(ov$name) ov$idx <- rep(NA, nvar) ov$nobs <- rep(0L, nvar) ov$type <- rep("numeric", nvar) ov$nlev <- rep(0L, nvar) # collect 
information per upper-level group Lp <- vector("list", length = ngroups) for (g in 1:ngroups) { if (nlevels > 1L) { # ALWAYS add ov.names.x at the end, even if conditional.x OV.NAMES <- unique(c(ov.names[[g]], ov.names.x[[g]])) Lp[[g]] <- lav_data_cluster_patterns( Y = NULL, clus = NULL, cluster = cluster, multilevel = TRUE, ov.names = OV.NAMES, ov.names.x = ov.names.x[[g]], ov.names.l = ov.names.l[[g]] ) } } # g # block.labels block.label <- character(0L) if (length(group.label) > 0L && length(level.label) == 0L) { block.label <- group.label } else if (length(level.label) > 0L && length(group.label) == 0L) { block.label <- level.label } else if (length(group.label) > 0L && length(level.label) > 0L) { block.label <- paste(rep(group.label, each = length(level.label)), rep(level.label, times = length(group.label)), sep = "." ) } # construct lavData object lavData <- new("lavData", data.type = "none", ngroups = ngroups, group = group, nlevels = nlevels, cluster = cluster, group.label = group.label, level.label = level.label, block.label = block.label, nobs = sample.nobs, norig = sample.nobs, ov.names = ov.names, ov.names.x = ov.names.x, ov.names.l = ov.names.l, ordered = as.character(ordered), weights = vector("list", length = ngroups), sampling.weights = character(0L), ov = ov, missing = "listwise", case.idx = vector("list", length = ngroups), Mp = vector("list", length = ngroups), Rp = vector("list", length = ngroups), Lp = Lp, X = vector("list", length = ngroups), eXo = vector("list", length = ngroups) ) } lavData } # handle full data lav_data_full <- function(data = NULL, # data.frame group = NULL, # multiple groups? cluster = NULL, # clustered? group.label = NULL, # custom group labels? level.label = NULL, block.label = NULL, ov.names = NULL, # variables needed # in model ordered = NULL, # ordered variables sampling.weights = NULL, # sampling weights sampling.weights.normalization = "none", ov.names.x = character(0L), # exo variables ov.names.l = list(), # var per level std.ov = FALSE, # standardize ov's? missing = "listwise", # remove missings? allow.single.case = FALSE # allow single case? ) { # number of groups and group labels if (!is.null(group) && length(group) > 0L) { if (!(group %in% names(data))) { lav_msg_stop(gettextf( "grouping variable %1$s not found; variable names found in data frame are: %2$s", sQuote(group),paste(names(data), collapse = " "))) } # note: by default, we use the order as in the data; # not as in levels(data[,group]) if (length(group.label) == 0L) { group.label <- unique(as.character(data[[group]])) if (any(is.na(group.label))) { lav_msg_warn(gettextf("group variable %s contains missing values", sQuote(group))) } group.label <- group.label[!is.na(group.label)] } else { group.label <- unique(as.character(group.label)) # check if user-provided group labels exist LABEL <- unique(as.character(data[[group]])) idx <- match(group.label, LABEL) if (any(is.na(idx))) { lav_msg_warn(gettextf( "some group.labels do not appear in the grouping variable: %s", lav_msg_view(group.label[which(is.na(idx))], log.sep = "none")) ) } group.label <- group.label[!is.na(idx)] # any groups left? 
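  # ------------------------------------------------------------------
  # NOTE (added, illustrative only): a hedged sketch of the user-facing
  # behaviour validated here; group= names a variable in the data, and
  # group.label= optionally selects/orders its levels:
  #
  #   fit <- cfa(HS.model, data = HolzingerSwineford1939,
  #              group = "school",
  #              group.label = c("Pasteur", "Grant-White"))
  #
  # Labels that do not occur in the grouping variable are dropped with a
  # warning, and the check directly below stops if none remain.
  # ------------------------------------------------------------------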
if (length(group.label) == 0L) { lav_msg_stop(gettext( "no group levels left; check the group.label argument")) } } ngroups <- length(group.label) } else { if (length(group.label) > 0L) { lav_msg_warn(gettext( "`group.label' argument will be ignored if `group' argument is missing")) } ngroups <- 1L group.label <- character(0L) group <- character(0L) } # sampling weights if (!is.null(sampling.weights)) { if (is.character(sampling.weights)) { if (!(sampling.weights %in% names(data))) { lav_msg_stop( gettextf("sampling weights variable %1$s not found; variable names found in data frame are: %2$s", sQuote(sampling.weights), paste(names(data), collapse = " "))) } # check for missing values in sampling weight variable if (any(is.na(data[[sampling.weights]]))) { lav_msg_stop( gettextf("sampling.weights variable %s contains missing values", sQuote(sampling.weights))) } } else { lav_msg_stop(gettext( "sampling weights argument should be a variable name in the data.frame" )) } } # clustered? if (!is.null(cluster) && length(cluster) > 0L) { # cluster variable in data? if (!all(cluster %in% names(data))) { # which one did we not find? not.ok <- which(!cluster %in% names(data)) lav_msg_stop(gettextf( "cluster variable(s) %1$s not found; variable names found in data frame are: %2$s", sQuote(cluster[not.ok]), paste(names(data), collapse = " "))) } # check for missing values in cluster variable(s) for (cl in 1:length(cluster)) { if (anyNA(data[[cluster[cl]]])) { lav_msg_warn(gettextf("cluster variable %s contains missing values", sQuote(cluster[cl]))) } } # multilevel? if (length(ov.names.l) > 0L) { # default level.labels if (length(level.label) == 0L) { level.label <- c("within", cluster) } else { # check if length(level.label) = 1 + length(cluster) if (length(level.label) != length(cluster) + 1L) { lav_msg_stop(gettext("length(level.label) != length(cluster) + 1L")) } # nothing to do } nlevels <- length(level.label) } else { # just clustered data, but no random effects nlevels <- 1L level.label <- character(0L) } } else { if (length(level.label) > 0L) { lav_msg_warn(gettext( "`level.label' argument will be ignored if `cluster' argument is missing" )) } nlevels <- 1L level.label <- character(0L) cluster <- character(0L) } # check ov.names vs ngroups if (ngroups > 1L) { if (is.list(ov.names)) { if (length(ov.names) != ngroups) { lav_msg_stop(gettextf( "ov.names assumes %1$s groups; data contains %2$s groups", length(ov.names), ngroups)) } } else { tmp <- ov.names ov.names <- vector("list", length = ngroups) ov.names[1:ngroups] <- list(tmp) } if (is.list(ov.names.x)) { if (length(ov.names.x) != ngroups) { lav_msg_stop(gettextf( "ov.names.x assumes %1$s groups; data contains %2$s groups", length(ov.names.x), ngroups)) } } else { tmp <- ov.names.x ov.names.x <- vector("list", length = ngroups) ov.names.x[1:ngroups] <- list(tmp) } } else { if (is.list(ov.names)) { if (length(ov.names) > 1L) { lav_msg_stop(gettext( "model syntax defines multiple groups; data suggests a single group")) } } else { ov.names <- list(ov.names) } if (is.list(ov.names.x)) { if (length(ov.names.x) > 1L) { lav_msg_stop(gettext( "model syntax defines multiple groups; data suggests a single group")) } } else { ov.names.x <- list(ov.names.x) } } # check if all ov.names can be found in the data.frame for (g in 1:ngroups) { # does the data contain all the observed variables # needed in the user-specified model for this group ov.all <- unique(c(ov.names[[g]], ov.names.x[[g]])) # no overlap if categ # handle interactions ov.int.names <- 
ov.all[grepl(":", ov.all)] n.int <- length(ov.int.names) if (n.int > 0L) { ov.names.noint <- ov.all[!ov.all %in% ov.int.names] for (iv in seq_len(n.int)) { NAMES <- strsplit(ov.int.names[iv], ":", fixed = TRUE)[[1L]] if (all(NAMES %in% ov.names.noint)) { # add this interaction term to the data.frame, unless # it already exists if (is.null(data[[ov.int.names[iv]]])) { data[[ov.int.names[iv]]] <- data[[NAMES[1L]]] * data[[NAMES[2L]]] } } } } # check for missing observed variables idx.missing <- which(!(ov.all %in% names(data))) if (length(idx.missing)) { lav_msg_stop( gettextf("some (observed) variables specified in the model are not found in the dataset: %s", paste(ov.all[idx.missing], collapse = " "))) } } # here, we know for sure all ov.names exist in the data.frame # create varTable # FIXME: should we add the 'group'/'cluster' variable (no for now) ov <- lav_dataframe_vartable( frame = data, ov.names = ov.names, ov.names.x = ov.names.x, ordered = ordered, as.data.frame. = FALSE ) # do some checking # check for unordered factors (but only if nlev > 2) if ("factor" %in% ov$type) { f.names <- ov$name[ov$type == "factor" & ov$nlev > 2L] f.names.all <- ov$name[ov$type == "factor"] OV.names <- unlist(ov.names) OV.names.x <- unlist(ov.names.x) OV.names.nox <- OV.names[!OV.names %in% OV.names.x] if (any(f.names %in% OV.names.x)) { lav_msg_stop( gettext("unordered factor(s) with more than 2 levels detected as exogenous covariate(s): "), paste(f.names, collapse = " ")) } else if (any(f.names.all %in% OV.names.nox)) { lav_msg_stop( gettext("unordered factor(s) detected; make them numeric or ordered:"), paste(f.names.all, collapse = " ")) } } # check for ordered exogenous variables if ("ordered" %in% ov$type[ov$name %in% unlist(ov.names.x)]) { f.names <- ov$name[ov$type == "ordered" & ov$name %in% unlist(ov.names.x)] if (any(f.names %in% unlist(ov.names.x))) { lav_msg_warn(gettextf( "exogenous variable(s) declared as ordered in data: %s", lav_msg_view(f.names, log.sep = "none"))) } } # check for ordered endogenous variables with more than 12 levels if ("ordered" %in% ov$type[!ov$name %in% unlist(ov.names.x)]) { f.names <- ov$name[ov$type == "ordered" & !ov$name %in% unlist(ov.names.x) & ov$nlev > 12L] if (length(f.names) > 0L) { lav_msg_warn(gettextf( "some ordered categorical variable(s) have more than 12 levels: %s", lav_msg_view(f.names, log.sep = "none"))) } } # check for zero-cases idx <- which(ov$nobs == 0L | ov$var == 0) if (!allow.single.case && length(idx) > 0L) { OV <- as.data.frame(ov) rn <- rownames(OV) rn[idx] <- paste(rn[idx], "***", sep = "") rownames(OV) <- rn print(OV) lav_msg_stop(gettext( "some variables have no values (only missings) or no variance")) } # check for single cases (no variance!) 
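  # ------------------------------------------------------------------
  # NOTE (added, illustrative only): when one of these sanity checks
  # fires, the offending rows of the variable table are marked with
  # "***" in the printed output. The same table can be inspected
  # directly on a fitted object (a minimal, hedged sketch):
  #
  #   varTable(fit)   # per variable: name, type, nobs, mean, var, nlev, ...
  # ------------------------------------------------------------------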
idx <- which(ov$nobs == 1L | (ov$type == "numeric" & !is.finite(ov$var))) if (!allow.single.case && length(idx) > 0L) { OV <- as.data.frame(ov) rn <- rownames(OV) rn[idx] <- paste(rn[idx], "***", sep = "") rownames(OV) <- rn print(OV) lav_msg_stop(gettext( "some variables have only 1 observation or no finite variance")) } # check for ordered variables with only 1 level idx <- which(ov$type == "ordered" & ov$nlev == 1L) if (!allow.single.case && length(idx) > 0L) { OV <- as.data.frame(ov) rn <- rownames(OV) rn[idx] <- paste(rn[idx], "***", sep = "") rownames(OV) <- rn print(OV) lav_msg_stop(gettext("ordered variable(s) has/have only 1 level")) } # check for mix small/large variances (NOT including exo variables) if (!std.ov && !allow.single.case && any(ov$type == "numeric")) { num.idx <- which(ov$type == "numeric" & ov$exo == 0L) if (length(num.idx) > 0L) { min.var <- min(ov$var[num.idx]) max.var <- max(ov$var[num.idx]) rel.var <- max.var / min.var if (rel.var > 1000) { lav_msg_warn( gettext("some observed variances are (at least) a factor 1000 times larger than others; use varTable(fit) to investigate")) } } } # check for really large variances (perhaps -999999 for missing?) if (!std.ov && any(ov$type == "numeric")) { num.idx <- which(ov$type == "numeric" & ov$exo == 0L) if (length(num.idx) > 0L) { max.var <- max(ov$var[num.idx]) if (max.var > 1000000) { lav_msg_warn( gettext("some observed variances are larger than 1000000 use varTable(fit) to investigate")) } } } # check for all-exogenous variables (eg in f <~ x1 + x2 + x3) if (all(ov$exo == 1L)) { lav_msg_warn(gettext( "all observed variables are exogenous; model may not be identified")) } # prepare empty lists # group-based case.idx <- vector("list", length = ngroups) Mp <- vector("list", length = ngroups) Rp <- vector("list", length = ngroups) norig <- vector("list", length = ngroups) nobs <- vector("list", length = ngroups) X <- vector("list", length = ngroups) eXo <- vector("list", length = ngroups) Lp <- vector("list", length = ngroups) weights <- vector("list", length = ngroups) # collect information per upper-level group for (g in 1:ngroups) { # extract variables in correct order if (nlevels > 1L) { # keep 'joint' (Y,X) matrix in @X if multilevel (or always?) 
# yes for multilevel (for now); no for clustered only OV.NAMES <- unique(c(ov.names[[g]], ov.names.x[[g]])) ov.idx <- ov$idx[match(OV.NAMES, ov$name)] } else { ov.idx <- ov$idx[match(ov.names[[g]], ov$name)] } exo.idx <- ov$idx[match(ov.names.x[[g]], ov$name)] all.idx <- unique(c(ov.idx, exo.idx)) # extract cases per group if (ngroups > 1L || length(group.label) > 0L) { if (missing == "listwise") { case.idx[[g]] <- which(data[[group]] == group.label[g] & complete.cases(data[all.idx])) nobs[[g]] <- length(case.idx[[g]]) norig[[g]] <- length(which(data[[group]] == group.label[g])) # } else if(missing == "pairwise" && length(exo.idx) > 0L) { # case.idx[[g]] <- which(data[[group]] == group.label[g] & # complete.cases(data[exo.idx])) # nobs[[g]] <- length(case.idx[[g]]) # norig[[g]] <- length(which(data[[group]] == group.label[g])) } else if (length(exo.idx) > 0L && missing != "ml.x") { case.idx[[g]] <- which(data[[group]] == group.label[g] & complete.cases(data[exo.idx])) nobs[[g]] <- length(case.idx[[g]]) norig[[g]] <- length(which(data[[group]] == group.label[g])) if ((nobs[[g]] < norig[[g]])) { lav_msg_warn(gettextf( "%1$s cases were deleted in group %2$s due to missing values in exogenous variable(s), while fixed.x = TRUE.", (norig[[g]] - nobs[[g]]), group.label[g])) } } else { case.idx[[g]] <- which(data[[group]] == group.label[g]) nobs[[g]] <- norig[[g]] <- length(case.idx[[g]]) } } else { if (missing == "listwise") { case.idx[[g]] <- which(complete.cases(data[all.idx])) nobs[[g]] <- length(case.idx[[g]]) norig[[g]] <- nrow(data) # } else if(missing == "pairwise" && length(exo.idx) > 0L) { # case.idx[[g]] <- which(complete.cases(data[exo.idx])) # nobs[[g]] <- length(case.idx[[g]]) # norig[[g]] <- nrow(data) } else if (length(exo.idx) > 0L && missing != "ml.x") { case.idx[[g]] <- which(complete.cases(data[exo.idx])) nobs[[g]] <- length(case.idx[[g]]) norig[[g]] <- nrow(data) if ((nobs[[g]] < norig[[g]])) { lav_msg_warn( gettextf("%s cases were deleted due to missing values in exogenous variable(s), while fixed.x = TRUE.", (norig[[g]] - nobs[[g]]))) } } else { case.idx[[g]] <- 1:nrow(data) nobs[[g]] <- norig[[g]] <- length(case.idx[[g]]) } } # extract data X[[g]] <- data.matrix(data[case.idx[[g]], ov.idx, drop = FALSE]) dimnames(X[[g]]) <- NULL ### copy? # sampling weights (but no normalization yet) if (!is.null(sampling.weights)) { WT <- data[[sampling.weights]][case.idx[[g]]] if (any(WT < 0)) { lav_msg_stop(gettext("some sampling weights are negative")) } # check for missing values in sampling weight variable if (any(is.na(WT))) { lav_msg_stop(gettextf( "sampling.weights variable %s contains missing values", sQuote(sampling.weights))) } weights[[g]] <- WT } # construct integers for user-declared 'ordered' factors # FIXME: is this really (always) needed??? # (but still better than doing lapply(data[,idx], ordered) which # generated even more copies) user.ordered.names <- ov$name[ov$type == "ordered" & ov$user == 1L] user.ordered.idx <- which(ov.names[[g]] %in% user.ordered.names) if (length(user.ordered.idx) > 0L) { for (i in user.ordered.idx) { X[[g]][, i][is.na(X[[g]][, i])] <- NA # change NaN to NA X[[g]][, i] <- as.numeric(as.factor(X[[g]][, i])) # possible alternative to the previous two lines: # X[[g]][,i] <- as.numeric(factor(X[[g]][,i], exclude = c(NA, NaN))) } } ## FIXME: ## - why also in X? 
(for samplestats, for now) if (length(exo.idx) > 0L) { eXo[[g]] <- data.matrix(data[case.idx[[g]], exo.idx, drop = FALSE]) dimnames(eXo[[g]]) <- NULL } else { eXo[g] <- list(NULL) } # standardize observed variables? numeric only! if (std.ov) { num.idx <- which(ov$name %in% ov.names[[g]] & ov$type == "numeric" & ov$exo == 0L) if (length(num.idx) > 0L) { X[[g]][, num.idx] <- scale(X[[g]][, num.idx, drop = FALSE])[, , drop = FALSE] # three copies are made!!!!! } if (length(exo.idx) > 0L) { eXo[[g]] <- scale(eXo[[g]])[, , drop = FALSE] } } # response patterns (ordered variables only) ord.idx <- which(ov.names[[g]] %in% ov$name[ov$type == "ordered"]) if (length(ord.idx) > 0L) { Rp[[g]] <- lav_data_resp_patterns(X[[g]][, ord.idx, drop = FALSE]) } # warn if we have a small number of observations (but NO error!) if (!allow.single.case && nobs[[g]] < (nvar <- length(ov.idx))) { txt <- "" if (ngroups > 1L) txt <- gettextf("in group %s", g) lav_msg_warn( gettextf("small number of observations (nobs < nvar) %1$s: nobs = %2$s nvar = %3$s", txt, nobs[[g]], nvar)) } # check variances per group (if we have multiple groups) # to catch zero-variance variables within a group (new in 0.6-8) if (ngroups > 1L) { # X group.var <- apply(X[[g]], 2, var, na.rm = TRUE) zero.var <- which(group.var < .Machine$double.eps) if (length(zero.var) == 0L) { # all is good } else { # some zero variances! gtxt <- if (ngroups > 1L) { gettextf("in group %s", g) } else { "" } lav_msg_stop( gettext("some variables have no variance"), gtxt, ":", paste(ov.names[[g]][zero.var], collapse = " ")) } # eXo (if conditional.x = TRUE)... if (length(exo.idx) > 0L) { group.var <- apply(eXo[[g]], 2, var, na.rm = TRUE) zero.var <- which(group.var < .Machine$double.eps) if (length(zero.var) == 0L) { # all is good } else { # some zero variances! gtxt <- if (ngroups > 1L) { gettextf("in group %s", g) } else { "" } lav_msg_stop( gettext("some exogenous variables have no variance"), gtxt, ":", paste(ov.names.x[[g]][zero.var], collapse = " ") ) } } } # cluster information if (length(cluster) > 0L) { # extract cluster variable(s), for this group clus <- data.matrix(data[case.idx[[g]], cluster]) if (nlevels > 1L) { multilevel <- TRUE } else { multilevel <- FALSE } # ALWAYS add ov.names.x at the end, even if conditional.x (0.6-7) OV.NAMES <- unique(c(ov.names[[g]], ov.names.x[[g]])) Lp[[g]] <- lav_data_cluster_patterns( Y = X[[g]], clus = clus, cluster = cluster, multilevel = multilevel, ov.names = OV.NAMES, ov.names.x = ov.names.x[[g]], ov.names.l = ov.names.l[[g]] ) # new in 0.6-4 # check for 'level-1' variables with zero within variance l1.idx <- c( Lp[[g]]$within.idx[[2]], # within only Lp[[g]]$both.idx[[2]] ) for (v in l1.idx) { within.var <- tapply(X[[g]][, v], Lp[[g]]$cluster.idx[[2]], FUN = var, na.rm = TRUE ) # ignore singletons singleton.idx <- which(Lp[[g]]$cluster.size[[2]] == 1L) if (length(singleton.idx) > 0L) { within.var[singleton.idx] <- 10 # non-zero variance } zero.var <- which(within.var < .Machine$double.eps) if (length(zero.var) == 0L) { # all is good } else if (length(zero.var) == length(within.var)) { # all zero! possibly a between-level variable gtxt <- if (ngroups > 1L) { gettextf("in group %s", g) } else { "" } lav_msg_warn( gettextf("Level-1 variable %1$s has no variance at the within level %2$s. The variable appears to be a between-level variable. Please remove this variable from the level 1 section in the model syntax.", dQuote(ov.names[[g]][v]), gtxt)) } else { # some zero variances! 
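          # ----------------------------------------------------------
          # NOTE (added, illustrative only): the level-1/level-2
          # variance checks in this cluster block guard two-level model
          # syntax; a minimal, hedged sketch using the Demo.twolevel
          # example data shipped with lavaan:
          #
          #   model <- ' level: 1
          #                fw =~ y1 + y2 + y3
          #              level: 2
          #                fb =~ y1 + y2 + y3 '
          #   fit <- sem(model, data = Demo.twolevel, cluster = "cluster")
          # ----------------------------------------------------------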
          gtxt <- if (ngroups > 1L) {
            gettextf("in group %s", g)
          } else {
            ""
          }
          lav_msg_warn(gettextf(
            "Level-1 variable %1$s has no variance within some clusters %2$s. The cluster ids with zero within variance are: %3$s.",
            dQuote(ov.names[[g]][v]), gtxt,
            lav_msg_view(Lp[[g]]$cluster.id[[2]][zero.var], "none")))
        }
      }

      # new in 0.6-4
      # check for 'level-2' only variables with non-zero within variance
      l2.idx <- Lp[[g]]$between.idx[[2]] # between only
      error.flag <- FALSE
      for (v in l2.idx) {
        within.var <- tapply(X[[g]][, v], Lp[[g]]$cluster.idx[[2]],
          FUN = var, na.rm = TRUE
        )
        non.zero.var <- which(unname(within.var) > .Machine$double.eps)
        if (length(non.zero.var) == 0L) {
          # all is good
        } else if (length(non.zero.var) == 1L) {
          # just one
          gtxt <- if (ngroups > 1L) {
            gettextf("in group %s", g)
          } else {
            ""
          }
          lav_msg_warn(gettextf(
            "Level-2 variable %1$s has non-zero variance at the within level %2$s in one cluster with id: %3$s. Please double-check if this is a between only variable.",
            dQuote(ov.names[[g]][v]), gtxt,
            Lp[[g]]$cluster.id[[2]][non.zero.var]))
        } else {
          error.flag <- TRUE
          # several
          gtxt <- if (ngroups > 1L) {
            gettextf("in group %s", g)
          } else {
            ""
          }
          lav_msg_warn(gettextf(
            "Level-2 variable %1$s has non-zero variance at the within level %2$s. The cluster ids with non-zero within variance are: %3$s",
            dQuote(ov.names[[g]][v]), gtxt,
            lav_msg_view(Lp[[g]]$cluster.id[[2]][non.zero.var], "none")))
        }
      }
      if (error.flag) {
        lav_msg_stop(
          gettext("Some between-level (only) variables have non-zero variance at the within-level. Please double-check your data.")
        )
      }
    } # clustered data

    # missing data
    if (missing != "listwise") {
      if (length(cluster) > 0L) {
        # get missing patterns
        Mp[[g]] <- lav_data_missing_patterns(X[[g]],
          sort.freq = TRUE, coverage = TRUE, Lp = Lp[[g]]
        )
      } else {
        # get missing patterns
        Mp[[g]] <- lav_data_missing_patterns(X[[g]],
          sort.freq = TRUE, coverage = TRUE, Lp = NULL
        )
      }
      # checking!
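      # --------------------------------------------------------------
      # NOTE (added, illustrative only): a hedged sketch of the
      # user-facing side of this missing-data handling; with
      # missing = "ml" (FIML), cases with partial information are
      # retained and the pattern/coverage information computed above
      # can be inspected:
      #
      #   fit <- cfa(HS.model, data = HolzingerSwineford1939, missing = "ml")
      #   lavInspect(fit, "coverage")   # pairwise coverage, warned about below
      #   lavInspect(fit, "patterns")   # missing-data patterns
      # --------------------------------------------------------------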
      if (length(Mp[[g]]$empty.idx) > 0L) {
        # new in 0.6-4: return 'original' index in full data.frame
        empty.case.idx <- case.idx[[g]][Mp[[g]]$empty.idx]
        lav_msg_warn(gettextf(
          "some cases are empty and will be ignored: %s.",
          paste(empty.case.idx, collapse = " ")))
      }
      if (any(Mp[[g]]$coverage < 0.1)) {
        coverage.vech <- lav_matrix_vech(Mp[[g]]$coverage, diagonal = FALSE)
        small.idx <- which(coverage.vech < 0.1)
        if (all(coverage.vech[small.idx] == 0)) {
          # 0.6-18: no warning --> this could be due to missing by design
        } else {
          lav_msg_warn(gettext(
            "due to missing values, some pairwise combinations have less than 10% coverage; use lavInspect(fit, \"coverage\") to investigate."))
        }
      }
      # in case we had observations with only missings
      nobs[[g]] <- NROW(X[[g]]) - length(Mp[[g]]$empty.idx)
    } # missing
  } # groups, at first level

  # sampling weights, again
  if (is.null(sampling.weights)) {
    sampling.weights <- character(0L)
  } else {
    # check if we need normalization
    if (sampling.weights.normalization == "none") {
      # nothing to do
    } else if (sampling.weights.normalization == "total") {
      sum.weights <- sum(unlist(weights))
      ntotal <- sum(unlist(nobs))
      for (g in 1:ngroups) {
        WT <- weights[[g]]
        WT2 <- WT / sum.weights * ntotal
        weights[[g]] <- WT2
      }
    } else if (sampling.weights.normalization == "group") {
      for (g in 1:ngroups) {
        WT <- weights[[g]]
        WT2 <- WT / sum(WT) * nobs[[g]]
        weights[[g]] <- WT2
      }
    } else {
      lav_msg_stop(gettext(
        "sampling.weights.normalization should be total, group or none."))
    }
  }

  # block.labels
  block.label <- character(0L)
  if (length(group.label) > 0L && length(level.label) == 0L) {
    block.label <- group.label
  } else if (length(level.label) > 0L && length(group.label) == 0L) {
    block.label <- level.label
  } else if (length(group.label) > 0L && length(level.label) > 0L) {
    block.label <- paste(rep(group.label, each = length(level.label)),
      rep(level.label, times = length(group.label)),
      sep = "."
    )
  }

  lavData <- new("lavData",
    data.type = "full",
    ngroups = ngroups,
    group = group,
    nlevels = nlevels,
    cluster = cluster,
    group.label = group.label,
    level.label = level.label,
    block.label = block.label,
    std.ov = std.ov,
    nobs = nobs,
    norig = norig,
    ov.names = ov.names,
    ov.names.x = ov.names.x,
    ov.names.l = ov.names.l,
    # ov.types = ov.types,
    # ov.idx = ov.idx,
    ordered = as.character(ordered),
    weights = weights,
    sampling.weights = sampling.weights,
    ov = ov,
    case.idx = case.idx,
    missing = missing,
    X = X,
    eXo = eXo,
    Mp = Mp,
    Rp = Rp,
    Lp = Lp
  )

  lavData
}
lavaan/R/lav_partable.R0000644000176200001440000012066514627656441014515 0ustar liggesusers# lavaan parameter table
#
# initial version: YR 22/05/2009
# major revision: YR 02/11/2010: - FLATTEN the model syntax and turn it into a
#                                  data.frame, with a "modifiers" attribute
#                                - add default elements here
#                                - check for duplicate elements
#                                - allow for every possible model...
#                                - since 0.4-5
#                                - the end result is a full description of
#                                  a model (but no matrix representation)
# - 14 Jan 2014: merge 02lavaanUser.R with lav_partable.R
#                move syntax-based code to lav_syntax.R
# - 26 April 2016: handle multiple 'blocks' (levels, classes, groups, ...)
# - 24 March 2019: handle efa sets # - 23 May 2020: support for random slopes lavaanify <- lavParTable <- function( model = NULL, meanstructure = FALSE, int.ov.free = FALSE, int.lv.free = FALSE, marker.int.zero = FALSE, orthogonal = FALSE, orthogonal.y = FALSE, orthogonal.x = FALSE, orthogonal.efa = FALSE, std.lv = FALSE, correlation = FALSE, effect.coding = "", conditional.x = FALSE, fixed.x = FALSE, parameterization = "delta", constraints = NULL, ceq.simple = FALSE, auto = FALSE, model.type = "sem", auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = FALSE, auto.cov.lv.x = FALSE, auto.cov.y = FALSE, auto.th = FALSE, auto.delta = FALSE, auto.efa = FALSE, varTable = NULL, # nolint ngroups = 1L, nthresholds = NULL, group.equal = NULL, group.partial = NULL, group.w.free = FALSE, debug = FALSE, warn = TRUE, as.data.frame. = TRUE) { # nolint if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } if (!missing(warn)) { current.warn <- lav_warn() if (lav_warn(warn)) on.exit(lav_warn(current.warn), TRUE) } # check if model is already flat or a full parameter table if (is.list(model) && !is.null(model$lhs)) { if (is.null(model$mod.idx)) { lav_msg_warn(gettext("input already looks like a parameter table")) return(lav_partable_set_cache(model)) } else { flat <- model } } else { # parse the model syntax and flatten the user-specified model # return a data.frame, where each line is a model element (rhs, op, lhs) flat <- lavParseModelString( model.syntax = model, debug = FALSE ) } # user-specified *modifiers* are returned as an attribute tmp.mod <- attr(flat, "modifiers") attr(flat, "modifiers") <- NULL # user-specified *constraints* are returned as an attribute tmp.con <- attr(flat, "constraints") attr(flat, "constraints") <- NULL # ov.names.data? ov.names.data <- attr(flat, "ovda") # extra constraints? if (!is.null(constraints) && any(nchar(constraints) > 0L)) { flat2 <- lavParseModelString(model.syntax = constraints, warn = lav_warn()) con2 <- attr(flat2, "constraints") rm(flat2) tmp.con <- c(tmp.con, con2) } if (length(tmp.con) > 0L) { # add 'user' column tmp.con <- lapply(tmp.con, function(x) { x$user <- 1L x }) # any explicit (in)equality constraints? (ignoring := definitions) tmp.con.nondef.flag <- (sum(sapply(tmp.con, "[[", "op") %in% c("==", "<", ">")) > 0L) # any explicit equality constraints? tmp.con.eq.flag <- (sum(sapply(tmp.con, "[[", "op") == "==") > 0L) if (tmp.con.nondef.flag) { ceq.simple <- FALSE } } if (lav_debug()) { cat("[lavaan DEBUG]: flat (flattened user model):\n") print(flat) cat("[lavaan DEBUG]: tmp.mod (modifiers):\n") print(str(tmp.mod)) cat("[lavaan DEBUG]: tmp.con (constraints):\n") print(str(tmp.con)) } # bogus varTable? (if data.type == "none") if (!is.null(varTable)) { if (!is.list(varTable) || is.null(varTable$name)) { lav_msg_stop(gettext( "varTable is not a list or does not contain variable names.")) } if (all(varTable$nobs == 0)) { varTable <- NULL # nolint } } # check for wrongly specified variances/covariances/intercepts # of exogenous variables in model syntax (if fixed.x=TRUE) if (fixed.x && lav_warn()) { # we ignore the groups here! # we only call this function for the warning message tmp <- lav_partable_vnames(flat, "ov.x", force.warn = TRUE) rm(tmp) } # check if group.equal is non-empty, but ngroups = 1L # fixme: triggers this if mimic="Mplus"! 
# if(ngroups == 1L && length(group.equal) > 0L) { # warning("lavaan WARNING: group.equal= argument", # " has no effect if no groups are specified.") # } # auto=TRUE? if (auto) { # mimic sem/cfa auto behavior if (model.type == "sem") { int.ov.free <- TRUE int.lv.free <- FALSE auto.fix.first <- !std.lv auto.fix.single <- TRUE auto.var <- TRUE auto.cov.lv.x <- TRUE auto.cov.y <- TRUE auto.th <- TRUE auto.delta <- TRUE auto.efa <- TRUE } else if (model.type == "growth") { model.type <- "growth" int.ov.free <- FALSE int.lv.free <- TRUE auto.fix.first <- !std.lv auto.fix.single <- TRUE auto.var <- TRUE auto.cov.lv.x <- TRUE auto.cov.y <- TRUE auto.th <- TRUE auto.delta <- TRUE auto.efa <- TRUE } } # check for meanstructure if (any(flat$op == "~1")) { meanstructure <- TRUE } # check for block identifiers in the syntax (op = ":") n.block.flat <- length(which(flat$op == ":")) # this is NOT the number of blocks (eg group 1: level 1: -> 1 block) # for each non-empty `block' in n.block.flat, produce a USER if (n.block.flat > 0L) { # make sure flat is a data.frame flat <- as.data.frame(flat, stringsAsFactors = FALSE) # what are the block lhs labels? blocks.lhs.all <- tolower(flat$lhs[flat$op == ":"]) tmp.block.lhs <- unique(blocks.lhs.all) # if we have group and level, check that group comes first! if ("group" %in% tmp.block.lhs && "level" %in% tmp.block.lhs) { group.idx <- which(tmp.block.lhs == "group") level.idx <- which(tmp.block.lhs == "level") if (group.idx > level.idx) { lav_msg_stop(gettext( "levels must be nested within groups (not the other way around).")) } } # block op == ":" indices block.op.idx <- which(flat$op == ":") # check for wrongly spelled 'group' lhs if (length(grep("group", tmp.block.lhs)) > 1L) { lav_msg_warn(gettext("ambiguous block identifiers for group:"), lav_msg_view(tmp.block.lhs[grep("group", tmp.block.lhs)], "none")) } # no empty :rhs fields allowed! if (any(nchar(flat$rhs[block.op.idx]) == 0L)) { empty.idx <- nchar(flat$rhs[block.op.idx]) == 0L txt <- paste(flat$lhs[block.op.idx][empty.idx], ":") lav_msg_stop(gettext( "syntax contains block identifiers with missing numbers/labels: "), txt) } # check for ngroups (ngroups is based on the data!) if ("group" %in% tmp.block.lhs) { # how many group blocks? group.block.idx <- flat$op == ":" & flat$lhs == "group" n.group.flat <- length(unique(flat$rhs[group.block.idx])) if (n.group.flat > 0L && n.group.flat != ngroups) { lav_msg_stop(gettextf( "syntax defines %1$s groups; data (or argument ngroups) suggests %2$s groups", n.group.flat, ngroups)) } } # figure out how many 'blocks' we have, and store indices/block.labels tmp.block.rhs <- rep("0", length(tmp.block.lhs)) block.id <- 0L block.info <- vector("list", length = n.block.flat) # too large block.op.idx1 <- c(block.op.idx, nrow(flat) + 1L) # add an additional row for (block.op in seq_len(n.block.flat)) { # fill block.rhs value(s) block.lhs <- flat$lhs[block.op.idx1[block.op]] block.rhs <- flat$rhs[block.op.idx1[block.op]] tmp.block.rhs[which(block.lhs == tmp.block.lhs)] <- block.rhs # another block identifier?
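# (i.e. is the next flat row itself a ':' header? an illustrative
#  example, not package code: in the syntax
#    group: 1
#    level: within
#      y ~ x
#  the 'group:' row is immediately followed by another header, so it
#  opens no block of its own; 'group' and 'level' jointly label the
#  block that starts at 'y ~ x'.)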
if (block.op.idx1[block.op + 1L] - block.op.idx1[block.op] == 1L) { next } # we have a 'block' block.id <- block.id + 1L # select flat rows for this block tmp.idx <- seq.int( block.op.idx1[block.op] + 1L, block.op.idx1[block.op + 1L] - 1L ) # store info in block.info block.info[[block.id]] <- list( lhs = tmp.block.lhs, # always the same rhs = tmp.block.rhs, # for this block idx = tmp.idx ) } block.info <- block.info[seq_len(block.id)] # new in 0.6-12 # check for blocks with the same block.rhs combination # (perhaps added later?) # - merge the indices # - remove the duplicated blocks block.labels <- sapply(lapply(block.info, "[[", "rhs"), paste, collapse = "." ) nblocks <- length(unique(block.labels)) if (nblocks < length(block.labels)) { # it would appear we have duplicated block.labels -> merge dup.idx <- which(duplicated(block.labels)) for (i in seq_along(dup.idx)) { this.dup.idx <- dup.idx[i] orig.idx <- which(block.labels == block.labels[this.dup.idx])[1] block.info[[orig.idx]]$idx <- c( block.info[[orig.idx]]$idx, block.info[[this.dup.idx]]$idx ) } block.info <- block.info[-dup.idx] } # split the flat data.frame per `block', create tmp.list # for each `block', and rbind them together, adding block columns for (block in seq_len(nblocks)) { tmp.block.rhs <- block.info[[block]]$rhs block.lhs <- block.info[[block]]$lhs[length(tmp.block.lhs)] # last one block.idx <- block.info[[block]]$idx flat.block <- flat[block.idx, ] # rm 'block' column (if any) in flat.block flat.block$block <- NULL # new in 0.6-7: check for random slopes, add them here if (block.lhs == "level" && block > 1L && # FIXME: multigroup, multilevel !is.null(flat$rv) && any(nchar(flat$rv) > 0L)) { lv.names.rv <- unique(flat$rv[nchar(flat$rv) > 0L]) for (i in seq_along(lv.names.rv)) { # add phantom latent variable tmp <- flat.block[1, ] tmp$lhs <- lv.names.rv[i] tmp$op <- "=~" tmp$rhs <- lv.names.rv[i] tmp$mod.idx <- max(flat$mod.idx) + i tmp$fixed <- "0" tmp$start <- "" tmp$lower <- "" tmp$upper <- "" tmp$label <- "" tmp$prior <- "" tmp$efa <- "" tmp$rv <- lv.names.rv[i] flat.block <- rbind(flat.block, tmp, deparse.level = 0L) tmp.mod <- c(tmp.mod, list(list(fixed = 0))) } } # new in 0.6-8: if multilevel, use 'global' ov.names.x if (fixed.x && block.lhs == "level") { tmp.ov.names.x <- lav_partable_vnames(flat, "ov.x") # global ov.names.x.block <- lav_partable_vnames(flat.block, "ov.x") if (length(ov.names.x.block) > 0L) { idx <- which(!ov.names.x.block %in% tmp.ov.names.x) if (length(idx) > 0L) { # warn! lav_msg_warn(gettextf( "the variable(s) [%s] are exogenous at one level, but endogenous at another level. These variables will be treated as endogenous, and their variances/intercepts will be freely estimated. 
To remove this warning, use fixed.x = FALSE.", lav_msg_view(ov.names.x.block[idx], "none"))) ov.names.x.block <- ov.names.x.block[-idx] } } } else { ov.names.x.block <- NULL } # new in 0.6-12: if multilevel and conditional.x, make sure # that 'split' exogenous covariates become 'y' variables if (conditional.x && block.lhs == "level") { if (ngroups == 1L) { other.block.names <- lav_partable_vnames(flat, "ov", block = seq_len(nblocks)[-block] ) } else { # TEST ME this.group <- ceiling(block / nlevels) blocks.within.group <- (this.group - 1L) * nlevels + seq_len(nlevels) other.block.names <- lav_partable_vnames(flat, "ov", block = blocks.within.group[-block] ) } ov.names.x.block <- lav_partable_vnames(flat.block, "ov.x") if (length(ov.names.x.block) > 0L) { idx <- which(ov.names.x.block %in% other.block.names) if (length(idx) > 0L) { ov.names.x.block <- ov.names.x.block[-idx] } } } else { ov.names.x.block <- NULL } list.block <- lav_partable_flat(flat.block, blocks = tmp.block.lhs, block.id = block, meanstructure = meanstructure, int.ov.free = int.ov.free, int.lv.free = int.lv.free, orthogonal = orthogonal, orthogonal.y = orthogonal.y, orthogonal.x = orthogonal.x, orthogonal.efa = orthogonal.efa, std.lv = std.lv, correlation = correlation, conditional.x = conditional.x, fixed.x = fixed.x, parameterization = parameterization, auto.fix.first = auto.fix.first, auto.fix.single = auto.fix.single, auto.var = auto.var, auto.cov.lv.x = auto.cov.lv.x, auto.cov.y = auto.cov.y, auto.th = auto.th, auto.delta = auto.delta, auto.efa = auto.efa, varTable = varTable, group.equal = NULL, group.w.free = group.w.free, ngroups = 1L, nthresholds = nthresholds, ov.names.x.block = ov.names.x.block ) list.block <- as.data.frame(list.block, stringsAsFactors = FALSE) # add block columns with current values in block.rhs for (b in seq_len(length(tmp.block.lhs))) { block.lhs <- tmp.block.lhs[b] block.rhs <- tmp.block.rhs[b] list.block[block.lhs] <- rep(block.rhs, length(list.block$lhs)) } if (!exists("tmp.list")) { tmp.list <- list.block } else { list.block$id <- list.block$id + max(tmp.list$id) tmp.list <- rbind(tmp.list, list.block) } } tmp.list <- as.list(tmp.list) # convert block columns to integers if possible for (b in seq_len(length(tmp.block.lhs))) { block.lhs <- tmp.block.lhs[b] block.rhs <- tmp.block.rhs[b] tmp <- try(scan( text = tmp.list[[block.lhs]], what = integer(), quiet = TRUE ), silent = TRUE) if (inherits(tmp, "integer")) { tmp.list[[block.lhs]] <- tmp } } } else { tmp.list <- lav_partable_flat(flat, blocks = "group", meanstructure = meanstructure, int.ov.free = int.ov.free, int.lv.free = int.lv.free, orthogonal = orthogonal, orthogonal.y = orthogonal.y, orthogonal.x = orthogonal.x, orthogonal.efa = orthogonal.efa, std.lv = std.lv, correlation = correlation, conditional.x = conditional.x, fixed.x = fixed.x, parameterization = parameterization, auto.fix.first = auto.fix.first, auto.fix.single = auto.fix.single, auto.var = auto.var, auto.cov.lv.x = auto.cov.lv.x, auto.cov.y = auto.cov.y, auto.th = auto.th, auto.delta = auto.delta, auto.efa = auto.efa, varTable = varTable, group.equal = group.equal, group.w.free = group.w.free, ngroups = ngroups, nthresholds = nthresholds ) } if (lav_debug()) { cat("[lavaan DEBUG]: parameter tmp.list without MODIFIERS:\n") print(as.data.frame(tmp.list, stringsAsFactors = FALSE)) } # handle multilevel-specific constraints multilevel <- FALSE if (!is.null(tmp.list$level)) { nlevels <- lav_partable_nlevels(tmp.list) if (nlevels > 1L) { multilevel <- TRUE } } if 
(multilevel && any(tmp.list$op == "~1")) { # fix ov intercepts for all within ov that also appear at level 2 # FIXME: not tested with > 2 levels ov.names <- lav_partable_vnames(tmp.list, "ov") ## all names level.values <- lav_partable_level_values(tmp.list) other.names <- tmp.list$lhs[tmp.list$op == "~1" & tmp.list$level %in% level.values[-1L] & tmp.list$lhs %in% ov.names] fix.names.idx <- which(tmp.list$op == "~1" & tmp.list$level %in% level.values[1L] & tmp.list$lhs %in% other.names) if (length(fix.names.idx) > 0L) { tmp.list$free[fix.names.idx] <- 0L tmp.list$ustart[fix.names.idx] <- 0 } } if (multilevel && any(tmp.list$op == "|")) { # fix ALL thresholds at level 1 level.values <- lav_partable_level_values(tmp.list) th.idx <- which(tmp.list$op == "|" & tmp.list$level %in% level.values[1L]) tmp.list$free[th.idx] <- 0L tmp.list$ustart[th.idx] <- 0 # fix ALL scaling parameters at higher levels scale.idx <- which(tmp.list$op == "~*~" & tmp.list$level %in% level.values[-1L]) tmp.list$free[scale.idx] <- 0L tmp.list$ustart[scale.idx] <- 1 } # apply user-specified modifiers warn.about.single.label <- FALSE if (length(tmp.mod)) { for (el in seq_along(tmp.mod)) { idx <- which(tmp.list$mod.idx == el) # for each group # 0.5-21: check if idx exists # perhaps the corresponding element was duplicated, and removed if (length(idx) == 0L) { next } tmp.mod.fixed <- tmp.mod[[el]]$fixed tmp.mod.start <- tmp.mod[[el]]$start tmp.mod.lower <- tmp.mod[[el]]$lower tmp.mod.upper <- tmp.mod[[el]]$upper tmp.mod.label <- tmp.mod[[el]]$label tmp.mod.prior <- tmp.mod[[el]]$prior tmp.mod.efa <- tmp.mod[[el]]$efa tmp.mod.rv <- tmp.mod[[el]]$rv # check for single argument if multiple groups if (ngroups > 1L && length(idx) > 1L) { # Ok, this is not very consistent: # A) here we force same behavior across groups if (length(tmp.mod.fixed) == 1L) { tmp.mod.fixed <- rep(tmp.mod.fixed, ngroups) } if (length(tmp.mod.start) == 1L) { tmp.mod.start <- rep(tmp.mod.start, ngroups) } if (length(tmp.mod.lower) == 1L) { tmp.mod.lower <- rep(tmp.mod.lower, ngroups) } if (length(tmp.mod.upper) == 1L) { tmp.mod.upper <- rep(tmp.mod.upper, ngroups) } if (length(tmp.mod.prior) == 1L) { tmp.mod.prior <- rep(tmp.mod.prior, ngroups) } if (length(tmp.mod.efa) == 1L) { tmp.mod.efa <- rep(tmp.mod.efa, ngroups) } if (length(tmp.mod.rv) == 1L) { tmp.mod.rv <- rep(tmp.mod.rv, ngroups) } # new in 0.6-7 (proposal): # - always recycle modifiers, including labels # - if ngroups > 1 AND group.label= is empty, produce a warning # (as this is a break from < 0.6-6) if (length(tmp.mod.label) == 1L) { tmp.mod.label <- rep(tmp.mod.label, ngroups) if (is.null(group.equal) || length(group.equal) == 0L) { warn.about.single.label <- TRUE } } # < 0.6-7 code: # B) here we do NOT! otherwise, it would imply an equality # constraint... # except if group.equal="loadings"!
# if(length(tmp.mod.label) == 1L) { # if("loadings" %in% group.equal || # "composite.loadings" %in% group.equal) { # tmp.mod.label <- rep(tmp.mod.label, ngroups) # } else { # tmp.mod.label <- c(tmp.mod.label, rep("", (ngroups-1L)) ) # } # } } # check for wrong number of arguments if multiple groups nidx <- length(idx) if ((!is.null(tmp.mod.fixed) && nidx != length(tmp.mod.fixed)) || (!is.null(tmp.mod.start) && nidx != length(tmp.mod.start)) || (!is.null(tmp.mod.lower) && nidx != length(tmp.mod.lower)) || (!is.null(tmp.mod.upper) && nidx != length(tmp.mod.upper)) || (!is.null(tmp.mod.prior) && nidx != length(tmp.mod.prior)) || (!is.null(tmp.mod.efa) && nidx != length(tmp.mod.efa)) || (!is.null(tmp.mod.rv) && nidx != length(tmp.mod.rv)) || (!is.null(tmp.mod.label) && nidx != length(tmp.mod.label))) { el.idx <- which(tmp.list$mod.idx == el)[1L] lav_msg_stop(gettextf( "wrong number of arguments in modifier (%s) of element", lav_msg_view(tmp.mod.label, "none")), tmp.list$lhs[el.idx], tmp.list$op[el.idx], tmp.list$rhs[el.idx] ) } # apply modifiers if (!is.null(tmp.mod.fixed)) { # two options: constant or NA na.idx <- which(is.na(tmp.mod.fixed)) not.na.idx <- which(!is.na(tmp.mod.fixed)) # constant tmp.list$ustart[idx][not.na.idx] <- tmp.mod.fixed[not.na.idx] tmp.list$free[idx][not.na.idx] <- 0L # NA* modifier tmp.list$free[idx][na.idx] <- 1L # eg factor loading tmp.list$ustart[idx][na.idx] <- as.numeric(NA) } if (!is.null(tmp.mod.start)) { tmp.list$ustart[idx] <- tmp.mod.start } if (!is.null(tmp.mod.prior)) { # do we already have a `prior' column? if not, create one if (is.null(tmp.list$prior)) { tmp.list$prior <- character(length(tmp.list$lhs)) } tmp.list$prior[idx] <- tmp.mod.prior } if (!is.null(tmp.mod.efa)) { # do we already have a `efa' column? if not, create one if (is.null(tmp.list$efa)) { tmp.list$efa <- character(length(tmp.list$lhs)) } tmp.list$efa[idx] <- tmp.mod.efa } if (!is.null(tmp.mod.rv)) { # do we already have a `rv' column? if not, create one if (is.null(tmp.list$rv)) { tmp.list$rv <- character(length(tmp.list$lhs)) } tmp.list$rv[idx] <- tmp.mod.rv tmp.list$free[idx] <- 0L tmp.list$ustart[idx] <- as.numeric(NA) # } if (!is.null(tmp.mod.lower)) { # do we already have a `lower' column? if not, create one if (is.null(tmp.list$lower)) { tmp.list$lower <- rep(-Inf, length(tmp.list$lhs)) } tmp.list$lower[idx] <- as.numeric(tmp.mod.lower) } if (!is.null(tmp.mod.upper)) { # do we already have a `upper' column? if not, create one if (is.null(tmp.list$upper)) { tmp.list$upper <- rep(Inf, length(tmp.list$lhs)) } tmp.list$upper[idx] <- as.numeric(tmp.mod.upper) } if (!is.null(tmp.mod.label)) { tmp.list$label[idx] <- tmp.mod.label } } } # remove mod.idx column tmp.list$mod.idx <- NULL # warning about single label in multiple group setting? if (warn.about.single.label) { lav_msg_warn(gettext( "using a single label per parameter in a multiple group setting implies imposing equality constraints across all the groups; If this is not intended, either remove the label(s), or use a vector of labels (one for each group); See the Multiple groups section in the man page of model.syntax." 
)) } # if lower/upper values were added, fix non-free values to ustart values # new in 0.6-6 if (!is.null(tmp.list$lower)) { fixed.idx <- which(tmp.list$free == 0L) if (length(fixed.idx) > 0L) { tmp.list$lower[fixed.idx] <- tmp.list$ustart[fixed.idx] } } if (!is.null(tmp.list$upper)) { fixed.idx <- which(tmp.list$free == 0L) if (length(fixed.idx) > 0L) { tmp.list$upper[fixed.idx] <- tmp.list$ustart[fixed.idx] } } # if rv column is present, add rv.names to ALL rows where they are used if (!is.null(tmp.list$rv)) { rv.names <- unique(tmp.list$rv[nchar(tmp.list$rv) > 0L]) for (i in seq_len(length(rv.names))) { lhs.idx <- which(tmp.list$lhs == rv.names[i] & tmp.list$op == "=~") tmp.list$rv[lhs.idx] <- rv.names[i] } } if (lav_debug()) { cat("[lavaan DEBUG]: parameter tmp.list with MODIFIERS:\n") print(as.data.frame(tmp.list, stringsAsFactors = FALSE)) } # get 'virtual' parameter labels if (n.block.flat > 1L) { blocks <- tmp.block.lhs } else { blocks <- "group" } label <- lav_partable_labels( partable = tmp.list, blocks = blocks, group.equal = group.equal, group.partial = group.partial ) if (lav_debug()) { cat("[lavaan DEBUG]: parameter tmp.list with LABELS:\n") tmp <- tmp.list tmp$label <- label print(as.data.frame(tmp, stringsAsFactors = FALSE)) } # handle EFA equality constraints # YR 14 Jan 2020: 0.6-6 no longer imposes 'explicit' constraints # if we only need to fix a parameter to 0/1 # Note: we should also check if they are really needed: # eg., if all the factor-loadings of the 'second' set (time/group) # are constrained to be equal to the factor-loadings of the first # set, no further constraints are needed if (auto.efa && !is.null(tmp.list$efa)) { tmp.list <- lav_partable_efa_constraints( LIST = tmp.list, orthogonal.efa = orthogonal.efa, group.equal = group.equal ) } # auto.efa # handle user-specified equality constraints # lavaan 0.6-11: # two settings: # 1) simple equality constraints ONLY -> back to basics: only # duplicate 'free' numbers; no longer explicit == rows with plabels # 2) mixture of simple and other (explicit) constraints # treat them together as we did in <0.6-11 tmp.list$plabel <- paste(".p", tmp.list$id, ".", sep = "") eq.labels <- unique(label[duplicated(label)]) eq.id <- integer(length(tmp.list$lhs)) for (eq.label in eq.labels) { tmp.con.idx <- length(tmp.con) all.idx <- which(label == eq.label) # all same-label parameters ref.idx <- all.idx[1L] # the first one only other.idx <- all.idx[-1L] # the others eq.id[all.idx] <- ref.idx # new in 0.6-6: make sure lower/upper constraints are equal too if (!is.null(tmp.list$lower) && length(unique(tmp.list$lower[all.idx])) > 0L) { non.inf <- which(is.finite(tmp.list$lower[all.idx])) if (length(non.inf) > 0L) { smallest.val <- min(tmp.list$lower[all.idx][non.inf]) tmp.list$lower[all.idx] <- smallest.val } } if (!is.null(tmp.list$upper) && length(unique(tmp.list$upper[all.idx])) > 0L) { non.inf <- which(is.finite(tmp.list$upper[all.idx])) if (length(non.inf) > 0L) { largest.val <- max(tmp.list$upper[all.idx][non.inf]) tmp.list$upper[all.idx] <- largest.val } } # two possibilities: # 1. all.idx contains a fixed parameter: in this case, # we fix them all (hopefully to the same value) # 2. all.idx contains only free parameters # 1. all.idx contains a fixed parameter if (any(tmp.list$free[all.idx] == 0L)) { # which one is fixed? fixed.all <- all.idx[tmp.list$free[all.idx] == 0L] # only pick the first fixed.idx <- fixed.all[1] # sanity check: are all ustart values equal?
ustart1 <- tmp.list$ustart[fixed.idx] if (!all(ustart1 == tmp.list$ustart[fixed.all])) { lav_msg_warn(gettext( "equality constraints involve fixed parameters with different values; only the first one will be used")) } # make them all fixed tmp.list$ustart[all.idx] <- tmp.list$ustart[fixed.idx] tmp.list$free[all.idx] <- 0L # not free anymore, since it must # be equal to the 'fixed' parameter # (Note: Mplus ignores this) eq.id[all.idx] <- 0L # remove from eq.id list # new in 0.6-8 (for efa + user-specified eq constraints) if (any(tmp.list$user[all.idx] %in% c(7L, 77L))) { # if involved in an efa block, store in tmp.con anyway # we may need it for the rotated solution for (o in other.idx) { tmp.con.idx <- tmp.con.idx + 1L tmp.con[[tmp.con.idx]] <- list( op = "==", lhs = tmp.list$plabel[ref.idx], rhs = tmp.list$plabel[o], user = 2L ) } } } else { # 2. all.idx contains only free parameters # old system: # - add tmp.con entry # - in 0.6-11: only if tmp.con is not empty if (!ceq.simple) { for (o in other.idx) { tmp.con.idx <- tmp.con.idx + 1L tmp.con[[tmp.con.idx]] <- list( op = "==", lhs = tmp.list$plabel[ref.idx], rhs = tmp.list$plabel[o], user = 2L ) } } else { # new system: # - set $free elements to zero, and later to ref id tmp.list$free[other.idx] <- 0L # all but the first are non-free # but will get a duplicated number } # just to trick semTools, also add something in the label # column, *if* it is empty # update: 0.6-11 we keep this, because it shows the plabels # when eg group.equal = "loadings" for (i in all.idx) { if (nchar(tmp.list$label[i]) == 0L) { tmp.list$label[i] <- tmp.list$plabel[ref.idx] } } } # all free } # eq in eq.labels if (lav_debug()) { print(tmp.con) } # handle constraints (if any) (NOT per group, but overall - 0.4-11) if (length(tmp.con) > 0L) { n.con <- length(tmp.con) tmp.idx <- length(tmp.list$id) + seq_len(n.con) # grow tmp.list with length(tmp.con) extra rows tmp.list <- lapply(tmp.list, function(x) { if (is.character(x)) { c(x, rep("", n.con)) } else { c(x, rep(NA, n.con)) } }) # fill in some columns tmp.list$id[tmp.idx] <- tmp.idx tmp.list$lhs[tmp.idx] <- unlist(lapply(tmp.con, "[[", "lhs")) tmp.list$op[tmp.idx] <- unlist(lapply(tmp.con, "[[", "op")) tmp.list$rhs[tmp.idx] <- unlist(lapply(tmp.con, "[[", "rhs")) tmp.list$user[tmp.idx] <- unlist(lapply(tmp.con, "[[", "user")) # zero is nicer?
tmp.list$free[tmp.idx] <- rep(0L, n.con) tmp.list$exo[tmp.idx] <- rep(0L, n.con) tmp.list$block[tmp.idx] <- rep(0L, n.con) if (!is.null(tmp.list$group)) { if (is.character(tmp.list$group)) { tmp.list$group[tmp.idx] <- rep("", n.con) } else { tmp.list$group[tmp.idx] <- rep(0L, n.con) } } if (!is.null(tmp.list$level)) { if (is.character(tmp.list$level)) { tmp.list$level[tmp.idx] <- rep("", n.con) } else { tmp.list$level[tmp.idx] <- rep(0L, n.con) } } if (!is.null(tmp.list$class)) { if (is.character(tmp.list$class)) { tmp.list$class[tmp.idx] <- rep("", n.con) } else { tmp.list$class[tmp.idx] <- rep(0L, n.con) } } } # put lhs of := elements in label column def.idx <- which(tmp.list$op == ":=") tmp.list$label[def.idx] <- tmp.list$lhs[def.idx] # handle effect.coding related equality constraints if (is.logical(effect.coding) && effect.coding) { effect.coding <- c("loadings", "intercepts") } else if (!is.character(effect.coding)) { lav_msg_stop(gettext("effect.coding argument must be a character string")) } if (any(c("loadings", "intercepts") %in% effect.coding)) { tmp <- list() # for each block nblocks <- lav_partable_nblocks(tmp.list) for (b in seq_len(nblocks)) { # lv's for this block/set lv.names <- unique(tmp.list$lhs[tmp.list$op == "=~" & tmp.list$block == b]) if (length(lv.names) == 0L) { next } int.plabel <- character(0L) for (lv in lv.names) { # ind.names ind.names <- tmp.list$rhs[tmp.list$op == "=~" & tmp.list$block == b & tmp.list$lhs == lv] if ("loadings" %in% effect.coding) { # factor loadings indicators of this lv loadings.idx <- which(tmp.list$op == "=~" & tmp.list$block == b & tmp.list$rhs %in% ind.names & tmp.list$lhs == lv) # all free? if (length(loadings.idx) > 0L && all(tmp.list$free[loadings.idx] > 0L)) { # add eq constraint plabel <- tmp.list$plabel[loadings.idx] # Note: we write them as # .p1. == 3 - .p2. - .p3. # instead of # 3 == .p1.+.p2.+.p3. # as this makes it easier to translate things to # JAGS/stan tmp.lhs <- plabel[1] if (length(loadings.idx) > 1L) { tmp.rhs <- paste(length(loadings.idx), "-", paste(plabel[-1], collapse = "-"), sep = "" ) } else { tmp.rhs <- length(loadings.idx) } tmp$lhs <- c(tmp$lhs, tmp.lhs) tmp$op <- c(tmp$op, "==") tmp$rhs <- c(tmp$rhs, tmp.rhs) tmp$block <- c(tmp$block, 0L) tmp$user <- c(tmp$user, 2L) tmp$ustart <- c(tmp$ustart, as.numeric(NA)) } } # loadings if ("intercepts" %in% effect.coding) { # intercepts for indicators of this lv intercepts.idx <- which(tmp.list$op == "~1" & tmp.list$block == b & tmp.list$lhs %in% ind.names) # all free? 
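# (the constraint below is only added when every intercept involved is
#  still a free parameter; an illustrative note, not package code: for
#  p indicators, effect coding replaces marker-based identification by
#      l_1 + ... + l_p == p   (loadings average one, written as
#                              .p1. == p - .p2. - ... - .pp.)
#      t_1 + ... + t_p == 0   (intercepts sum to zero)
#  so that the latent variance and the latent mean can be freely
#  estimated on the scale of the indicators.)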
if (length(intercepts.idx) > 0L && all(tmp.list$free[intercepts.idx] > 0L)) { # 1) add eq constraint plabel <- tmp.list$plabel[intercepts.idx] tmp.lhs <- plabel[1] if (length(intercepts.idx) > 1L) { tmp.rhs <- paste("0-", paste(plabel[-1], collapse = "-"), sep = "" ) } else { tmp.rhs <- 0L } tmp$lhs <- c(tmp$lhs, tmp.lhs) tmp$op <- c(tmp$op, "==") tmp$rhs <- c(tmp$rhs, tmp.rhs) tmp$block <- c(tmp$block, 0L) tmp$user <- c(tmp$user, 2L) tmp$ustart <- c(tmp$ustart, as.numeric(NA)) # 2) release latent mean lv.int.idx <- which(tmp.list$op == "~1" & tmp.list$block == b & tmp.list$lhs == lv) # free only if automatically added if (length(lv.int.idx) > 0L && tmp.list$user[lv.int.idx] == 0L) { tmp.list$free[lv.int.idx] <- 1L } } } # intercepts } # lv } # blocks tmp.list <- lav_partable_merge(tmp.list, tmp) } # marker.int.zero if (meanstructure && marker.int.zero) { # for each block nblocks <- lav_partable_nblocks(tmp.list) for (b in seq_len(nblocks)) { # lv's for this block/set lv.names <- lav_partable_vnames(tmp.list, type = "lv.regular", block = b ) if (length(lv.names) == 0L) { next } # markers for this block lv.marker <- lav_partable_vnames(tmp.list, type = "lv.marker", block = b ) # fix marker intercepts to zero marker.idx <- which(tmp.list$op == "~1" & tmp.list$lhs %in% lv.marker & tmp.list$block == b & tmp.list$user == 0L) tmp.list$free[marker.idx] <- 0L tmp.list$ustart[marker.idx] <- 0 # free latent means lv.idx <- which(tmp.list$op == "~1" & tmp.list$lhs %in% lv.names & tmp.list$block == b & tmp.list$user == 0L) tmp.list$free[lv.idx] <- 1L tmp.list$ustart[lv.idx] <- as.numeric(NA) } # block } # mg.lv.variances if (ngroups > 1L && "mg.lv.variances" %in% effect.coding) { tmp <- list() # do not include 'EFA' lv's if (!is.null(tmp.list$efa)) { lv.names <- unique(tmp.list$lhs[tmp.list$op == "=~" & !nchar(tmp.list$efa) > 0L]) } else { lv.names <- unique(tmp.list$lhs[tmp.list$op == "=~"]) } group.values <- lav_partable_group_values(tmp.list) for (lv in lv.names) { # factor variances lv.var.idx <- which(tmp.list$op == "~~" & tmp.list$lhs == lv & tmp.list$rhs == tmp.list$lhs & tmp.list$lhs == lv) # all free (but the first?)
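# (the first variance may already be fixed -- e.g. with std.lv -- hence
#  only the [-1] elements are checked; the constraint written below
#  makes the latent variances sum to the number of groups, i.e. they
#  average one, which is what allows the first group's variance to be
#  freed afterwards.)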
if (length(lv.var.idx) > 0L && all(tmp.list$free[lv.var.idx][-1] > 0L)) { # 1) add eq constraint plabel <- tmp.list$plabel[lv.var.idx] tmp.lhs <- plabel[1] if (length(lv.var.idx) > 1L) { tmp.rhs <- paste(length(lv.var.idx), "-", paste(plabel[-1], collapse = "-"), sep = "" ) } else { tmp.rhs <- length(lv.var.idx) } tmp$lhs <- c(tmp$lhs, tmp.lhs) tmp$op <- c(tmp$op, "==") tmp$rhs <- c(tmp$rhs, tmp.rhs) tmp$block <- c(tmp$block, 0L) tmp$user <- c(tmp$user, 2L) tmp$ustart <- c(tmp$ustart, as.numeric(NA)) # 2) free lv variances first group lv.var.g1.idx <- which(tmp.list$op == "~~" & tmp.list$group == group.values[1] & tmp.list$lhs == lv & tmp.list$rhs == tmp.list$lhs & tmp.list$lhs == lv) # free only if automatically added if (length(lv.var.g1.idx) > 0L && tmp.list$user[lv.var.g1.idx] == 0L) { tmp.list$free[lv.var.g1.idx] <- 1L } } } # lv tmp.list <- lav_partable_merge(tmp.list, tmp) } # mg.lv.efa.variances if (ngroups > 1L && "mg.lv.efa.variances" %in% effect.coding) { tmp <- list() # only 'EFA' lv's if (!is.null(tmp.list$efa)) { lv.names <- unique(tmp.list$lhs[tmp.list$op == "=~" & nchar(tmp.list$efa) > 0L]) } else { lv.names <- character(0L) } group.values <- lav_partable_group_values(tmp.list) for (lv in lv.names) { # factor variances lv.var.idx <- which(tmp.list$op == "~~" & tmp.list$lhs == lv & tmp.list$rhs == tmp.list$lhs & tmp.list$lhs == lv) # all free (but the first?) if (length(lv.var.idx) > 0L && all(tmp.list$free[lv.var.idx][-1] > 0L)) { # 1) add eq constraint plabel <- tmp.list$plabel[lv.var.idx] tmp.lhs <- plabel[1] if (length(lv.var.idx) > 1L) { tmp.rhs <- paste(length(lv.var.idx), "-", paste(plabel[-1], collapse = "-"), sep = "" ) } else { tmp.rhs <- length(lv.var.idx) } tmp$lhs <- c(tmp$lhs, tmp.lhs) tmp$op <- c(tmp$op, "==") tmp$rhs <- c(tmp$rhs, tmp.rhs) tmp$block <- c(tmp$block, 0L) tmp$user <- c(tmp$user, 2L) tmp$ustart <- c(tmp$ustart, as.numeric(NA)) # 2) free lv variances first group lv.var.g1.idx <- which(tmp.list$op == "~~" & tmp.list$group == group.values[1] & tmp.list$lhs == lv & tmp.list$rhs == tmp.list$lhs & tmp.list$lhs == lv) # free only if automatically added if (length(lv.var.g1.idx) > 0L && tmp.list$user[lv.var.g1.idx] == 0L) { tmp.list$free[lv.var.g1.idx] <- 1L } } } # lv tmp.list <- lav_partable_merge(tmp.list, tmp) } # count free parameters idx.free <- which(tmp.list$free > 0L) tmp.list$free[idx.free] <- seq_along(idx.free) # new in 0.6-11: add free counter to this element (as in < 0.5-18) # unless we have other constraints if (ceq.simple) { idx.equal <- which(eq.id > 0) tmp.list$free[idx.equal] <- tmp.list$free[eq.id[idx.equal]] } # new in 0.6-14: add 'da' entries to reflect data-based order of ov's # now via attribute "ovda" attr(tmp.list, "ovda") <- ov.names.data # backwards compatibility... if (!is.null(tmp.list$unco)) { tmp.list$unco[idx.free] <- seq_along(idx.free) } if (lav_debug()) { cat("[lavaan DEBUG] lavParTable\n") print(as.data.frame(tmp.list)) } # data.frame? if (as.data.frame.) { tmp.list <- as.data.frame(tmp.list, stringsAsFactors = FALSE) attr(tmp.list, "ovda") <- ov.names.data } else { tmp.list <- lav_partable_set_cache(tmp.list) # add cached "pta" data } tmp.list } lavaan/R/lav_samplestats_icov.R0000644000176200001440000000267014627656441016306 0ustar liggesuserslav_samplestats_icov <- function(COV = NULL, ridge = 0.0, x.idx = integer(0L), ngroups = 1L, g = 1L) { tmp <- try(inv.chol(COV, logdet = TRUE), silent = TRUE) # what if this fails...
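# (inv.chol() can fail when COV is not positive definite, since it
#  inverts via the Cholesky factor; the same idea in base R -- an
#  illustrative sketch, not package code, for a symmetric matrix S:
#    L <- chol(S)                      # errors if S is not PD
#    S.inv <- chol2inv(L)              # solve(S) via the factor
#    logdet <- 2 * sum(log(diag(L)))   # log|S| from the diagonal
#  the error branch below tries to repair a near-singular COV.)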
# ridge exogenous part only (if any); this may help for GLS (but not ML) if (inherits(tmp, "try-error")) { if (length(x.idx) > 0L && ridge > 0) { # maybe, we can fix it by gently ridging the exo variances ridge.eps <- ridge diag(COV)[x.idx] <- diag(COV)[x.idx] + ridge.eps # try again tmp <- try(inv.chol(COV, logdet = TRUE), silent = TRUE) if (inherits(tmp, "try-error")) { # fatal stop after all lav_msg_stop(gettext( "sample covariance matrix is not positive-definite")) } else { cov.log.det <- attr(tmp, "logdet") attr(tmp, "logdet") <- NULL icov <- tmp # give a warning if (ngroups > 1) { lav_msg_warn(gettextf( "sample covariance matrix in group: %s is not positive-definite", g)) } else { lav_msg_warn(gettext( "sample covariance matrix is not positive-definite")) } } } else { # fatal stop lav_msg_stop(gettext("sample covariance matrix is not positive-definite")) } } else { cov.log.det <- attr(tmp, "logdet") attr(tmp, "logdet") <- NULL icov <- tmp } list(icov = icov, cov.log.det = cov.log.det) } lavaan/R/lav_model_properties.R0000644000176200001440000000636414627656441016306 0ustar liggesusers# collect information about the model that we can use # (eg. is theta diagonal or not, is the structural model recursive or not, # is the model just a regression model, etc) # # initial version: YR 15 March 2021 # - YR 05 Oct 2021: use det(I - B) to check if B is acyclic # - YR 11 Nov 2021: if no latents, and conditional.x = TRUE, we may have no # beta matrix # note: there is no 'lavmodel' yet, because we call this in lav_model.R lav_model_properties <- function(GLIST, lavpartable = NULL, nmat = NULL, m.free.idx = NULL) { lavpta <- lav_partable_attributes(lavpartable) nblocks <- lavpta$nblocks # is the model a univariate/multivariate linear multiple regression # model (per block)? uvreg <- logical(nblocks) uvord <- logical(nblocks) mvreg <- logical(nblocks) acyclic <- rep(as.logical(NA), nblocks) bowfree <- rep(as.logical(NA), nblocks) nexo <- integer(nblocks) for (g in seq_len(nblocks)) { # at least 1 regression if (length(lavpta$vnames$eqs.y[[g]]) == 0L) { next } # find beta index for this block mm.in.block <- 1:nmat[g] + cumsum(c(0L, nmat))[g] MLIST <- GLIST[mm.in.block] beta.idx <- which(names(MLIST) == "beta") + cumsum(c(0L, nmat))[g] psi.idx <- which(names(MLIST) == "psi") + cumsum(c(0L, nmat))[g] if (length(beta.idx) > 0L) { # 1. acyclic? B <- GLIST[[beta.idx]] # keep fixed values (if any); fill in 1 in all 'free' positions B[m.free.idx[[beta.idx]]] <- 1 IminB <- diag(nrow(B)) - B # if B is acyclic, we should be able to permute the rows/cols of B # so that B is upper/lower triangular, and so det(I-B) = 1 if (det(IminB) == 1) { acyclic[g] <- TRUE } else { acyclic[g] <- FALSE } # 2. bow-free? B.one <- as.integer(B != 0) Psi <- GLIST[[psi.idx]] # keep fixed values (if any); fill in 1 in all 'free' positions Psi[m.free.idx[[psi.idx]]] <- 1 Psi.one <- as.integer(Psi != 0) Both.one <- B.one + Psi.one if (any(Both.one > 1)) { bowfree[g] <- FALSE } else { bowfree[g] <- TRUE } } else { # perhaps conditional.x = TRUE? # if there is no BETA, then we only have Gamma, and the # system must be acyclic acyclic[g] <- TRUE # and also bowfree bowfree[g] <- TRUE } # no latent variables, at least 1 dependent variable if (lavpta$nfac[[g]] > 0L) { next } # no mediators if (length(lavpta$vnames$eqs.y[[g]]) != length(lavpta$vnames$ov.y[[g]])) { next }
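# (illustrative sketch of the acyclicity test above, not package code:
#    B <- matrix(0, 3, 3); B[2, 1] <- 1; B[3, 2] <- 1  # x1 -> x2 -> x3
#    det(diag(3) - B)   # 1: B can be permuted to triangular form
#    B[1, 3] <- 1       # close a feedback loop x3 -> x1
#    det(diag(3) - B)   # 0, i.e. != 1: flagged as non-acyclic
#  )
# categorical y?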
if (length(lavpta$vnames$ov.ord[[g]]) > 0L) { # we only flag the univariate version if (length(lavpta$vnames$ov.ord[[g]]) == 1L && length(lavpta$vnames$ov.y[[g]]) == 1L && lavpta$vnames$ov.ord[[g]][1] == lavpta$vnames$ov.y[[g]][1]) { uvord[g] <- TRUE } # mvreg? } else { if (length(lavpta$vnames$ov.y[[g]]) > 1L) { mvreg[g] <- TRUE } else { uvreg[g] <- TRUE } } nexo[g] <- length(lavpta$vnames$eqs.x[[g]]) } # g modprop <- list( uvreg = uvreg, uvord = uvord, mvreg = mvreg, nexo = nexo, acyclic = acyclic, bowfree = bowfree ) modprop } lavaan/R/ctr_pml_plrt.R0000644000176200001440000003444514627656440014571 0ustar liggesusersctr_pml_plrt <- function(lavobject = NULL, lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavpartable = NULL, lavoptions = NULL, x = NULL, VCOV = NULL, lavcache = NULL) { lavpta <- NULL if (!is.null(lavobject)) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavoptions <- lavobject@Options lavsamplestats <- lavobject@SampleStats lavcache <- lavobject@Cache lavpartable <- lav_partable_set_cache(lavobject@ParTable, lavobject@pta) lavpta <- lavobject@pta } if (is.null(lavpta)) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } if (is.null(x)) { # compute 'fx' = objective function value # (NOTE: since 0.5-18, NOT divided by N!!) fx <- lav_model_objective( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache ) H0.fx <- as.numeric(fx) H0.fx.group <- attr(fx, "fx.group") } else { H0.fx <- attr(attr(x, "fx"), "fx.pml") H0.fx.group <- attr(attr(x, "fx"), "fx.group") } # fit a saturated model 'fittedSat' ModelSat <- lav_partable_unrestricted( lavobject = NULL, lavdata = lavdata, lavoptions = lavoptions, lavpta = lavpta, lavsamplestats = lavsamplestats ) # FIXME: se="none", test="none"?? Options <- lavoptions Options$se <- "none" Options$test <- "none" Options$baseline <- FALSE Options$h1 <- FALSE fittedSat <- lavaan(ModelSat, slotOptions = Options, verbose = FALSE, slotSampleStats = lavsamplestats, slotData = lavdata, slotCache = lavcache ) fx <- lav_model_objective( lavmodel = fittedSat@Model, lavsamplestats = fittedSat@SampleStats, lavdata = fittedSat@Data, lavcache = fittedSat@Cache ) SAT.fx <- as.numeric(fx) SAT.fx.group <- attr(fx, "fx.group") # we also need a `saturated model', but where the moments are based # on the model-implied sample statistics under H0 ModelSat2 <- lav_partable_unrestricted( lavobject = NULL, lavdata = lavdata, lavoptions = lavoptions, lavsamplestats = NULL, sample.cov = computeSigmaHat(lavmodel), sample.mean = computeMuHat(lavmodel), sample.th = computeTH(lavmodel), sample.th.idx = lavsamplestats@th.idx ) Options2 <- Options Options2$optim.method <- "none" Options2$optim.force.converged <- TRUE Options2$check.start <- FALSE Options2$check.gradient <- FALSE Options2$check.post <- FALSE Options2$check.vcov <- FALSE fittedSat2 <- lavaan(ModelSat2, slotOptions = Options2, verbose = FALSE, slotSampleStats = lavsamplestats, slotData = lavdata, slotCache = lavcache ) # the code below was contributed by Myrsini Katsikatsou (Jan 2015) # for now, only a single group is supported: # g = 1L ########################### The code for PLRT for overall goodness of fit # First define the number of non-redundant elements of the (fitted) # covariance/correlation matrix of the underlying variables. 
# nvar <- lavmodel@nvar[[g]] # dSat <- nvar*(nvar-1)/2 # if(length(lavmodel@num.idx[[g]]) > 0L) { # dSat <- dSat + length(lavmodel@num.idx[[g]]) # } # select `free' parameters (excluding thresholds) from fittedSat2 model PT.Sat2 <- fittedSat2@ParTable dSat.idx <- PT.Sat2$free[PT.Sat2$free > 0L & PT.Sat2$op != "|"] # remove thresholds # Secondly, we need to specify the indices of the rows/columns of vcov(), # hessian, and variability matrix that refer to all SEM parameters # except thresholds. PT <- lavpartable index.par <- PT$free[PT$free > 0L & PT$op != "|"] # Thirdly, specify the sample size. # nsize <- lavdata@nobs[[g]] nsize <- lavsamplestats@ntotal # Now we can proceed to the computation of the quantities needed for PLRT. # Briefly, the PLRT is the difference of two quadratic forms. # To compute the first and second moment adjusted PLRT we should compute # the asymptotic mean and variance of each quadratic quantity as well as # their asymptotic covariance. ##### Section 1. Compute the asymptotic mean and variance ##### of the first quadratic quantity # Below I assume that lavobject is the output of lavaan function. I guess # vcov(lavobject) can be substituted by VCOV object inside lavaan function # defined at lines 703-708. But what is the object inside lavaan function # for getHessian(lavobject)? if (is.null(VCOV)) { lavoptions$se <- "robust.huber.white" VCOV <- lav_model_vcov( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavpartable = lavpartable, lavcache = lavcache ) } InvG_to_psipsi_attheta0 <- (lavsamplestats@ntotal * VCOV)[index.par, index.par, drop = FALSE] # G^psipsi(theta0) # below the lavaan function getHessian is used # Hattheta0 <- (-1) * H0.Hessian # Hattheta0 <- H0.Hessian # InvHattheta0 <- solve(Hattheta0) InvHattheta0 <- attr(VCOV, "E.inv") InvH_to_psipsi_attheta0 <- InvHattheta0[index.par, index.par, drop = FALSE] # H^psipsi(theta0) if (lavmodel@eq.constraints) { IN <- InvH_to_psipsi_attheta0 IN.npar <- ncol(IN) # create `bordered' matrix if (nrow(lavmodel@con.jac) > 0L) { H <- lavmodel@con.jac[, index.par, drop = FALSE] inactive.idx <- attr(H, "inactive.idx") lambda <- lavmodel@con.lambda # lagrangean coefs if (length(inactive.idx) > 0L) { H <- H[-inactive.idx, , drop = FALSE] lambda <- lambda[-inactive.idx] } if (nrow(H) > 0L) { H0 <- matrix(0, nrow(H), nrow(H)) H10 <- matrix(0, ncol(IN), nrow(H)) DL <- 2 * diag(lambda, nrow(H), nrow(H)) # FIXME: better include inactive + slacks?? E3 <- rbind( cbind(IN, H10, t(H)), cbind(t(H10), DL, H0), cbind(H, H0, H0) ) Inv_of_InvH_to_psipsi_attheta0 <- MASS::ginv(IN)[1:IN.npar, 1:IN.npar, drop = FALSE] } else { Inv_of_InvH_to_psipsi_attheta0 <- solve(IN) } } } else { # YR 26 June 2018: check for empty index.par (eg independence model) if (length(index.par) > 0L) { Inv_of_InvH_to_psipsi_attheta0 <- solve(InvH_to_psipsi_attheta0) # [H^psipsi(theta0)]^(-1) } else { Inv_of_InvH_to_psipsi_attheta0 <- matrix(0, 0, 0) } } H0tmp_prod1 <- Inv_of_InvH_to_psipsi_attheta0 %*% InvG_to_psipsi_attheta0 H0tmp_prod2 <- H0tmp_prod1 %*% H0tmp_prod1 E_tww <- sum(diag(H0tmp_prod1)) # expected mean of first quadratic quantity var_tww <- 2 * sum(diag(H0tmp_prod2)) # variance of first quadratic quantity ##### Section 2: Compute the asymptotic mean and variance ##### of the second quadratic quantity. 
# Now we need to evaluate the fitted (polychoric) correlation/ covariance # matrix using the estimates of SEM parameters derived under the fitted model # which is the model of the null hypothesis. We also need to compute the # vcov matrix of these estimates (estimates of polychoric correlations) # as well as the related hessian and variability matrix. tmp.options <- fittedSat2@Options tmp.options$se <- lavoptions$se VCOV.Sat2 <- lav_model_vcov( lavmodel = fittedSat2@Model, lavsamplestats = fittedSat2@SampleStats, lavoptions = tmp.options, lavdata = fittedSat2@Data, lavpartable = fittedSat2@ParTable, lavcache = fittedSat2@Cache, use.ginv = TRUE ) InvG_to_sigmasigma_attheta0 <- lavsamplestats@ntotal * VCOV.Sat2[dSat.idx, dSat.idx, drop = FALSE] # G^sigmasigma(theta0) # Hattheta0 <- (-1)* getHessian(fittedSat2) # Hattheta0 <- getHessian(fittedSat2) # InvHattheta0 <- solve(Hattheta0) InvHattheta0 <- attr(VCOV.Sat2, "E.inv") InvH_to_sigmasigma_attheta0 <- InvHattheta0[dSat.idx, dSat.idx, drop = FALSE] # H^sigmasigma(theta0) # Inv_of_InvH_to_sigmasigma_attheta0 <- solve(InvH_to_sigmasigma_attheta0) # #[H^sigmasigma(theta0)]^(-1) Inv_of_InvH_to_sigmasigma_attheta0 <- MASS::ginv(InvH_to_sigmasigma_attheta0, tol = .Machine$double.eps^(3 / 4) ) H1tmp_prod1 <- Inv_of_InvH_to_sigmasigma_attheta0 %*% InvG_to_sigmasigma_attheta0 H1tmp_prod2 <- H1tmp_prod1 %*% H1tmp_prod1 E_tzz <- sum(diag(H1tmp_prod1)) # expected mean of the second # quadratic quantity var_tzz <- 2 * sum(diag(H1tmp_prod2)) # variance of the second # quadratic quantity ##### Section 3: Compute the asymptotic covariance of ##### the two quadratic quantities drhodpsi_MAT <- vector("list", length = lavsamplestats@ngroups) group.values <- lav_partable_group_values(fittedSat2@ParTable) for (g in 1:lavsamplestats@ngroups) { # delta.g <- computeDelta(lavmodel)[[g]] # [[1]] to be substituted by g? # The above gives the derivatives of thresholds and polychoric correlations # with respect to SEM param (including thresholds) evaluated under H0. # From deltamat we need to exclude the rows and columns referring # to thresholds. 
# For this: # order of the rows: first the thresholds, then the correlations # we need to map the rows of delta.g to the rows/cols of H_at_vartheta0 # of H1 PT <- fittedSat2@ParTable PT$label <- lav_partable_labels(PT) free.idx <- which(PT$free > 0 & PT$op != "|" & PT$group == group.values[g]) PARLABEL <- PT$label[free.idx] # for now, we can assume that computeDelta will always return # the thresholds first, then the correlations # # later, we should add a (working) add.labels = TRUE option to # computeDelta # th.names <- lavobject@pta$vnames$th[[g]] # ov.names <- lavobject@pta$vnames$ov[[g]] # th.names <- lavNames(lavpartable, "th") # ov.names <- lavNames(lavpartable, "ov.nox") # ov.names.x <- lavNames(lavpartable, "ov.x") # tmp <- utils::combn(ov.names, 2) # cor.names <- paste(tmp[1,], "~~", tmp[2,], sep = "") # added by YR - 22 Oct 2017 ##################################### # ov.names.x <- lavNames(lavpartable, "ov.x") # if(length(ov.names.x)) { # slope.names <- apply(expand.grid(ov.names, ov.names.x), 1L, # paste, collapse = "~") # } else { # slope.names <- character(0L) # } ################################################################# # NAMES <- c(th.names, slope.names, cor.names) # added by YR - 26 April 2018, for 0.6-1 # we now can get 'labelled' delta rownames delta.g <- lav_object_inspect_delta_internal( lavmodel = lavmodel, lavdata = lavdata, lavpartable = lavpartable, add.labels = TRUE, add.class = FALSE, drop.list.single.group = FALSE )[[g]] NAMES <- rownames(delta.g) if (g > 1L) { NAMES <- paste(NAMES, ".g", g, sep = "") } par.idx <- match(PARLABEL, NAMES) if (any(is.na(par.idx))) { lav_msg_warn(gettextf( "mismatch between DELTA labels and PAR labels! PARLABEL: %1$s, DELTA LABELS: %2$s", lav_msg_view(PARLABEL), lav_msg_view(NAMES))) } drhodpsi_MAT[[g]] <- delta.g[par.idx, index.par, drop = FALSE] } drhodpsi_mat <- do.call(rbind, drhodpsi_MAT) tmp_prod <- t(drhodpsi_mat) %*% Inv_of_InvH_to_sigmasigma_attheta0 %*% drhodpsi_mat %*% InvG_to_psipsi_attheta0 %*% H0tmp_prod1 cov_tzztww <- 2 * sum(diag(tmp_prod)) ##### Section 4: compute the adjusted PLRT and its p-value # PLRTH0Sat <- 2*nsize*(lavfit@fx - fittedSat@Fit@fx) PLRTH0Sat <- 2 * (H0.fx - SAT.fx) PLRTH0Sat.group <- 2 * (H0.fx.group - SAT.fx.group) asym_mean_PLRTH0Sat <- E_tzz - E_tww # catch zero value for asym_mean_PLRTH0Sat if (asym_mean_PLRTH0Sat == 0) { asym_var_PLRTH0Sat <- 0 scaling.factor <- as.numeric(NA) FSA_PLRT_SEM <- as.numeric(NA) adjusted_df <- as.integer(NA) pvalue <- as.numeric(NA) } else if (any(is.na(c(var_tzz, var_tww, cov_tzztww)))) { asym_var_PLRTH0Sat <- as.numeric(NA) scaling.factor <- as.numeric(NA) FSA_PLRT_SEM <- as.numeric(NA) adjusted_df <- as.integer(NA) pvalue <- as.numeric(NA) } else { asym_var_PLRTH0Sat <- var_tzz + var_tww - 2 * cov_tzztww scaling.factor <- (asym_mean_PLRTH0Sat / (asym_var_PLRTH0Sat / 2)) FSA_PLRT_SEM <- (asym_mean_PLRTH0Sat / (asym_var_PLRTH0Sat / 2)) * PLRTH0Sat adjusted_df <- (asym_mean_PLRTH0Sat * asym_mean_PLRTH0Sat) / (asym_var_PLRTH0Sat / 2) # In some very few cases (simulations show this mainly in small # sample sizes) the adjusted_df is a negative number; we should then # print a warning like: "The adjusted df is computed to be a negative number, # and therefore the first and second moment adjusted PLRT is not computed."
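# (illustrative note, not package code: this is a two-moment,
#  Satterthwaite-style adjustment -- writing m = asym_mean_PLRTH0Sat and
#  v = asym_var_PLRTH0Sat, the code above uses
#      scaling.factor = m / (v/2),   adjusted_df = m^2 / (v/2),
#  so that scaling.factor * PLRT has the same first two moments as a
#  chi-square variate with adjusted_df degrees of freedom.)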
if (scaling.factor > 0) { pvalue <- 1 - pchisq(FSA_PLRT_SEM, df = adjusted_df) } else { pvalue <- as.numeric(NA) } } list( PLRTH0Sat = PLRTH0Sat, PLRTH0Sat.group = PLRTH0Sat.group, stat = FSA_PLRT_SEM, df = adjusted_df, p.value = pvalue, scaling.factor = scaling.factor ) } ############################################################################ ctr_pml_aic_bic <- function(lavobject) { ########################## The code for the PL versions of AIC and BIC # The following should be done because it is not the pl log-likelihood # that is maximized but a fit function that should be minimized. So, we # should find the value of log-PL at the estimated parameters through the # value of the fitted function. # The following may need to be updated if we change the fit function # so that it is correct for the case of missing values as well. logPL <- lavobject@optim$logl nsize <- lavobject@SampleStats@ntotal # inverted observed unit information H.inv <- lavTech(lavobject, "inverted.information.observed") # first order unit information J <- lavTech(lavobject, "information.first.order") # trace (J %*% H.inv) = sum (J * t(H.inv)) dimTheta <- sum(J * H.inv) # computations of PL versions of AIC and BIC PL_AIC <- (-2) * logPL + 2 * dimTheta PL_BIC <- (-2) * logPL + dimTheta * log(nsize) list(logPL = logPL, PL_AIC = PL_AIC, PL_BIC = PL_BIC) } lavaan/R/zzz_OLDNAMES.R0000644000176200001440000000073114627656441014177 0ustar liggesusers# keep 'old' names for some function names that have been used # (or are still being used) by external packages lavJacobianD <- lav_func_jacobian_simple lavJacobianC <- lav_func_jacobian_complex lavGradientC <- lav_func_gradient_complex # Myrsini getHessian <- lav_object_inspect_hessian getVariability <- lav_object_inspect_firstorder # rsem computeExpectedInformation <- lav_model_information_expected # only for simsem .... getParameterLabels <- lav_partable_labels lavaan/R/lav_predict_y.R0000644000176200001440000002407114627656441014707 0ustar liggesusers# This file will (eventually) contain functions that can be used to # 'predict' the values of outcome variables (y), given the values of # input variables (x). # first version YR 2 Nov 2022 # method = "conditional.mean" is based on the following article: # Mark de Rooij, Julian D. Karch, Marjolein Fokkema, Zsuzsa Bakk, Bunga Citra # Pratiwi & Henk Kelderman (2022) SEM-Based Out-of-Sample Predictions, # Structural Equation Modeling: A Multidisciplinary Journal # DOI:10.1080/10705511.2022.2061494 # YR 31 Jan 2023: we always 'force' meanstructure = TRUE (for now) # main function lavPredictY <- function(object, newdata = NULL, ynames = lavNames(object, "ov.y"), xnames = lavNames(object, "ov.x"), method = "conditional.mean", label = TRUE, assemble = TRUE, force.zero.mean = FALSE, lambda = 0) { stopifnot(inherits(object, "lavaan")) lavmodel <- object@Model lavdata <- object@Data lavimplied <- object@implied # check meanstructure if (!lavmodel@meanstructure) { lavimplied$mean <- lapply(object@SampleStats@mean, as.matrix) } # need full data set if (is.null(newdata)) { # use internal copy: if (lavdata@data.type != "full") { lav_msg_stop(gettext( "sample statistics were used for fitting and newdata is empty" )) } else if (is.null(lavdata@X[[1]])) { lav_msg_stop(gettext("no local copy of data; FIXME!")) } else { data.obs <- lavdata@X ov.names <- lavdata@ov.names } # eXo <- lavdata@eXo } else { # newdata is given!
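# (before processing newdata, an illustrative note on the prediction
#  rule applied further below, not package code: with model-implied
#  moments mu and Sigma partitioned over x and y, method =
#  "conditional.mean" predicts, for a ridge penalty lambda >= 0,
#    yhat = mu_y + t(Sigma_xy) %*% solve(Sigma_xx + lambda * I) %*% (x - mu_x)
#  cf. de Rooij et al. (2022), cited in the header of this file.)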
# create lavData object OV <- lavdata@ov newData <- lavData( data = newdata, group = lavdata@group, ov.names = lavdata@ov.names, ov.names.x = lavdata@ov.names.x, ordered = OV$name[OV$type == "ordered"], lavoptions = list( std.ov = lavdata@std.ov, group.label = lavdata@group.label, missing = "ml.x", # always! warn = TRUE ), allow.single.case = TRUE ) # if ordered, check if number of levels is still the same (new in 0.6-7) if (lavmodel@categorical) { orig.ordered.idx <- which(lavdata@ov$type == "ordered") orig.ordered.lev <- lavdata@ov$nlev[orig.ordered.idx] match.new.idx <- match( lavdata@ov$name[orig.ordered.idx], newData@ov$name ) new.ordered.lev <- newData@ov$nlev[match.new.idx] if (any(orig.ordered.lev - new.ordered.lev != 0)) { lav_msg_stop(gettext( "mismatch in the number of categories for some ordered variables in newdata compared to original data." )) } } data.obs <- newData@X # eXo <- newData@eXo ov.names <- newData@ov.names } # newdata # check ynames if (length(ynames) == 0L) { lav_msg_stop(gettext( "please specify the y-variables in the ynames= argument" )) } else if (!is.list(ynames)) { ynames <- rep(list(ynames), lavdata@ngroups) } # check xnames if (length(xnames) == 0L) { lav_msg_stop(gettext( "please specify the x-variables in the xnames= argument" )) } else if (!is.list(xnames)) { xnames <- rep(list(xnames), lavdata@ngroups) } # create y.idx and x.idx y.idx <- x.idx <- vector("list", lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { # ynames in ov.names for this group? missing.idx <- which(!ynames[[g]] %in% ov.names[[g]]) if (length(missing.idx) > 0L) { lav_msg_stop( gettext( "some variable names in ynames do not appear in the dataset:" ), lav_msg_view(ynames[[g]][missing.idx], "none") ) } else { y.idx[[g]] <- match(ynames[[g]], ov.names[[g]]) } # xnames in ov.names for this group? missing.idx <- which(!xnames[[g]] %in% ov.names[[g]]) if (length(missing.idx) > 0L) { lav_msg_stop( gettext( "some variable names in xnames do not appear in the dataset:" ), lav_msg_view(xnames[[g]][missing.idx], "none") ) } else { x.idx[[g]] <- match(xnames[[g]], ov.names[[g]]) } } # prediction method method <- tolower(method) if (method == "conditional.mean") { out <- lav_predict_y_conditional_mean( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavimplied = lavimplied, data.obs = data.obs, y.idx = y.idx, x.idx = x.idx, force.zero.mean = force.zero.mean, lambda = lambda ) } else { lav_msg_stop(gettext("method must be \"conditional.mean\" (for now).")) } # label? if (label) { # column names for (g in seq_len(lavdata@ngroups)) { colnames(out[[g]]) <- ynames[[g]] } # group.labels if (lavdata@ngroups > 1L) { names(out) <- lavdata@group.label } } # lavaan.matrix out <- lapply(out, "class<-", c("lavaan.matrix", "matrix")) if (lavdata@ngroups == 1L) { res <- out[[1L]] } else { res <- out } # assemble multiple groups into a single data.frame? if (lavdata@ngroups > 1L && assemble) { if (!is.null(newdata)) { lavdata <- newData } DATA <- matrix(as.numeric(NA), nrow = sum(unlist(lavdata@norig)), ncol = ncol(out[[1L]]) ) # assume == per g colnames(DATA) <- colnames(out[[1L]]) for (g in seq_len(lavdata@ngroups)) { DATA[lavdata@case.idx[[g]], ] <- out[[g]] } DATA <- as.data.frame(DATA, stringsAsFactors = FALSE) if (!is.null(newdata)) { DATA[, lavdata@group] <- newdata[, lavdata@group] } else { # add group DATA[, lavdata@group] <- rep(as.character(NA), nrow(DATA)) if (lavdata@missing == "listwise") { # we will lose the group label of omitted variables!
DATA[unlist(lavdata@case.idx), lavdata@group] <- rep(lavdata@group.label, unlist(lavdata@nobs)) } else { DATA[unlist(lavdata@case.idx), lavdata@group] <- rep(lavdata@group.label, unlist(lavdata@norig)) } } res <- DATA } res } # method = "conditional.mean" lav_predict_y_conditional_mean <- function( lavobject = NULL, # for convenience # object ingredients lavmodel = NULL, lavdata = NULL, lavimplied = NULL, # new data data.obs = NULL, # y and x y.idx = NULL, x.idx = NULL, # options force.zero.mean = FALSE, lambda = 0, level = 1L) { # not used for now # full object? if (inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavdata <- lavobject@Data # lavsamplestats <- lavobject@SampleStats lavimplied <- lavobject@implied } else { stopifnot( !is.null(lavmodel), !is.null(lavdata), # !is.null(lavsamplestats), !is.null(lavimplied) ) } # data.obs? if (is.null(data.obs)) { data.obs <- lavdata@X } # checks if (lavmodel@categorical) { lav_msg_stop(gettext("no support for categorical data (yet).")) } if (lavdata@nlevels > 1L) { lav_msg_stop(gettext("no support for multilevel data (yet).")) } # conditional.x? if (lavmodel@conditional.x) { SigmaHat <- computeSigmaHatJoint(lavmodel) if (lavmodel@meanstructure) { MuHat <- computeMuHatJoint(lavmodel) } } else { SigmaHat <- lavimplied$cov MuHat <- lavimplied$mean } # output container YPRED <- vector("list", length = lavdata@ngroups) # run over all groups for (g in 1:lavdata@ngroups) { # multiple levels? if (lavdata@nlevels > 1L) { # TODO! lav_msg_stop(gettext("no support for multilevel data (yet)!")) } else { data.obs.g <- data.obs[[g]] # model-implied variance-covariance matrix for this group cov.g <- SigmaHat[[g]] # model-implied mean vector for this group if (force.zero.mean) { mean.g <- rep(0, ncol(data.obs.g)) } else { mean.g <- as.numeric(MuHat[[g]]) } # indices (in ov.names) y.idx.g <- y.idx[[g]] x.idx.g <- x.idx[[g]] # partition y/x Sxx <- cov.g[x.idx.g, x.idx.g, drop = FALSE] Sxy <- cov.g[x.idx.g, y.idx.g, drop = FALSE] # x-data only Xtest <- data.obs.g[, x.idx.g, drop = FALSE] # mx/my mx <- mean.g[x.idx.g] my <- mean.g[y.idx.g] # center using mx Xtest <- t(t(Xtest) - mx) # Apply regularization Sxx <- Sxx + lambda * diag(nrow(Sxx)) # prediction rule tmp <- Xtest %*% solve(Sxx, Sxy) YPRED[[g]] <- t(t(tmp) + my) } # single level } # g YPRED } # Takes a sequence of lambdas and performs k-fold cross-validation to determine # the best lambda lavPredictY_cv <- function( object, data = NULL, xnames = lavNames(object, "ov.x"), ynames = lavNames(object, "ov.y"), n.folds = 10L, lambda.seq = seq(0, 1, 0.1)) { # object should be (or inherit from) a lavaan object stopifnot(inherits(object, "lavaan")) # results container results <- matrix(as.numeric(NA), nrow = length(lambda.seq) * n.folds, ncol = 2L ) colnames(results) <- c("mse", "lambda") # shuffle folds folds <- sample(rep(1:n.folds, length.out = nrow(data))) # extract Y-data Y <- as.matrix(data[, ynames, drop = FALSE]) j <- 0L for (i in 1:n.folds) { indis <- which(folds == i) fold.fit <- try(update(object, data = data[-indis, , drop = FALSE], warn = FALSE ), silent = TRUE) if (inherits(fold.fit, "try-error")) { lav_msg_warn(gettextf("failed fit in fold %s", i)) next } for (l in lambda.seq) { j <- j + 1L yhat <- lavPredictY( fold.fit, newdata = data[indis, , drop = FALSE], xnames = xnames, ynames = ynames, lambda = l ) y.error <- Y[indis, , drop = FALSE] - yhat mse <- mean(y.error * y.error) results[j, ] <- c(mse, l) } } # Group by lambda and determine average MSE per group avg <- 
aggregate(results[, "mse"], by = list(results[, "lambda"]), FUN = mean, na.rm = TRUE ) avg <- avg[order(avg[, 2]), ] names(avg) <- c("lambda", "mse") lambda.min <- avg[1L, "lambda"] list(results = avg, lambda.min = lambda.min) } lavaan/R/lav_mplus.R0000644000176200001440000000242414627656441014063 0ustar liggesusers# read in information from Mplus difftest output, return as list # # line 1: test statistic (unscaled) # line 2: number of groups # line 3: number of sample statistics (ndat) # line 4: number of free parameters (npar) # delta (ndat x npar) # P1 (E.inv) lav_matrix_vechr(npar x npar) # V1 (NVarCov) lav_matrix_vechr(npar x npar) lavutils_mplus_readdifftest <- function(file = "deriv.dat") { ### FIXME: does not work for multiple groups yet!!! raw <- scan(file, quiet = TRUE) T1 <- raw[1] # function value (usually T1 * 2 * nobs to get X2) ngroups <- as.integer(raw[2]) ndat <- as.integer(raw[3]) npar <- as.integer(raw[4]) pstar <- npar * (npar + 1) / 2 # delta offset <- 4L delta_raw <- raw[offset + seq_len(npar * ndat)] Delta <- matrix(delta_raw, nrow = ndat, ncol = npar, byrow = TRUE) # P1 offset <- 4L + npar * ndat p1_raw <- raw[offset + seq_len(pstar)] P1 <- lav_matrix_lower2full(p1_raw) # (robust) NACOV npar offset <- 4L + npar * ndat + pstar nacov_raw <- raw[offset + seq_len(pstar)] V1 <- lav_matrix_lower2full(nacov_raw) # just for fun, M1 # M1 <- (P1 - P1 %*% H %*% solve(t(H) %*% P1 %*% H) %*% t(H) %*% P1) %*% V1 list( T1 = T1, ngroups = ngroups, ndat = ndat, npar = npar, pstar = pstar, Delta = Delta, P1 = P1, V1 = V1 ) } lavaan/R/lav_modification.R0000644000176200001440000002335014627656441015371 0ustar liggesusers# univariate modification indices # modindices <- function(object, standardized = TRUE, cov.std = TRUE, information = "expected", # power statistics? power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, # customize output sort. = FALSE, minimum.value = 0.0, maximum.number = nrow(LIST), free.remove = TRUE, na.remove = TRUE, op = NULL) { # check if model has converged if (object@optim$npar > 0L && !object@optim$converged) { lav_msg_warn(gettext("model did not converge")) } # not ready for estimator = "PML" if (object@Options$estimator == "PML") { lav_msg_stop(gettext( "modification indices for estimator PML are not implemented yet.")) } # new in 0.6-17: check if the model contains equality constraints if (object@Model@eq.constraints) { lav_msg_warn( gettext("the modindices() function ignores equality constraints; use lavTestScore() to assess the impact of releasing one or multiple constraints.") ) } # sanity check if (power) { standardized <- TRUE } # extended list (fixed-to-zero parameters) strict.exo <- FALSE if (object@Model@conditional.x) { strict.exo <- TRUE } FULL <- lav_partable_full( partable = lav_partable_set_cache(object@ParTable, object@pta), free = TRUE, start = TRUE, strict.exo = strict.exo ) FULL$free <- rep(1L, nrow(FULL)) FULL$user <- rep(10L, nrow(FULL)) FIT <- lav_object_extended(object, add = FULL, all.free = TRUE) LIST <- FIT@ParTable # compute information matrix 'extended model' # ALWAYS use *expected* information (for now) Information <- lavTech(FIT, paste("information", information, sep = ".")) # compute gradient 'extended model' score <- lavTech(FIT, "gradient.logl") # Saris, Satorra & Sorbom 1987 # partition Q into Q_11, Q_22 and Q_12/Q_21 # which elements of Q correspond with 'free' and 'nonfree' parameters? 
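# notation sketch: with the information matrix I partitioned over the extra
# (1) and free (2) parameters, the univariate score test for a single fixed
# parameter k is
#
#   MI_k = N * score_k^2 / V[k, k],  where  V = I11 - I12 %*% solve(I22) %*% I21
#
# i.e. V is the Schur complement of I22 in I, computed below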
model.idx <- LIST$free[LIST$free > 0L & LIST$user != 10L] extra.idx <- LIST$free[LIST$free > 0L & LIST$user == 10L] # catch empty extra.idx (no modification indices!) if (length(extra.idx) == 0L) { # 2 possibilities: either model is saturated, or we have constraints if (object@test[[1]]$df == 0) { lav_msg_warn(gettext( "list with extra parameters is empty; model is saturated")) } else { lav_msg_warn(gettext( "list with extra parameters is empty; to release equality constraints, use lavTestScore()")) } LIST <- data.frame( lhs = character(0), op = character(0), rhs = character(0), group = integer(0), mi = numeric(0), epc = numeric(0), sepc.lv = numeric(0), sepc.all = numeric(0), sepc.nox = numeric(0) ) return(LIST) } # partition I11 <- Information[extra.idx, extra.idx, drop = FALSE] I12 <- Information[extra.idx, model.idx, drop = FALSE] I21 <- Information[model.idx, extra.idx, drop = FALSE] I22 <- Information[model.idx, model.idx, drop = FALSE] # ALWAYS use *expected* information (for now) I22.inv <- try( lavTech(object, paste("inverted.information", information, sep = "." )), silent = TRUE ) # just in case... if (inherits(I22.inv, "try-error")) { lav_msg_stop(gettext( "could not compute modification indices; information matrix is singular")) } V <- I11 - I12 %*% I22.inv %*% I21 V.diag <- diag(V) # dirty hack: catch very small or negative values in diag(V) # this is needed eg when parameters are not identified if freed-up; idx <- which(V.diag < .Machine$double.eps^(1 / 3)) # was 1/2 <0.6-14 if (length(idx) > 0L) { V.diag[idx] <- as.numeric(NA) } # create and fill in mi if (object@Data@nlevels == 1L) { N <- object@SampleStats@ntotal if (object@Model@estimator %in% ("ML")) { score <- -1 * score # due to gradient.logl } } else { # total number of clusters (over groups) N <- 0 for (g in 1:object@SampleStats@ngroups) { N <- N + object@Data@Lp[[g]]$nclusters[[2]] } # score <- score * (2 * object@SampleStats@ntotal) / N score <- score / 2 # -2 * LRT } mi <- numeric(length(score)) mi[extra.idx] <- N * (score[extra.idx] * score[extra.idx]) / V.diag if (length(model.idx) > 0L) { mi[model.idx] <- N * (score[model.idx] * score[model.idx]) / diag(I22) } LIST$mi <- rep(as.numeric(NA), length(LIST$lhs)) LIST$mi[LIST$free > 0] <- mi # handle equality constraints (if any) # eq.idx <- which(LIST$op == "==") # if(length(eq.idx) > 0L) { # OUT <- lavTestScore(object, warn = FALSE) # LIST$mi[ eq.idx ] <- OUT$uni$X2 # } # scaled? # if(length(object@test) > 1L) { # LIST$mi.scaled <- LIST$mi / object@test[[2]]$scaling.factor # } # EPC d <- (-1 * N) * score # needed? probably not; just in case d[which(abs(d) < 1e-15)] <- 1.0 LIST$epc[LIST$free > 0] <- mi / d # standardize? 
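# usage sketch of modindices() (classic Holzinger & Swineford example;
# illustration only, not executed here):
#   HS.model <- ' visual  =~ x1 + x2 + x3
#                 textual =~ x4 + x5 + x6
#                 speed   =~ x7 + x8 + x9 '
#   fit <- cfa(HS.model, data = HolzingerSwineford1939)
#   modindices(fit, sort. = TRUE, minimum.value = 10)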
if (standardized) { EPC <- LIST$epc if (cov.std) { # replace epc values for variances by est values var.idx <- which(LIST$op == "~~" & LIST$lhs == LIST$rhs & LIST$exo == 0L) EPC[var.idx] <- LIST$est[var.idx] } # two problems: # - EPC of variances can be negative, and that is # perfectly legal # - EPC (of variances) can be tiny (near-zero), and we should # not divide by tiny variables small.idx <- which(LIST$op == "~~" & LIST$lhs == LIST$rhs & abs(EPC) < sqrt(.Machine$double.eps)) if (length(small.idx) > 0L) { EPC[small.idx] <- as.numeric(NA) } # get the sign EPC.sign <- sign(LIST$epc) LIST$sepc.lv <- EPC.sign * lav_standardize_lv(object, partable = LIST, est = abs(EPC), cov.std = cov.std ) if (length(small.idx) > 0L) { LIST$sepc.lv[small.idx] <- 0 } LIST$sepc.all <- EPC.sign * lav_standardize_all(object, partable = LIST, est = abs(EPC), cov.std = cov.std ) if (length(small.idx) > 0L) { LIST$sepc.all[small.idx] <- 0 } LIST$sepc.nox <- EPC.sign * lav_standardize_all_nox(object, partable = LIST, est = abs(EPC), cov.std = cov.std ) if (length(small.idx) > 0L) { LIST$sepc.nox[small.idx] <- 0 } } # power? if (power) { LIST$delta <- delta # FIXME: this is using epc in unstandardized metric # this would be much more useful in standardized metric # we need a lav_standardize_all.reverse function... LIST$ncp <- (LIST$mi / (LIST$epc * LIST$epc)) * (delta * delta) LIST$power <- 1 - pchisq(qchisq((1.0 - alpha), df = 1), df = 1, ncp = LIST$ncp ) LIST$decision <- character(length(LIST$power)) # five possibilities (Table 6 in Saris, Satorra, van der Veld, 2009) mi.significant <- ifelse(1 - pchisq(LIST$mi, df = 1) < alpha, TRUE, FALSE ) high.power <- LIST$power > high.power # FIXME: sepc.all or epc?? # epc.high <- abs(LIST$sepc.all) > LIST$delta epc.high <- abs(LIST$epc) > LIST$delta LIST$decision[which(!mi.significant & !high.power)] <- "(i)" LIST$decision[which(mi.significant & !high.power)] <- "**(m)**" LIST$decision[which(!mi.significant & high.power)] <- "(nm)" LIST$decision[which(mi.significant & high.power & !epc.high)] <- "epc:nm" LIST$decision[which(mi.significant & high.power & epc.high)] <- "*epc:m*" # LIST$decision[ which(mi.significant & high.power) ] <- "epc" # LIST$decision[ which(mi.significant & !high.power) ] <- "***" # LIST$decision[ which(!mi.significant & !high.power) ] <- "(i)" } # remove rows corresponding to 'fixed.x' exogenous parameters # exo.idx <- which(LIST$exo == 1L & nchar(LIST$plabel) > 0L) # if(length(exo.idx) > 0L) { # LIST <- LIST[-exo.idx,] # } # remove some columns LIST$id <- LIST$ustart <- LIST$exo <- LIST$label <- LIST$plabel <- NULL LIST$start <- LIST$free <- LIST$est <- LIST$se <- LIST$prior <- NULL LIST$upper <- LIST$lower <- NULL if (power) { LIST$sepc.lv <- LIST$sepc.nox <- NULL } # create data.frame LIST <- as.data.frame(LIST, stringsAsFactors = FALSE) class(LIST) <- c("lavaan.data.frame", "data.frame") # remove rows corresponding to 'old' free parameters if (free.remove) { old.idx <- which(LIST$user != 10L) if (length(old.idx) > 0L) { LIST <- LIST[-old.idx, ] } } # remove rows corresponding to 'equality' constraints eq.idx <- which(LIST$op == "==") if (length(eq.idx) > 0L) { LIST <- LIST[-eq.idx, ] } # remove even more columns LIST$user <- NULL # remove block/group/level is only single block if (lav_partable_nblocks(LIST) == 1L) { LIST$block <- NULL LIST$group <- NULL LIST$level <- NULL } # sort? if (sort.) 
  {
    LIST <- LIST[order(LIST$mi, decreasing = TRUE), ]
  }
  if (minimum.value > 0.0) {
    LIST <- LIST[!is.na(LIST$mi) & LIST$mi > minimum.value, ]
  }
  if (maximum.number < nrow(LIST)) {
    LIST <- LIST[seq_len(maximum.number), ]
  }
  if (na.remove) {
    idx <- which(is.na(LIST$mi))
    if (length(idx) > 0) {
      LIST <- LIST[-idx, ]
    }
  }
  if (!is.null(op)) {
    idx <- LIST$op %in% op
    if (length(idx) > 0) {
      LIST <- LIST[idx, ]
    }
  }

  # add header
  # TODO: small explanation of the columns in the header?
  # attr(LIST, "header") <-
  #  c("modification indices for newly added parameters only; to\n",
  #    "see the effects of releasing equality constraints, use the\n",
  #    "lavTestScore() function")

  LIST
}

# aliases
modificationIndices <- modificationindices <- modindices
lavaan/R/lav_lavaan_step00_init.R0000644000176200001440000001430114627656441016400 0ustar liggesusers
lav_lavaan_step00_parameters <- function(matchcall = NULL,
                                         syscall = NULL,
                                         dotdotdot = NULL) {
  # 1. to resolve a problem where parameter 'cl' is matched to 'cluster'
  #    and shouldn't
  # 2. to apply default options for cfa/sem/growth functions
  # 3. if dotdotdot$control present, copy to dotdotdot$... for
  #      optim.method,
  #      optim.force.converged,
  #      gradient -> optim.gradient !!! overwritten by dotdotdot$gradient
  #                                     if present
  #      init_nelder_mead -> optim.init_nelder_mead
  mc <- matchcall
  sc <- syscall
  ddd <- dotdotdot

  # catch partial matching of 'cl' (expanded to cluster)
  if (!is.null(sc[["cl"]]) && is.null(sc[["cluster"]]) &&
      !is.null(mc[["cluster"]])) {
    mc[["cl"]] <- mc[["cluster"]]
    mc[["cluster"]] <- NULL
    ddd$cl <- sc[["cl"]]
  }
  if (!is.null(mc$cluster)) mc$cluster <- eval(mc$cluster, parent.frame(2))

  # default options
  if (any(ddd$model.type == c("sem", "cfa", "growth"))) {
    # default options for sem/cfa or growth
    defaults <- list(
      int.ov.free = ddd$model.type != "growth",
      int.lv.free = ddd$model.type == "growth",
      auto.fix.first = TRUE, # (re)set in lav_options_set
      auto.fix.single = TRUE,
      auto.var = TRUE,
      auto.cov.lv.x = TRUE,
      auto.cov.y = TRUE,
      auto.th = TRUE,
      auto.delta = TRUE,
      auto.efa = TRUE
    )
    for (dflt.i in seq_along(defaults)) {
      argname <- names(defaults)[dflt.i]
      if (is.null(mc[[argname]])) {
        mc[[argname]] <- defaults[[dflt.i]]
        ddd[[argname]] <- defaults[[dflt.i]]
      }
    }
  }

  # backwards compatibility, control= argument (<0.5-23)
  if (!is.null(ddd$control)) {
    # optim.method
    if (!is.null(ddd$control$optim.method)) {
      ddd$optim.method <- ddd$control$optim.method
    }
    # cor.optim.method
    if (!is.null(ddd$control$cor.optim.method)) {
      # ignore it silently
    }
    # control$optim.force.converged
    if (!is.null(ddd$control$optim.force.converged)) {
      ddd$optim.force.converged <- ddd$control$optim.force.converged
    }
    # gradient
    if (!is.null(ddd$control$gradient)) {
      ddd$optim.gradient <- ddd$control$gradient
    }
    if (!is.null(ddd$gradient)) {
      ddd$optim.gradient <- ddd$gradient
    }
    # init_nelder_mead
    if (!is.null(ddd$control$init_nelder_mead)) {
      ddd$optim.init_nelder_mead <- ddd$control$init_nelder_mead
    }
  }

  list(mc = mc, dotdotdot = ddd)
}

lav_lavaan_step00_checkdata <- function(data = NULL,
                                        dotdotdot = NULL,
                                        sample.cov = NULL,
                                        sample.nobs = NULL,
                                        sample.mean = NULL,
                                        sample.th = NULL,
                                        NACOV = NULL, # nolint
                                        WLS.V = NULL, # nolint
                                        ov.order = NULL) {
  # if data not NULL:
  #   if it is an 'enriched' data.frame (e.g.
a tibble), simplify to an # ordinary data.frame # if class is 'lavMoments': # check if it contains sample.cov and sample.nobs (***error*** if not) # if sample.mean, sample.th, NACOV and/or WLS.V present, # copy to corresponding arguments of the function # if lavOptions present in data, copy those that are not provided # in the function call to dotdotdot$... # set data to NULL # if it is a function --> ***error*** # TODO: other tests are present in lavData, should we copy them here ??? # if NACOV or WLS.V not NULL, set ov.order to "data" if (!is.null(data)) { if (inherits(data, "data.frame")) { # just in case it is not a traditional data.frame data <- as.data.frame(data) } else if (inherits(data, "lavMoments")) { # This object must contain summary statistics # e.g., created by lavaan.mi::poolSat # set required-data arguments if ("sample.cov" %in% names(data)) { sample.cov <- data$sample.cov } else { lav_msg_stop(gettext( "When data= is of class lavMoments, it must contain sample.cov")) } if ("sample.nobs" %in% names(data)) { sample.nobs <- data$sample.nobs } else { lav_msg_stop(gettext( "When data= is of class lavMoments, it must contain sample.nobs")) } # check for optional-data arguments if ("sample.mean" %in% names(data)) sample.mean <- data$sample.mean if ("sample.th" %in% names(data)) sample.th <- data$sample.th if ("NACOV" %in% names(data)) NACOV <- data$NACOV # nolint if ("WLS.V" %in% names(data)) WLS.V <- data$WLS.V # nolint # set other args not included in dotdotdot if (length(data$lavOptions)) { newdots <- setdiff(names(data$lavOptions), names(dotdotdot)) if (length(newdots)) { for (dd in newdots) dotdotdot[[dd]] <- data$lavOptions[[dd]] } } # FIXME: Should WLS.V be an I(dentity) matrix when ULS is requested? # Unused for point estimates, but still used to scale/shift test # if (!is.null(dotdotdot$estimator)) { # if (grepl(pattern = "ULS", x = toupper(dotdotdot$estimator[1L])) && # !is.null(WLS.V)) { # # set to diagonal # if (is.list(WLS.V)) { # WLS.V <- lapply(WLS.V, function(w) {diag(w) <- 1 ; return(w) }) # } else diag(WLS.V) <- 1 # } # } # get rid of data= argument data <- NULL } if (is.function(data)) { lav_msg_stop(gettext("data is a function; it should be a data.frame")) } } # new in 0.6-14: if NACOV and/or WLS.V are provided, we force # ov.order="data" for now # until we have reliable code to re-arrange/select col/rows for # of NACOV/WLS.V based on the model-based ov.names if (!is.null(NACOV) || !is.null(WLS.V)) { ov.order <- "data" } list( data = data, dotdotdot = dotdotdot, sample.cov = sample.cov, sample.nobs = sample.nobs, sample.mean = sample.mean, sample.th = sample.th, NACOV = NACOV, WLS.V = WLS.V, ov.order = ov.order ) } lavaan/R/lav_graphics.R0000644000176200001440000000034114627656441014517 0ustar liggesusers# small functions to do something useful with the common # plot commands # suggested by JEB pairs.lavaan <- function(x, group = 1L, ...) { X <- x@Data@X[[group]] colnames(X) <- x@Data@ov.names[[group]] pairs(X, ...) 
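  # usage sketch (assuming S3 dispatch on the generic; 'fit' is any fitted
  # lavaan object that retains the raw data):  pairs(fit, group = 1L)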
}
lavaan/R/lav_fsr.R0000644000176200001440000001027114627656441013514 0ustar liggesusers
# compute the jacobian: dtheta_2/dtheta_1:
#
# theta_2: - in the rows
#          - the croon corrections, expressed as
#            1) scaled offsets (scoffset), and
#            2) scaling factors
# theta_1: - in the columns
#          - the free parameters of the measurement model
#
lav_fsr_delta21 <- function(object, FSM = NULL) {
  lavmodel <- object@Model
  nmat <- lavmodel@nmat
  NCOL <- lavmodel@nx.free
  m.el.idx <- x.el.idx <- vector("list", length = length(lavmodel@GLIST))
  for (mm in seq_len(length(lavmodel@GLIST))) {
    m.el.idx[[mm]] <- lavmodel@m.free.idx[[mm]]
    x.el.idx[[mm]] <- lavmodel@x.free.idx[[mm]]
    # handle symmetric matrices
    if (lavmodel@isSymmetric[mm]) {
      # since we use 'x.free.idx', only symmetric elements
      # are duplicated (not the equal ones, only in x.free.free)
      dix <- duplicated(x.el.idx[[mm]])
      if (any(dix)) {
        m.el.idx[[mm]] <- m.el.idx[[mm]][!dix]
        x.el.idx[[mm]] <- x.el.idx[[mm]][!dix]
      }
    }
  }

  # Delta per group (or block?)
  Delta <- vector("list", length = lavmodel@ngroups)
  for (g in 1:lavmodel@ngroups) {
    fsm <- FSM[[g]]
    # which mm belong to group g?
    mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g]
    MLIST <- lavmodel@GLIST[mm.in.group]
    nrow.scoffset <- ncol(MLIST$lambda)
    nrow.scale <- ncol(MLIST$lambda)
    NROW <- nrow.scoffset + nrow.scale
    Delta.group <- matrix(0, nrow = NROW, ncol = NCOL)
    # prepare some computations
    AL.inv <- solve(fsm %*% MLIST$lambda)
    ATA <- fsm %*% MLIST$theta %*% t(fsm)
    for (mm in mm.in.group) {
      mname <- names(lavmodel@GLIST)[mm]
      # skip empty ones
      if (!length(m.el.idx[[mm]])) next
      if (mname == "lambda") {
        dL <- (-1 * (ATA %*% AL.inv + AL.inv %*% ATA) %*%
          (AL.inv %x% AL.inv) %*% fsm)
        delta.scoffset <- dL
        delta.scale <- fsm ## only ok for 1 row!!!
        delta <- rbind(delta.scoffset, delta.scale)
        Delta.group[, x.el.idx[[mm]]] <- delta[, m.el.idx[[mm]]]
      } else if (mname == "theta") {
        dT <- lav_matrix_vec((t(AL.inv) %*% fsm) %x% (t(fsm) %*% AL.inv))
        delta.scoffset <- dT
        delta.scale <- matrix(0,
          nrow = nrow.scale,
          ncol = length(MLIST$theta)
        )
        delta <- rbind(delta.scoffset, delta.scale)
        Delta.group[, x.el.idx[[mm]]] <- delta[, m.el.idx[[mm]]]
      } else if (mname %in% c("psi", "nu", "alpha")) {
        # zero
        next
      } else {
        lav_msg_stop(gettextf(
          "model matrix %s is not lambda/theta/psi", mname))
      }
    } # mm
    Delta[[g]] <- Delta.group
  } # g

  Delta
}

lav_fsr_pa2si <- function(PT = NULL, LVINFO) {
  PT.orig <- PT
  # remove se column (if any)
  if (!is.null(PT$se)) {
    PT$se <- NULL
  }
  # ngroups
  ngroups <- lav_partable_ngroups(PT)
  lhs <- rhs <- op <- character(0)
  group <- block <- level <- free <- exo <- integer(0)
  ustart <- est <- start <- numeric(0)
  for (g in seq_len(ngroups)) {
    nMM <- length(LVINFO[[g]])
    for (mm in seq_len(nMM)) {
      lvinfo <- LVINFO[[g]][[mm]]
      lv.names <- lvinfo$lv.names
      nfac <- length(lv.names)
      if (nfac > 1L) {
        lav_msg_stop(gettext("more than 1 factor in measurement block"))
      }
      LV <- lv.names
      ind <- paste(LV, ".si", sep = "")
      scoffset <- lvinfo$scoffset[1, 1]
      scale <- lvinfo$scale[1, 1]
      lhs <- c(lhs, LV, ind, ind, ind)
      op <- c(op, "=~", "~~", "~*~", "~1")
      rhs <- c(rhs, ind, ind, ind, "")
      block <- c(block, rep(g, 4L))
      free <- c(free, 0L, 1L, 1L, 0L)
      ustart <- c(ustart, 1, scoffset, scale, 0)
      exo <- c(exo, rep(0L, 4L))
      group <- c(group, rep(g, 4L))
      start <- c(start, 1, scoffset, scale, 0)
      est <- c(est, 1, scoffset, scale, 0)
    }
  }
  # free counter
  idx.free <- which(free > 0)
  free[idx.free] <- max(PT$free) + 1:length(idx.free)
  LIST <- list(
    id = max(PT$id) + 1:length(lhs),
    lhs = lhs,
    op = op,
    rhs = rhs,
    user = rep(10L, length(lhs)),
    block =
block, group = group, level = rep(1L, length(lhs)), free = free, ustart = ustart, exo = exo, start = start, est = est ) PT.si <- lav_partable_merge(PT, LIST) PT.si } lavaan/R/lav_test_yuan_bentler.R0000644000176200001440000003242314627656441016453 0ustar liggesusers# - 0.6-13: fix multiple-group UG^2 bug (reported by Gronneberg, Foldnes and # Moss) when Satterthwaite = TRUE, ngroups > 1, and eq constraints. # # Note however that Satterthwaite = FALSE always (for now), so # the fix has no (visible) effect lav_test_yuan_bentler <- function(lavobject = NULL, lavsamplestats = NULL, lavmodel = NULL, lavimplied = NULL, lavh1 = NULL, lavoptions = NULL, lavdata = NULL, TEST.unscaled = NULL, E.inv = NULL, B0.group = NULL, test = "yuan.bentler", mimic = "lavaan", # method = "default", ug2.old.approach = FALSE, return.ugamma = FALSE) { TEST <- list() if (!is.null(lavobject)) { lavsamplestats <- lavobject@SampleStats lavmodel <- lavobject@Model lavoptions <- lavobject@Options lavpartable <- lavobject@ParTable lavimplied <- lavobject@implied lavh1 <- lavobject@h1 lavdata <- lavobject@Data TEST$standard <- lavobject@test[[1]] } else { TEST$standard <- TEST.unscaled } # ug2.old.approach if (missing(ug2.old.approach)) { if (!is.null(lavoptions$ug2.old.approach)) { ug2.old.approach <- lavoptions$ug2.old.approach } else { ug2.old.approach <- FALSE } } # E.inv ok? if (length(lavoptions$information) == 1L && length(lavoptions$h1.information) == 1L && length(lavoptions$observed.information) == 1L) { E.inv.recompute <- FALSE } else if ((lavoptions$information[1] == lavoptions$information[2]) && (lavoptions$h1.information[1] == lavoptions$h1.information[2]) && (lavoptions$information[2] == "expected" || lavoptions$observed.information[1] == lavoptions$observed.information[2])) { E.inv.recompute <- FALSE } else { E.inv.recompute <- TRUE # change information options lavoptions$information[1] <- lavoptions$information[2] lavoptions$h1.information[1] <- lavoptions$h1.information[2] lavoptions$observed.information[1] <- lavoptions$observed.information[2] } if (!is.null(E.inv)) { E.inv.recompute <- FALSE # user-provided } # check test if (!all(test %in% c( "yuan.bentler", "yuan.bentler.mplus" ))) { lav_msg_warn(gettext("test must be one of `yuan.bentler', or `yuan.bentler.mplus'; will use `yuan.bentler' only")) test <- "yuan.bentler" } # information information <- lavoptions$information[1] # ndat ndat <- numeric(lavsamplestats@ngroups) # do we have E.inv? 
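  # (E.inv is the inverted -- possibly augmented -- information matrix of
  #  the h0 model; it is reused from the VCOV computation when the
  #  information options match, and recomputed below otherwise. once
  #  tr(U Gamma) is available, the scaled statistic further below is,
  #  in sketch:
  #    c = trace.UGamma / df ; T.scaled = T.standard / c ;
  #    p.scaled = 1 - pchisq(T.scaled, df) )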
  if (is.null(E.inv) || E.inv.recompute) {
    E.inv <- try(
      lav_model_information(
        lavmodel = lavmodel,
        lavsamplestats = lavsamplestats,
        lavdata = lavdata,
        lavimplied = lavimplied,
        lavoptions = lavoptions,
        extra = FALSE,
        augmented = TRUE,
        inverted = TRUE
      ),
      silent = TRUE
    )
    if (inherits(E.inv, "try-error")) {
      if (return.ugamma) {
        lav_msg_warn(gettext(
          "could not invert information matrix needed for UGamma"))
        return(NULL)
      } else {
        TEST$standard$stat <- as.numeric(NA)
        TEST$standard$stat.group <- rep(as.numeric(NA), lavdata@ngroups)
        TEST$standard$pvalue <- as.numeric(NA)
        TEST[[test[1]]] <- c(TEST$standard,
          scaling.factor = as.numeric(NA),
          shift.parameter = as.numeric(NA),
          label = character(0)
        )
        lav_msg_warn(gettext(
          "could not invert information matrix needed for robust test statistic"))
        # to prevent lavTestLRT error when robust test is detected for
        # some but not all models
        TEST[[test[1]]]$test <- test[1]
        return(TEST)
      }
    }
  }

  # catch df == 0
  if (TEST$standard$df == 0L || TEST$standard$df < 0) {
    TEST[[test[1]]] <- c(TEST$standard,
      scaling.factor = as.numeric(NA),
      label = character(0)
    )
    # to prevent lavTestLRT error when robust test is detected for
    # some but not all models
    TEST[[test[1]]]$test <- test[1]
    return(TEST)
  }

  # mean and variance adjusted?
  Satterthwaite <- FALSE # for now
  # if(any(test %in% c("mean.var.adjusted", "scaled.shifted"))) {
  #     Satterthwaite <- TRUE
  # }

  # FIXME: should we not always use 'unstructured' here?
  # if the model is, say, the independence model, the
  # 'structured' information (A1) will be so far away from B1
  # that we will end up with 'NA'
  h1.options <- lavoptions
  if (test == "yuan.bentler.mplus") {
    # always 'unstructured' H1 information
    h1.options$h1.information <- "unstructured"
  }
  # A1 is usually expected or observed
  A1.group <- lav_model_h1_information(
    lavmodel = lavmodel,
    lavsamplestats = lavsamplestats,
    lavdata = lavdata,
    lavimplied = lavimplied,
    lavh1 = lavh1,
    lavoptions = h1.options
  )
  # B1 is always first.order
  B1.group <- lav_model_h1_information_firstorder(
    lavmodel = lavmodel,
    lavsamplestats = lavsamplestats,
    lavdata = lavdata,
    lavimplied = lavimplied,
    lavh1 = lavh1,
    lavoptions = h1.options
  )

  if (test == "yuan.bentler.mplus") {
    if (is.null(B0.group)) {
      B0 <- lav_model_information_firstorder(
        lavmodel = lavmodel,
        lavsamplestats = lavsamplestats,
        lavdata = lavdata,
        lavh1 = lavh1,
        lavoptions = lavoptions,
        extra = TRUE,
        check.pd = FALSE,
        augmented = FALSE,
        inverted = FALSE
      )
      B0.group <- attr(B0, "B0.group")
    }
    trace.UGamma <- lav_test_yuan_bentler_mplus_trace(
      lavsamplestats = lavsamplestats,
      A1.group = A1.group,
      B1.group = B1.group,
      B0.group = B0.group,
      E.inv = E.inv,
      meanstructure = lavmodel@meanstructure
    )
  } else if (test == "yuan.bentler") {
    # compute Delta
    Delta <- computeDelta(lavmodel = lavmodel)
    # compute Omega/Gamma
    Omega <- lav_model_h1_omega(
      lavmodel = lavmodel,
      lavsamplestats = lavsamplestats,
      lavdata = lavdata,
      lavimplied = lavimplied,
      lavh1 = lavh1,
      lavoptions = lavoptions
    )
    # compute trace 'U %*% Gamma' (or 'U %*% Omega')
    trace.UGamma <- lav_test_yuan_bentler_trace(
      lavsamplestats = lavsamplestats,
      meanstructure = lavmodel@meanstructure,
      A1.group = A1.group,
      B1.group = B1.group,
      Delta = Delta,
      Omega = Omega,
      E.inv = E.inv,
      ug2.old.approach = ug2.old.approach,
      Satterthwaite = FALSE
    ) # for now
  }

  # unscaled test
  df <- TEST$standard$df
  scaling.factor <- trace.UGamma / df
  if (scaling.factor < 0) scaling.factor <- as.numeric(NA)
  chisq.scaled <- TEST$standard$stat / scaling.factor
  pvalue.scaled <- 1 - pchisq(chisq.scaled, df)
  ndat <- sum(attr(trace.UGamma,
"h1.ndat")) npar <- lavmodel@nx.free scaling.factor.h1 <- sum(attr(trace.UGamma, "h1")) / ndat scaling.factor.h0 <- sum(attr(trace.UGamma, "h0")) / npar trace.UGamma2 <- attr(trace.UGamma, "trace.UGamma2") attributes(trace.UGamma) <- NULL if ("yuan.bentler" %in% test) { TEST$yuan.bentler <- list( test = test, stat = chisq.scaled, stat.group = (TEST$standard$stat.group / scaling.factor), df = df, pvalue = pvalue.scaled, scaling.factor = scaling.factor, scaling.factor.h1 = scaling.factor.h1, scaling.factor.h0 = scaling.factor.h0, label = "Yuan-Bentler correction", trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, scaled.test.stat = TEST$standard$stat, scaled.test = TEST$standard$test ) } else if ("yuan.bentler.mplus" %in% test) { TEST$yuan.bentler.mplus <- list( test = test, stat = chisq.scaled, stat.group = (TEST$standard$stat.group / scaling.factor), df = df, pvalue = pvalue.scaled, scaling.factor = scaling.factor, scaling.factor.h1 = scaling.factor.h1, scaling.factor.h0 = scaling.factor.h0, label = "Yuan-Bentler correction (Mplus variant)", trace.UGamma = trace.UGamma, trace.UGamma2 = as.numeric(NA), scaled.test.stat = TEST$standard$stat, scaled.test = TEST$standard$test ) } TEST } lav_test_yuan_bentler_trace <- function(lavsamplestats = lavsamplestats, meanstructure = TRUE, A1.group = NULL, B1.group = NULL, Delta = NULL, Omega = NULL, E.inv = NULL, ug2.old.approach = FALSE, Satterthwaite = FALSE) { # we always assume a meanstructure (nope, not any longer, since 0.6) # meanstructure <- TRUE ngroups <- lavsamplestats@ngroups trace.h1 <- attr(Omega, "trace.h1") h1.ndat <- attr(Omega, "h1.ndat") if (ug2.old.approach || !Satterthwaite) { trace.UGamma <- numeric(ngroups) trace.UGamma2 <- numeric(ngroups) trace.h0 <- numeric(ngroups) for (g in 1:ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal A1 <- A1.group[[g]] * fg B1 <- B1.group[[g]] * fg DELTA <- Delta[[g]] Gamma.g <- Omega[[g]] / fg D.Einv.tD <- DELTA %*% tcrossprod(E.inv, DELTA) # trace.h1[g] <- sum( B1 * t( A1.inv ) ) # fg cancels out: trace.h1[g] <- sum( fg*B1 * t( 1/fg*A1.inv ) ) trace.h0[g] <- sum(B1 * D.Einv.tD) # trace.UGamma[g] <- trace.h1[g] - trace.h0[g] U <- A1 - A1 %*% D.Einv.tD %*% A1 trace.UGamma[g] <- sum(U * Gamma.g) if (Satterthwaite) { UG <- U %*% Gamma.g trace.UGamma2[g] <- sum(UG * t(UG)) } } # g trace.UGamma <- sum(trace.UGamma) attr(trace.UGamma, "h1") <- trace.h1 attr(trace.UGamma, "h0") <- trace.h0 attr(trace.UGamma, "h1.ndat") <- h1.ndat if (Satterthwaite) { attr(trace.UGamma, "trace.UGamma2") <- sum(trace.UGamma2) } } else { trace.UGamma <- trace.UGamma2 <- UG <- as.numeric(NA) fg <- unlist(lavsamplestats@nobs) / lavsamplestats@ntotal # if(Satterthwaite) { A1.f <- A1.group for (g in 1:ngroups) { A1.f[[g]] <- A1.group[[g]] * fg[g] } A1.all <- lav_matrix_bdiag(A1.f) B1.f <- B1.group for (g in 1:ngroups) { B1.f[[g]] <- B1.group[[g]] * fg[g] } B1.all <- lav_matrix_bdiag(B1.f) Gamma.f <- Omega for (g in 1:ngroups) { Gamma.f[[g]] <- 1 / fg[g] * Omega[[g]] } Gamma.all <- lav_matrix_bdiag(Gamma.f) Delta.all <- do.call("rbind", Delta) D.Einv.tD <- Delta.all %*% tcrossprod(E.inv, Delta.all) trace.h0 <- sum(B1.all * D.Einv.tD) U.all <- A1.all - A1.all %*% D.Einv.tD %*% A1.all trace.UGamma <- sum(U.all * Gamma.all) attr(trace.UGamma, "h1") <- sum(trace.h1) attr(trace.UGamma, "h0") <- trace.h0 attr(trace.UGamma, "h1.ndat") <- sum(h1.ndat) if (Satterthwaite) { UG <- U.all %*% Gamma.all trace.UGamma2 <- sum(UG * t(UG)) attr(trace.UGamma, "trace.UGamma2") <- trace.UGamma2 } # } else { } trace.UGamma } 
lav_test_yuan_bentler_mplus_trace <- function(lavsamplestats = NULL, A1.group = NULL, B1.group = NULL, B0.group = NULL, E.inv = NULL, meanstructure = TRUE) { # typical for Mplus: # - do NOT use the YB formula, but use an approximation # relying on A0 ~= Delta' A1 Delta and the same for B0 # # NOTE: if A0 is based on the hessian, then A0 only approximates # Delta' A1 Delta # # - always use h1.information = "unstructured"!!! ngroups <- lavsamplestats@ngroups trace.UGamma <- numeric(lavsamplestats@ngroups) trace.h1 <- numeric(lavsamplestats@ngroups) trace.h0 <- numeric(lavsamplestats@ngroups) h1.ndat <- numeric(lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { # group weight fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal A1 <- A1.group[[g]] B1 <- B1.group[[g]] # mask independent 'fixed-x' variables zero.idx <- which(diag(A1) == 0) if (length(zero.idx) > 0L) { A1.inv <- matrix(0, nrow(A1), ncol(A1)) a1 <- A1[-zero.idx, -zero.idx] a1.inv <- solve(a1) A1.inv[-zero.idx, -zero.idx] <- a1.inv } else { A1.inv <- solve(A1) } h1.ndat[g] <- ncol(A1) - length(zero.idx) # if data is complete, why not just A1 %*% Gamma? trace.h1[g] <- sum(B1 * t(A1.inv)) trace.h0[g] <- fg * sum(B0.group[[g]] * t(E.inv)) trace.UGamma[g] <- (trace.h1[g] - trace.h0[g]) } # we take the sum here trace.UGamma <- sum(trace.UGamma) attr(trace.UGamma, "h1") <- trace.h1 attr(trace.UGamma, "h0") <- trace.h0 attr(trace.UGamma, "h1.ndat") <- h1.ndat trace.UGamma } lavaan/R/lav_model_h1_omega.R0000644000176200001440000001053414627656440015563 0ustar liggesusers# compute 'Omega' == A1^{-1} B1 A1^{-1} # where A1 is the expected/observed information matrix of the unrestricted (h1) # model, and B1 is the first-order information matrix of the unrestricted (h1) # model # # but the exact result will depend on the options: # for 'A': # - omega.information ("expected" or "observed") # - omega.h1.information ("structured" or "unstructured") # for 'B': # - omega.information.meat ("first-order") # - omega.h1.information.meat ("structured" or "unstructured") # # special case: if data is complete, A is expected/unstructured, and B is # unstructured, we get (sample-based) 'Gamma' # # YR 28 Oct 2020 lav_model_h1_omega <- function(lavobject = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, lavoptions = NULL) { if (!is.null(lavobject) && inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavdata <- lavobject@Data lavimplied <- lavobject@implied if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) } lavcache <- lavobject@Cache lavoptions <- lavobject@Options } # sanity check if (length(lavh1) == 0L) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } if (length(lavimplied) == 0L) { lavimplied <- lav_model_implied(lavmodel = lavmodel) } # set options for A A1.options <- lavoptions A1.options$information <- lavoptions$omega.information A1.options$h1.information <- lavoptions$omega.h1.information B1.options <- lavoptions B1.options$information <- lavoptions$omega.information.meat # unused B1.options$h1.information <- lavoptions$omega.h1.information.meat # information information <- lavoptions$omega.information # compute A1 (per group) if (information == "observed") { A1 <- lav_model_h1_information_observed( lavmodel = lavmodel, 
lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = A1.options ) } else if (information == "expected") { A1 <- lav_model_h1_information_expected( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = A1.options ) } else if (information == "first.order") { # not needed? A1 <- lav_model_h1_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = A1.options ) } # compute B1 (per group) B1 <- lav_model_h1_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = B1.options ) # return Omega per group Omega <- vector("list", length = lavdata@ngroups) trace.h1 <- numeric(lavdata@ngroups) h1.ndat <- numeric(lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { A1.g <- A1[[g]] B1.g <- B1[[g]] # mask independent 'fixed-x' variables zero.idx <- which(diag(A1.g) == 0) if (length(zero.idx) > 0L) { A1.inv <- matrix(0, nrow(A1.g), ncol(A1.g)) a1 <- A1.g[-zero.idx, -zero.idx, drop = FALSE] a1.inv <- solve(a1) A1.inv[-zero.idx, -zero.idx] <- a1.inv } else { A1.inv <- solve(A1.g) } trace.h1[g] <- sum(B1.g * t(A1.inv)) h1.ndat[g] <- ncol(A1.g) - length(zero.idx) Omega[[g]] <- A1.inv %*% B1.g %*% A1.inv } # store trace.h1 as an attribute (to be used in yuan-bentler) attr(Omega, "trace.h1") <- trace.h1 attr(Omega, "h1.ndat") <- h1.ndat attr(Omega, "A.information") <- paste(A1.options$information, A1.options$h1.information, sep = "." ) attr(Omega, "B.information") <- paste(B1.options$information, B1.options$h1.information, sep = "." ) Omega } lavaan/R/lav_efa_pace.R0000644000176200001440000001302014627656441014440 0ustar liggesusers# Albert (1944a/b) & Ihara & Kano 1986 method to estimate residual variances # of indicators using a PArtitioned Covariance matrix Estimator (PACE) # # The implementation is based on Cudeck 1991: # Cudeck, R. (1991). Noniterative factor analysis estimators, with algorithms # for subset and instrumental variable selection. Journal of Educational # Statistics, 16(1), 35-52. 
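#
# usage sketch (internal helper; hypothetical input, not run):
#   S <- cov(HolzingerSwineford1939[, paste0("x", 1:9)])
#   theta <- lav_efa_pace(S, nfactors = 3L) # residual variances only
#   out <- lav_efa_pace(S, nfactors = 3L, theta.only = FALSE) # + LAMBDA/THETA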
# YR -- 14 FEB 2020 # - 'fast' version; only (2*nfactors + 1) iterations are needed # - scale-invariant (by default) # - always assuming unit variances for the factors lav_efa_pace <- function(S, nfactors = 1L, p.idx = seq_len(ncol(S)), reflect = TRUE, order.lv.by = "none", use.R = TRUE, theta.only = TRUE) { S <- unname(S) nvar <- ncol(S) theta <- numeric(nvar) stopifnot(nfactors < nvar / 2) # because subset selection is not scale-invariant, we transform # S to R, compute theta based on R, and then rescale again if (use.R) { s.var <- diag(S) R <- stats::cov2cor(S) } else { R <- S } # find principal variables ('largest' sub-block) A <- R # row indices v.r <- integer(0L) # column indices v.c <- integer(0L) for (h in seq_len(nfactors)) { # mask mask.idx <- c(v.r, v.c) tmp <- abs(A) if (length(mask.idx) > 0L) { tmp[mask.idx, ] <- 0 tmp[, mask.idx] <- 0 } diag(tmp) <- 0 # find maximum off-diagonal element idx <- which(tmp == max(tmp), arr.ind = TRUE, useNames = FALSE)[1, ] k <- idx[1] l <- idx[2] v.r <- c(v.r, k) v.c <- c(v.c, l) # non-symmetric sweep operator a.kl <- A[k, l] if (abs(a.kl) < sqrt(.Machine$double.eps)) { out <- A out[k, ] <- 0 out[, l] <- 0 } else { out <- A - tcrossprod(A[, l], A[k, ]) / a.kl out[k, ] <- A[k, ] / a.kl out[, l] <- -A[, l] / a.kl out[k, l] <- 1 / a.kl } A <- out } # diagonal elements are estimates of theta # for all variables not in (v.r, v.c) all.idx <- seq_len(nvar) v.r.init <- v.r v.c.init <- v.c other.idx <- all.idx[-c(v.r, v.c)] theta[other.idx] <- diag(A)[other.idx] # now fill in theta for the 2*m remaining variables in c(v.r.init, v.c.init) for (i in p.idx) { if (i %in% other.idx) { next } # row indices v.r <- integer(0L) # column indices v.c <- integer(0L) A <- R for (h in seq_len(nfactors)) { # mask mask.idx <- c(i, v.r, v.c) tmp <- abs(A) tmp[mask.idx, ] <- 0 tmp[, mask.idx] <- 0 diag(tmp) <- 0 # find maximum off-diagonal element idx <- which(tmp == max(tmp), arr.ind = TRUE, useNames = FALSE)[1, ] k <- idx[1] l <- idx[2] v.r <- c(v.r, k) v.c <- c(v.c, l) # non-symmetric sweep operator a.kl <- A[k, l] if (abs(a.kl) < sqrt(.Machine$double.eps)) { out <- A out[k, ] <- 0 out[, l] <- 0 } else { out <- A - tcrossprod(A[, l], A[k, ]) / a.kl out[k, ] <- A[k, ] / a.kl out[, l] <- -A[, l] / a.kl out[k, l] <- 1 / a.kl } A <- out } # diagonal element is estimate of theta theta[i] <- A[i, i] } # return theta elements only if (theta.only) { # rescale back to S metric if (use.R) { theta <- theta * s.var } return(theta[p.idx]) } # compute LAMBDA using the 'eigenvalue' method EV <- eigen(R, symmetric = TRUE) S.sqrt <- EV$vectors %*% sqrt(diag(EV$values)) %*% t(EV$vectors) S.inv.sqrt <- EV$vectors %*% sqrt(diag(1 / EV$values)) %*% t(EV$vectors) RTR <- S.inv.sqrt %*% diag(theta) %*% S.inv.sqrt EV <- eigen(RTR, symmetric = TRUE) Omega.m <- EV$vectors[, 1L + nvar - seq_len(nfactors), drop = FALSE] gamma.m <- EV$values[1L + nvar - seq_len(nfactors)] Gamma.m <- diag(gamma.m, nrow = nfactors, ncol = nfactors) # Cuceck 1991 page 37 bottom of the page: LAMBDA.dot <- S.sqrt %*% Omega.m %*% sqrt(diag(nfactors) - Gamma.m) if (use.R) { # IF (and only if) the input is a correlation matrix, # we must rescale so that the diag(R.implied) == 1 # R.unscaled <- tcrossprod(LAMBDA.dot) + diag(theta) # r.var.inv <- 1/diag(R.unscaled) # LAMBDA/THETA in correlation metric # LAMBDA.R <- sqrt(r.var.inv) * LAMBDA.dot # THETA.R <- diag(r.var.inv * theta) # convert to 'S' metric LAMBDA <- sqrt(s.var) * LAMBDA.dot THETA <- diag(s.var * theta) } else { LAMBDA <- LAMBDA.dot THETA <- diag(theta) } # 
reflect so that column sum is always positive if (reflect) { SUM <- colSums(LAMBDA) neg.idx <- which(SUM < 0) if (length(neg.idx) > 0L) { LAMBDA[, neg.idx] <- -1 * LAMBDA[, neg.idx, drop = FALSE] } } # reorder the columns if (order.lv.by == "sumofsquares") { L2 <- LAMBDA * LAMBDA order.idx <- base::order(colSums(L2), decreasing = TRUE) } else if (order.lv.by == "index") { # reorder using Asparouhov & Muthen 2009 criterion (see Appendix D) max.loading <- apply(abs(LAMBDA), 2, max) # 1: per factor, number of the loadings that are at least 0.8 of the # highest loading of the factor # 2: mean of the index numbers average.index <- sapply(seq_len(ncol(LAMBDA)), function(i) { mean(which(abs(LAMBDA[, i]) >= 0.8 * max.loading[i])) }) # order of the factors order.idx <- base::order(average.index) } else if (order.lv.by == "none") { order.idx <- seq_len(ncol(LAMBDA)) } else { lav_msg_stop(gettext("order must be index, sumofsquares or none")) } LAMBDA <- LAMBDA[, order.idx, drop = FALSE] list(LAMBDA = LAMBDA, THETA = THETA) } lavaan/R/lav_test.R0000644000176200001440000005607614627656441013716 0ustar liggesusers# chi-square test statistic: # comparing the current model versus the saturated/unrestricted model # TDJ 9 April 2024: Add a (hidden) function to update the @test slot # when the user provides a custom h1 model. Called by # lav_object_summary and lav_fit_measures(_check_baseline) lavTest <- function(lavobject, test = "standard", scaled.test = "standard", output = "list", drop.list.single = TRUE) { # check output output.valid <- c("list", "text") if (!any(output == output.valid)) { lav_msg_stop(gettextf( "%1$s argument must be either %2$s", "output", lav_msg_view(output.valid, "or") )) } # extract 'test' slot TEST <- lavobject@test # which test? if (!missing(test)) { # check 'test' if (!is.character(test)) { lav_msg_stop( gettextf("%s should be a character string.", "test")) } else { test <- lav_test_rename(test, check = TRUE) } # check scaled.test if (!missing(scaled.test)) { if (!is.character(scaled.test)) { lav_msg_stop( gettextf("%s should be a character string.", "scaled.test")) } else { scaled.test <- lav_test_rename(scaled.test, check = TRUE) } # merge test <- unique(c(test, scaled.test)) # but "standard" must always be first standard.idx <- which(test == "standard") if (length(standard.idx) > 0L && standard.idx != 1L) { test <- c("standard", test[-standard.idx]) } } if (test[1] == "none") { return(list()) } else if (any(test %in% c("bootstrap", "bollen.stine"))) { lav_msg_stop(gettext( "please use bootstrapLavaan() to obtain a bootstrap based test statistic." 
)) } # check if we already have it: if (all(test %in% names(TEST))) { info.attr <- attr(TEST, "info") test.idx <- which(names(TEST) %in% test) TEST <- TEST[test.idx] attr(TEST, "info") <- info.attr } else { # redo ALL of them, even if already have some in TEST # later, we will allow to also change the options (like information) # and this should be reflected in the 'info' attribute # fill-in test in Options slot lavobject@Options$test <- test # fill-in scaled.test in Options slot lavobject@Options$scaled.test <- scaled.test # get requested test statistics TEST <- lav_model_test(lavobject = lavobject) } } if (output == "list") { # remove 'info' attribute attr(TEST, "info") <- NULL # select only those that were requested (eg remove standard) test.idx <- which(names(TEST) %in% test) TEST <- TEST[test.idx] # if only 1 test, drop outer list if (length(TEST) == 1L && drop.list.single) { TEST <- TEST[[1]] } return(TEST) } else { lav_test_print(TEST) } invisible(TEST) } # allow for 'flexible' names for the test statistics # 0.6-13: if multiple names, order them in such a way # that the 'scaled' variants appear after the others lav_test_rename <- function(test, check = FALSE) { test <- tolower(test) if (length(target.idx <- which(test %in% c("standard", "chisq", "chi", "chi-square", "chi.square"))) > 0L) { test[target.idx] <- "standard" } if (length(target.idx <- which(test %in% c( "satorra", "sb", "satorra.bentler", "satorra-bentler", "m.adjusted", "m", "mean.adjusted", "mean-adjusted" ))) > 0L) { test[target.idx] <- "satorra.bentler" } if (length(target.idx <- which(test %in% c("yuan", "yb", "yuan.bentler", "yuan-bentler"))) > 0L) { test[target.idx] <- "yuan.bentler" } if (length(target.idx <- which(test %in% c( "yuan.bentler.mplus", "yuan-bentler.mplus", "yuan-bentler-mplus" ))) > 0L) { test[target.idx] <- "yuan.bentler.mplus" } if (length(target.idx <- which(test %in% c( "mean.var.adjusted", "mean-var-adjusted", "mv", "second.order", "satterthwaite", "mv.adjusted" ))) > 0L) { test[target.idx] <- "mean.var.adjusted" } if (length(target.idx <- which(test %in% c( "mplus6", "scale.shift", "scaled.shifted", "scaled-shifted" ))) > 0L) { test[target.idx] <- "scaled.shifted" } if (length(target.idx <- which(test %in% c("bootstrap", "boot", "bollen.stine", "bollen-stine"))) > 0L) { test[target.idx] <- "bollen.stine" } if (length(target.idx <- which(test %in% c( "browne", "residual", "residuals", "browne.residual", "browne.residuals", "residual-based", "residual.based", "browne.residuals.adf", "browne.residual.adf" ))) > 0L) { test[target.idx] <- "browne.residual.adf" } if (length(target.idx <- which(test %in% c("browne.residuals.nt", "browne.residual.nt"))) > 0L) { test[target.idx] <- "browne.residual.nt" } if (length(target.idx <- which(test %in% c("browne.residual.adf.model"))) > 0L) { test[target.idx] <- "browne.residual.adf.model" } if (length(target.idx <- which(test %in% c( "browne.residuals.nt.model", "browne.residual.nt.model", "rls", "browne.rls", "nt.rls", "nt-rls", "ntrls" ))) > 0L) { test[target.idx] <- "browne.residual.nt.model" } # check? 
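# e.g., lav_test_rename(c("SB", "chi-square")) yields
#       c("standard", "satorra.bentler"): scaled variants are ordered last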
if (check) { # report unknown values bad.idx <- which(!test %in% c( "standard", "none", "default", "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.adjusted", "mean.var.adjusted", "scaled.shifted", "bollen.stine", "browne.residual.nt", "browne.residual.nt.model", "browne.residual.adf", "browne.residual.adf.model" )) if (length(bad.idx) > 0L) { lav_msg_stop(sprintf( ngettext( length(test[bad.idx]), "invalid value in %1$s argument: %2$s.", "invalid values in %1$s argument: %2$s." ), "test", lav_msg_view(test[bad.idx], log.sep = "none") )) } # if 'default' is included, length(test) must be 1 if (length(test) > 1L && any("default" == test)) { lav_msg_stop( gettextf("if test= argument contains \"%s\", it cannot contain additional elements", "default")) } # if 'none' is included, length(test) must be 1 if (length(test) > 1L && any("none" == test)) { lav_msg_stop( gettextf("if test= argument contains \"%s\" it cannot contain additional elements", "none")) } } # reorder: first nonscaled, then scaled nonscaled.idx <- which(test %in% c( "standard", "none", "default", "bollen.stine", "browne.residual.nt", "browne.residual.nt.model", "browne.residual.adf", "browne.residual.adf.model" )) scaled.idx <- which(test %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.adjusted", "mean.var.adjusted", "scaled.shifted" )) test <- c(test[nonscaled.idx], test[scaled.idx]) test } lav_model_test <- function(lavobject = NULL, lavmodel = NULL, lavpartable = NULL, lavsamplestats = NULL, lavimplied = NULL, lavh1 = list(), lavoptions = NULL, x = NULL, VCOV = NULL, lavcache = NULL, lavdata = NULL, lavloglik = NULL, test.UGamma.eigvals = FALSE) { # lavobject? if (!is.null(lavobject)) { lavmodel <- lavobject@Model lavpartable <- lav_partable_set_cache(lavobject@ParTable, lavobject@pta) lavsamplestats <- lavobject@SampleStats lavimplied <- lavobject@implied lavh1 <- lavobject@h1 lavoptions <- lavobject@Options x <- lavobject@optim$x fx <- lavobject@optim[["fx"]] fx.group <- lavobject@optim[["fx.group"]] attr(fx, "fx.group") <- fx.group attr(x, "fx") <- fx VCOV <- lavobject@vcov$vcov lavcache <- lavobject@Cache lavdata <- lavobject@Data lavloglik <- lavobject@loglik } # backwards compatibility if (is.null(lavoptions$scaled.test)) { lavoptions$scaled.test <- "standard" } test <- test.orig <- lavoptions$test TEST <- list() # degrees of freedom (ignoring constraints) df <- lav_partable_df(lavpartable) # handle equality constraints (note: we ignore inequality constraints, # active or not!) # we use the rank of con.jac (even if the constraints are nonlinear) if (nrow(lavmodel@con.jac) > 0L) { ceq.idx <- attr(lavmodel@con.jac, "ceq.idx") if (length(ceq.idx) > 0L) { neq <- qr(lavmodel@con.jac[ceq.idx, , drop = FALSE])$rank df <- df + neq } } else if (lavmodel@ceq.simple.only) { # needed?? 
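    # (with simple equality constraints only, max(lavpartable$free) already
    #  counts just the unique free parameters, so df = ndat - npar needs no
    #  extra correction term)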
ndat <- lav_partable_ndat(lavpartable) npar <- max(lavpartable$free) df <- ndat - npar } # shortcut: return empty list if one of the conditions below is true: # - test == "none" # - df < 0 # - estimator == "MML" if (test[1] == "none" || df < 0L || lavoptions$estimator == "MML") { TEST[[1]] <- list( test = test[1], stat = as.numeric(NA), stat.group = as.numeric(NA), df = df, refdistr = "unknown", pvalue = as.numeric(NA) ) if (length(test) > 1L) { TEST[[2]] <- list( test = test[2], stat = as.numeric(NA), stat.group = as.numeric(NA), df = df, refdistr = "unknown", pvalue = as.numeric(NA) ) } attr(TEST, "info") <- list( ngroups = lavdata@ngroups, group.label = lavdata@group.label, information = lavoptions$information, h1.information = lavoptions$h1.information, observed.information = lavoptions$observed.information ) return(TEST) } ###################### ## TEST == STANDARD ## ###################### # get chisq value, per group # PML if (lavoptions$estimator == "PML" && test[1] != "none") { # attention! # if the thresholds are saturated (ie, nuisance parameters) # we should use the ctr_pml_plrt() function. # # BUT, if the thresholds are structured (eg equality constraints) # then we MUST use the ctr_pml_plrt2() function. # # This was not done automatically < 0.6-6 # thresholds.structured <- FALSE # check th.idx <- which(lavpartable$op == "|") if (any(lavpartable$free[th.idx] == 0L)) { thresholds.structured <- TRUE } eq.idx <- which(lavpartable$op == "==") if (length(eq.idx) > 0L) { th.labels <- lavpartable$plabel[th.idx] eq.labels <- unique(c( lavpartable$lhs[eq.idx], lavpartable$rhs[eq.idx] )) if (any(th.labels %in% eq.labels)) { thresholds.structured <- TRUE } } # switch between ctr_pml_plrt() and ctr_pml_plrt2() if (thresholds.structured) { pml_plrt <- ctr_pml_plrt2 } else { pml_plrt <- ctr_pml_plrt } PML <- pml_plrt( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavoptions = lavoptions, x = x, VCOV = VCOV, lavcache = lavcache, lavsamplestats = lavsamplestats, lavpartable = lavpartable ) # get chi.group from PML, since we compare to `unrestricted' model, # NOT observed data chisq.group <- PML$PLRTH0Sat.group # twolevel } else if (lavdata@nlevels > 1L) { if (length(lavh1) > 0L) { # LRT chisq.group <- -2 * (lavloglik$loglik.group - lavh1$logl$loglik.group) } else { chisq.group <- rep(as.numeric(NA), lavdata@ngroups) } } else { # get fx.group fx <- attr(x, "fx") fx.group <- attr(fx, "fx.group") # always compute `standard' test statistic ## FIXME: the NFAC is now implicit in the computation of fx... NFAC <- 2 * unlist(lavsamplestats@nobs) if (lavoptions$estimator == "ML" && lavoptions$likelihood == "wishart") { # first divide by two NFAC <- NFAC / 2 NFAC <- NFAC - 1 NFAC <- NFAC * 2 } else if (lavoptions$estimator == "DLS") { NFAC <- NFAC / 2 NFAC <- NFAC - 1 NFAC <- NFAC * 2 } chisq.group <- fx.group * NFAC } # check for negative values chisq.group[chisq.group < 0] <- 0.0 # global test statistic chisq <- sum(chisq.group) # reference distribution: always chi-square, except for the # non-robust version of ULS and PML if (lavoptions$estimator == "ULS" || lavoptions$estimator == "PML") { refdistr <- "unknown" pvalue <- as.numeric(NA) } else { refdistr <- "chisq" # pvalue ### FIXME: what if df=0? NA? or 1? or 0? 
# this is not trivial, since # 1 - pchisq(0, df=0) = 1 # but # 1 - pchisq(0.00000000001, df=0) = 0 # and # 1 - pchisq(0, df=0, ncp=0) = 0 # # This is due to different definitions of limits (from the left, # or from the right) # # From 0.5-17 onwards, we will use NA if df=0, to be consistent if (df == 0) { pvalue <- as.numeric(NA) } else { pvalue <- 1 - pchisq(chisq, df) } } TEST[["standard"]] <- list( test = "standard", stat = chisq, stat.group = chisq.group, df = df, refdistr = refdistr, pvalue = pvalue ) if (length(test) == 1L && test == "standard") { # we are done attr(TEST, "info") <- list( ngroups = lavdata@ngroups, group.label = lavdata@group.label, information = lavoptions$information, h1.information = lavoptions$h1.information, observed.information = lavoptions$observed.information ) return(TEST) } else { # strip 'standard' from test list if (length(test) > 1L) { standard.idx <- which(test == "standard") if (length(standard.idx) > 0L) { test <- test[-standard.idx] } } } ###################### ## additional tests ## # new in 0.6-5 ###################### for (this.test in test) { if (lavoptions$estimator == "PML") { if (this.test == "mean.var.adjusted") { LABEL <- "mean+var adjusted correction (PML)" TEST[[this.test]] <- list( test = this.test, stat = PML$stat, stat.group = TEST[[1]]$stat.group * PML$scaling.factor, df = PML$df, pvalue = PML$p.value, scaling.factor = 1 / PML$scaling.factor, label = LABEL, shift.parameter = as.numeric(NA), trace.UGamma = as.numeric(NA), trace.UGamma4 = as.numeric(NA), trace.UGamma2 = as.numeric(NA), UGamma.eigenvalues = as.numeric(NA) ) } else { lav_msg_warn(gettextf("test option %s not available for estimator PML", this.test)) } } else if (this.test %in% c( "browne.residual.adf", "browne.residual.adf.model", "browne.residual.nt", "browne.residual.nt.model" )) { ADF <- TRUE if (this.test %in% c( "browne.residual.nt", "browne.residual.nt.model" )) { ADF <- FALSE } model.based <- FALSE if (this.test %in% c( "browne.residual.adf.model", "browne.residual.nt.model" )) { model.based <- TRUE } out <- lav_test_browne( lavobject = NULL, lavdata = lavdata, lavsamplestats = lavsamplestats, lavmodel = lavmodel, lavpartable = lavpartable, lavoptions = lavoptions, lavh1 = lavh1, lavimplied = lavimplied, ADF = ADF, model.based = model.based ) TEST[[this.test]] <- out } else if (this.test %in% c( "satorra.bentler", "mean.var.adjusted", "scaled.shifted" )) { # which test statistic shall we scale? unscaled.TEST <- TEST[[1]] if (lavoptions$scaled.test != "standard") { idx <- which(test.orig == lavoptions$scaled.test) if (length(idx) > 0L) { unscaled.TEST <- TEST[[idx[1]]] } else { lav_msg_warn(gettextf( "scaled.test [%1$s] not found among available (non scaled) tests: %2$s. Using standard test instead.", lavoptions$scaled.test, lav_msg_view(test))) } } out <- lav_test_satorra_bentler( lavobject = NULL, lavsamplestats = lavsamplestats, lavmodel = lavmodel, lavimplied = lavimplied, lavdata = lavdata, lavoptions = lavoptions, TEST.unscaled = unscaled.TEST, E.inv = attr(VCOV, "E.inv"), Delta = attr(VCOV, "Delta"), WLS.V = attr(VCOV, "WLS.V"), Gamma = attr(VCOV, "Gamma"), test = this.test, mimic = lavoptions$mimic, method = "original", # since 0.6-13 return.ugamma = FALSE ) TEST[[this.test]] <- out[[this.test]] } else if (this.test %in% c( "yuan.bentler", "yuan.bentler.mplus" )) { # which test statistic shall we scale? 
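      # (defaults to the standard chi-square; via the scaled.test= option
      #  another, already computed, unscaled statistic can be scaled instead)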
unscaled.TEST <- TEST[[1]] if (lavoptions$scaled.test != "standard") { idx <- which(test.orig == lavoptions$scaled.test) if (length(idx) > 0L) { unscaled.TEST <- TEST[[idx[1]]] } else { lav_msg_warn(gettextf( "scaled.test [%1$s] not found among available (non scaled) tests: %2$s. Using standard test instead.", lavoptions$scaled.test, lav_msg_view(test))) } } out <- lav_test_yuan_bentler( lavobject = NULL, lavsamplestats = lavsamplestats, lavmodel = lavmodel, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, TEST.unscaled = unscaled.TEST, E.inv = attr(VCOV, "E.inv"), B0.group = attr(VCOV, "B0.group"), test = this.test, mimic = lavoptions$mimic, # method = "default", return.ugamma = FALSE ) TEST[[this.test]] <- out[[this.test]] } else if (this.test == "bollen.stine") { # check if we have bootstrap lavdata BOOT.TEST <- attr(VCOV, "BOOT.TEST") if (is.null(BOOT.TEST)) { if (!is.null(lavoptions$bootstrap)) { R <- lavoptions$bootstrap } else { R <- 1000L } boot.type <- "bollen.stine" BOOT.TEST <- lav_bootstrap_internal( object = NULL, lavmodel. = lavmodel, lavsamplestats. = lavsamplestats, lavpartable. = lavpartable, lavoptions. = lavoptions, lavdata. = lavdata, R = R, type = boot.type, FUN = "test" ) # new in 0.6-12: always warn for failed and nonadmissible error.idx <- attr(BOOT.TEST, "error.idx") nfailed <- length(attr(BOOT.TEST, "error.idx")) # zero if NULL if (nfailed > 0L) { lav_msg_warn(gettextf( "%d bootstrap runs failed or did not converge.", nfailed )) } notok <- length(attr(BOOT.TEST, "nonadmissible")) # zero if NULL if (notok > 0L) { lav_msg_warn(gettextf( "%d bootstrap runs resulted in nonadmissible solutions.", notok )) } if (length(error.idx) > 0L) { # new in 0.6-13: we must still remove them! BOOT.TEST <- BOOT.TEST[-error.idx, , drop = FALSE] # this also drops the attributes } BOOT.TEST <- drop(BOOT.TEST) } # bootstrap p-value boot.larger <- sum(BOOT.TEST > chisq) boot.length <- length(BOOT.TEST) pvalue.boot <- boot.larger / boot.length TEST[[this.test]] <- list( test = this.test, stat = chisq, stat.group = chisq.group, df = df, pvalue = pvalue.boot, refdistr = "bootstrap", boot.T = BOOT.TEST, boot.larger = boot.larger, boot.length = boot.length ) } } # additional tests # add additional information as an attribute, needed for independent # printing attr(TEST, "info") <- list( ngroups = lavdata@ngroups, group.label = lavdata@group.label, information = lavoptions$information, h1.information = lavoptions$h1.information, observed.information = lavoptions$observed.information ) TEST } lav_update_test_custom_h1 <- function(lav_obj_h0, lav_obj_h1) { stopifnot(inherits(lav_obj_h0, "lavaan")) stopifnot(inherits(lav_obj_h1, "lavaan")) ## this breaks if object not nested in (df >=) h1, so check df stopifnot(lav_obj_h0@test[[1]]$df >= lav_obj_h1@test[[1]]$df) ## remove any other (potentially hidden) h1 model from BOTH objects lav_obj_h0@external$h1.model <- NULL lav_obj_h1@external$h1.model <- NULL ## save old @test slot as template ## (so the @test[[1]]$df don't change while looping over tests to update) newTEST <- lav_obj_h0@test ## assemble a call to lavTestLRT() lrtCallTemplate <- list(quote(lavTestLRT), object = quote(lav_obj_h1), quote(lav_obj_h0)) # in ... 
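  ## sketch: once method/test/type is filled in below, eval(as.call(lrtCall))
  ## amounts to, e.g.:
  ##   lavTestLRT(object = lav_obj_h1, lav_obj_h0, method = "standard")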
## can only update tests available in both objects testNames0 <- names(lav_obj_h0@test) testNames1 <- names(lav_obj_h1@test) testNames <- intersect(testNames0, testNames1) ## loop over those tests for (tn in testNames) { lrtCall <- lrtCallTemplate ## conditional arguments: if (tn == "standard") { lrtCall$method <- "standard" } else if (tn %in% c("scaled.shifted","mean.var.adjusted")) { if (lav_obj_h0@Options$estimator == "PML") { lrtCall$method <- "mean.var.adjusted.PLRT" } else { lrtCall$method <- "satorra.2000" } lrtCall$scaled.shifted <- tn == "scaled.shifted" } else if (tn %in% c("satorra.bentler", "yuan.bentler","yuan.bentler.mplus")) { lrtCall$test <- tn } else if (grepl(pattern = "browne", x = tn)) { lrtCall$type <- tn } else { #TODO? # - if (tn %in% c("bootstrap", "bollen.stine")) next # - any other possibilities in @test? } ## get new test if (lav_obj_h0@test[[1]]$df == lav_obj_h1@test[[1]]$df) { ## suppress warning about == df ANOVA <- suppressWarnings(eval(as.call(lrtCall))) } else { ## maybe some other informative warning would be important to see ANOVA <- eval(as.call(lrtCall)) } ## replace old @test[[tn]] values newTEST[[tn]]$stat.group <- NULL # avoid wrong stats in summary() header? newTEST[[tn]]$stat <- ANOVA["lav_obj_h0" , "Chisq diff"] newTEST[[tn]]$df <- ANOVA["lav_obj_h0" , "Df diff" ] newTEST[[tn]]$pvalue <- ANOVA["lav_obj_h0" , "Pr(>Chisq)"] if (!is.null(newTEST[[tn]]$scaling.factor)) { newTEST[[tn]]$scaling.factor <- attr(ANOVA, "scale")[2] # first row is NA } if (!is.null(newTEST[[tn]]$shift.parameter)) { newTEST[[tn]]$shift.parameter <- attr(ANOVA, "shift")[2] # first row is NA } else { ## unless scaled.shifted, RMSEA is calculated from $standard$stat and ## df == sum($trace.UGamma). Reverse-engineer from $scaling factor: newTEST[[tn]]$trace.UGamma <- newTEST[[tn]]$df * newTEST[[tn]]$scaling.factor } ## should not be necessary to replace $trace.UGamma2 ## nor to replace $scaling.factor.h0/h1 } # end loop over tests ## assign updated @test slot and return lav_obj_h0@test <- newTEST lav_obj_h0 } lavaan/R/lav_export.R0000644000176200001440000001277414627656441014255 0ustar liggesusers# export `lavaan' lav model description to third-party software # lavExport <- function(object, target = "lavaan", prefix = "sem", dir.name = "lavExport", export = TRUE) { stopifnot(inherits(object, "lavaan")) target <- tolower(target) # check for conditional.x = TRUE # if(object@Model@conditional.x) { # stop("lavaan ERROR: this function is not (yet) available if conditional.x = TRUE") # } ngroups <- object@Data@ngroups if (ngroups > 1L) { group.label2 <- paste(".", object@Data@group.label, sep = "") } else { group.label2 <- "" } data.file <- paste(prefix, group.label2, ".", target, ".raw", sep = "") # 2. 
create syntax file
  if (target == "lavaan") {
    header <- ""
    syntax <- lav2lavaan(object)
    footer <- ""
    out <- paste(header, syntax, footer, sep = "")
  } else if (target == "mplus") {
    header <- lav_mplus_header(
      data.file = data.file,
      group.label = object@Data@group.label,
      ov.names = c(
        vnames(object@ParTable, "ov"),
        object@Data@sampling.weights
      ),
      ov.ord.names = vnames(object@ParTable, "ov.ord"),
      weight.name = object@Data@sampling.weights,
      listwise = lavInspect(object, "options")$missing == "listwise",
      estimator = lav_mplus_estimator(object),
      information = lavInspect(object, "options")$information,
      meanstructure = lavInspect(object, "meanstructure"),
      data.type = object@Data@data.type,
      nobs = object@Data@nobs[[1L]]
    )
    syntax <- lav2mplus(object, group.label = object@Data@group.label)
    footer <- paste("OUTPUT:\n sampstat standardized tech1;\n")
    out <- paste(header, syntax, footer, sep = "")
  } else if (target == "lisrel") {
    syntax <- lav2lisrel(object)
  } else if (target == "eqs") {
    syntax <- lav2eqs(object)
  } else if (target == "sem") {
    syntax <- lav2sem(object)
  } else if (target == "openmx") {
    syntax <- lav2openmx(object)
  } else {
    lav_msg_stop(gettextf("target %s has not been implemented yet", target))
  }

  # export to file?
  if (export) {
    dir.create(path = dir.name)
    input.file <- paste(dir.name, "/", prefix, ".", target, ".in", sep = "")
    cat(out, file = input.file, sep = "")
    # write data (if available)
    if (identical(object@Data@data.type, "full")) {
      for (g in 1:ngroups) {
        if (is.null(object@Data@eXo[[g]])) {
          DATA <- object@Data@X[[g]]
        } else {
          DATA <- cbind(object@Data@X[[g]], object@Data@eXo[[g]])
        }
        if (!is.null(object@Data@weights[[g]])) {
          DATA <- cbind(DATA, object@Data@weights[[g]])
        }
        write.table(DATA,
          file = paste(dir.name, "/", data.file[g], sep = ""),
          na = "-999999", col.names = FALSE, row.names = FALSE,
          quote = FALSE
        )
      }
    } else if (identical(object@Data@data.type, "moment")) {
      for (g in 1:ngroups) {
        DATA <- object@SampleStats@cov[[g]]
        write.table(DATA,
          file = paste(dir.name, "/", data.file[g], sep = ""),
          na = "-999999", col.names = FALSE, row.names = FALSE,
          quote = FALSE
        )
      }
    } else {
      lav_msg_warn(gettext("no data available"))
    }
    return(invisible(out))
  } else {
    # just return the syntax file for inspection
    class(out) <- c("lavaan.character", "character")
  }

  out
}

lav2check <- function(lav) {
  if (inherits(lav, "lavaan")) {
    lav <- lav@ParTable
  } else if (is.list(lav)) {
    # nothing to do
  } else {
    lav_msg_stop(gettext("lav must be of class `lavaan' or a parTable"))
  }

  # check syntax
  if (is.null(lav$ustart)) lav$ustart <- lav$est
  # check if free is missing
  if (is.null(lav$free)) lav$free <- rep(0L, length(lav$ustart))
  # check if label is missing
  if (is.null(lav$label)) lav$label <- rep("", length(lav$ustart))
  # check if group is missing
  if (is.null(lav$group)) lav$group <- rep(1L, length(lav$ustart))

  # if eq.id not all zero, create labels instead
  # if(!is.null(lav$eq.id) && !all(lav$eq.id == 0L)) {
  #     lav$label <- paste("p",as.character(lav$eq.id), sep="")
  #     lav$label[lav$label == "p0"] <- ""
  # }

  lav
}

## FIXME: this is completely UNFINISHED (just used to quickly get something)
lav2lavaan <- lav2lav <- function(lav) {
  lav <- lav2check(lav)

  header <- "# this model syntax is autogenerated by lavExport\n"
  footer <- "\n"

  # intercepts
  int.idx <- which(lav$op == "~1")
  lav$op[int.idx] <- "~"
  lav$rhs[int.idx] <- "1"
  # spacing around operator
  lav$op <- paste(" ", lav$op, " ", sep = "")

  lav2 <- ifelse(lav$free != 0L,
    ifelse(lav$label == "",
      paste(lav$lhs, lav$op, lav$rhs, sep = ""),
      paste(lav$lhs, lav$op,
lav$label, "*", lav$rhs, sep = "" ) ), ifelse(lav$label == "", paste(lav$lhs, lav$op, lav$ustart, "*", lav$rhs, sep = "" ), paste(lav$lhs, lav$op, lav$ustart, "*", lav$rhs, "+", lav$label, "*", lav$rhs, sep = "" ) ) ) body <- paste(lav2, collapse = "\n") out <- paste(header, body, footer, sep = "") class(out) <- c("lavaan.character", "character") out } lav2lisrel <- function(lav) { lav <- lav2check(lav) lav_msg_stop(gettext("this function needs revision")) } lav2eqs <- function(lav) { lav <- lav2check(lav) lav_msg_stop(gettext("this function needs revision")) } lav2sem <- function(lav) { lav <- lav2check(lav) lav_msg_stop(gettext("this function needs revision")) } lav2openmx <- function(lav) { lav <- lav2check(lav) lav_msg_stop(gettext("this function needs revision")) } lavaan/R/lav_mvreg_cluster.R0000644000176200001440000011141714627656441015607 0ustar liggesusers# loglikelihood clustered/twolevel data -- conditional.x = TRUE # YR: first version around Sept 2021 # take model-implied mean+variance matrices, and reorder/augment them # to facilitate computing of (log)likelihood in the two-level case # when conditional.x = TRUE: # - sigma.w and sigma.b: same dimensions, level-1 'Y' variables only # - sigma.zz: level-2 variables only # - sigma.yz: cov(level-1, level-2) # - beta.w: beta y within part # - beta.b: beta y between part # - beta.z: beta z (between-only) lav_mvreg_cluster_implied22l <- function(Lp = NULL, implied = NULL, Res.Int.W = NULL, Res.Int.B = NULL, Res.Pi.W = NULL, Res.Pi.B = NULL, Res.Sigma.W = NULL, Res.Sigma.B = NULL) { if (!is.null(implied)) { # FIXME: only for single-group analysis! Res.Sigma.W <- implied$res.cov[[1]] Res.Int.W <- implied$res.int[[1]] Res.Pi.W <- implied$res.slopes[[1]] Res.Sigma.B <- implied$res.cov[[2]] Res.Int.B <- implied$res.int[[2]] Res.Pi.B <- implied$res.slopes[[2]] } # within/between idx within.x.idx <- Lp$within.x.idx[[1]] between.y.idx <- Lp$between.y.idx[[2]] between.x.idx <- Lp$between.x.idx[[2]] # ov.idx per level ov.idx <- Lp$ov.idx # 'tilde' matrices: ALL variables within and between p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) # only 'y' ov.y.idx <- Lp$ov.y.idx # two levels only (for now) ov.y.idx1 <- ov.y.idx[[1]] ov.y.idx2 <- ov.y.idx[[2]] # Sigma.W.tilde Sigma.W.tilde <- matrix(0, p.tilde, p.tilde) Sigma.W.tilde[ov.y.idx1, ov.y.idx1] <- Res.Sigma.W # INT.W.tilde INT.W.tilde <- matrix(0, p.tilde, 1L) INT.W.tilde[ov.y.idx1, 1L] <- Res.Int.W # PI.W.tilde PI.W.tilde <- matrix(0, p.tilde, ncol(Res.Pi.W)) PI.W.tilde[ov.y.idx1, ] <- Res.Pi.W BETA.W.tilde <- rbind(t(INT.W.tilde), t(PI.W.tilde)) # Sigma.B.tilde Sigma.B.tilde <- matrix(0, p.tilde, p.tilde) Sigma.B.tilde[ov.y.idx2, ov.y.idx2] <- Res.Sigma.B # INT.B.tilde INT.B.tilde <- matrix(0, p.tilde, 1L) INT.B.tilde[ov.y.idx2, 1L] <- Res.Int.B # PI.B.tilde PI.B.tilde <- matrix(0, p.tilde, ncol(Res.Pi.B)) PI.B.tilde[ov.y.idx2, ] <- Res.Pi.B BETA.B.tilde <- rbind(t(INT.B.tilde), t(PI.B.tilde)) if (length(between.y.idx) > 0L) { rm.idx <- c(within.x.idx, between.x.idx, between.y.idx) # between AND x beta.z <- BETA.B.tilde[, between.y.idx, drop = FALSE] beta.b <- BETA.B.tilde[, -rm.idx, drop = FALSE] beta.w <- BETA.W.tilde[, -rm.idx, drop = FALSE] sigma.zz <- Sigma.B.tilde[between.y.idx, between.y.idx, drop = FALSE] sigma.yz <- Sigma.B.tilde[-rm.idx, between.y.idx, drop = FALSE] sigma.b <- Sigma.B.tilde[-rm.idx, -rm.idx, drop = FALSE] sigma.w <- Sigma.W.tilde[-rm.idx, -rm.idx, drop = FALSE] } else { rm.idx <- c(within.x.idx, between.x.idx) # all 'x' beta.z <- matrix(0, 0L, 0L) sigma.zz 
<- matrix(0, 0L, 0L)
    beta.b <- BETA.B.tilde[, -rm.idx, drop = FALSE]
    beta.w <- BETA.W.tilde[, -rm.idx, drop = FALSE]
    sigma.b <- Sigma.B.tilde[-rm.idx, -rm.idx, drop = FALSE]
    sigma.w <- Sigma.W.tilde[-rm.idx, -rm.idx, drop = FALSE]
    sigma.yz <- matrix(0, nrow(sigma.w), 0L)
  }

  # beta.wb
  # FIXME: not correct if some 'x' are split (overlap)
  # but because we ALWAYS treat split-x as 'y', this is not a problem
  beta.wb <- rbind(beta.w, beta.b[-1, , drop = FALSE])
  beta.wb[1, ] <- beta.wb[1, , drop = FALSE] + beta.b[1, , drop = FALSE]

  list(
    sigma.w = sigma.w, sigma.b = sigma.b,
    sigma.zz = sigma.zz, sigma.yz = sigma.yz,
    beta.w = beta.w, beta.b = beta.b, beta.z = beta.z,
    beta.wb = beta.wb
  )
}

# recreate implied matrices from 2L matrices
lav_mvreg_cluster_2l2implied <- function(Lp,
                                         sigma.w = NULL, sigma.b = NULL,
                                         sigma.zz = NULL, sigma.yz = NULL,
                                         beta.w = NULL, beta.b = NULL,
                                         beta.z = NULL) {
  # within/between idx
  within.x.idx <- Lp$within.x.idx[[1]]
  between.y.idx <- Lp$between.y.idx[[2]]
  between.x.idx <- Lp$between.x.idx[[2]]

  # ov.idx per level
  ov.idx <- Lp$ov.idx

  # 'tilde' matrices: ALL variables within and between
  p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]])))

  # only 'y'
  ov.y.idx <- Lp$ov.y.idx

  # two levels only (for now)
  ov.y.idx1 <- ov.y.idx[[1]]
  ov.y.idx2 <- ov.y.idx[[2]]

  # Sigma.W.tilde
  Sigma.W.tilde <- matrix(0, p.tilde, p.tilde)
  Sigma.W.tilde[ov.y.idx1, ov.y.idx1] <- sigma.w

  # INT.W.tilde
  INT.W.tilde <- matrix(0, p.tilde, 1L)
  INT.W.tilde[ov.y.idx1, 1L] <- beta.w[1L, ]

  # PI.W.tilde
  PI.W.tilde <- matrix(0, p.tilde, nrow(beta.w) - 1L)
  PI.W.tilde[ov.y.idx1, ] <- t(beta.w[-1L, ])

  # Sigma.B.tilde
  Sigma.B.tilde <- matrix(0, p.tilde, p.tilde)
  Sigma.B.tilde[ov.y.idx1, ov.y.idx1] <- sigma.b

  # INT.B.tilde
  INT.B.tilde <- matrix(0, p.tilde, 1L)
  INT.B.tilde[ov.y.idx1, 1L] <- beta.b[1L, ]

  # PI.B.tilde
  PI.B.tilde <- matrix(0, p.tilde, nrow(beta.b) - 1L)
  PI.B.tilde[ov.y.idx1, ] <- t(beta.b[-1L, ])

  if (length(between.y.idx) > 0L) {
    INT.B.tilde[between.y.idx, 1L] <- beta.z[1L, ]
    PI.B.tilde[between.y.idx, ] <- t(beta.z[-1L, ])
    Sigma.B.tilde[between.y.idx, between.y.idx] <- sigma.zz
    Sigma.B.tilde[ov.y.idx1, between.y.idx] <- sigma.yz
    Sigma.B.tilde[between.y.idx, ov.y.idx1] <- t(sigma.yz)
  }

  Res.Sigma.W <- Sigma.W.tilde[ov.y.idx1, ov.y.idx1, drop = FALSE]
  Res.Int.W <- INT.W.tilde[ov.y.idx1, , drop = FALSE]
  Res.Pi.W <- PI.W.tilde[ov.y.idx1, , drop = FALSE]

  Res.Sigma.B <- Sigma.B.tilde[ov.y.idx2, ov.y.idx2, drop = FALSE]
  Res.Int.B <- INT.B.tilde[ov.y.idx2, , drop = FALSE]
  Res.Pi.B <- PI.B.tilde[ov.y.idx2, , drop = FALSE]

  implied <- list(
    res.cov = list(Res.Sigma.W, Res.Sigma.B),
    res.int = list(Res.Int.W, Res.Int.B),
    res.slopes = list(Res.Pi.W, Res.Pi.B)
  )
  # Note: cov.x and mean.x must be added by the caller

  implied
}

lav_mvreg_cluster_loglik_samplestats_2l <- function(YLp = NULL,
                                                    Lp = NULL,
                                                    Res.Sigma.W = NULL,
                                                    Res.Int.W = NULL,
                                                    Res.Pi.W = NULL,
                                                    Res.Sigma.B = NULL,
                                                    Res.Int.B = NULL,
                                                    Res.Pi.B = NULL,
                                                    out = NULL, # 2l
                                                    Sinv.method = "eigen",
                                                    log2pi = FALSE,
                                                    minus.two = TRUE) {
  # map implied to 2l matrices
  if (is.null(out)) {
    out <- lav_mvreg_cluster_implied22l(
      Lp = Lp, implied = NULL,
      Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W,
      Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B,
      Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B
    )
  }
  sigma.w <- out$sigma.w
  sigma.b <- out$sigma.b
  sigma.zz <- out$sigma.zz
  sigma.yz <- out$sigma.yz
  beta.w <- out$beta.w
  beta.b <- out$beta.b
  beta.z <- out$beta.z
  beta.wb <- out$beta.wb

  # check for beta.wb
  if (is.null(out$beta.wb)) {
    beta.wb <- rbind(beta.w, beta.b[-1, , drop = FALSE])
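    # (explanatory note) the combined within+between coefficient matrix
    # stacks the within rows on top of the between slope rows; the two
    # intercept rows are collapsed into a single first row by adding the
    # between intercepts to the within intercepts (next statement)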
    beta.wb[1, ] <- beta.wb[1, , drop = FALSE] + beta.b[1, , drop = FALSE]
  }

  # log 2*pi
  LOG.2PI <- log(2 * pi)

  # Lp
  nclusters <- Lp$nclusters[[2]]
  cluster.size <- Lp$cluster.size[[2]]
  cluster.sizes <- Lp$cluster.sizes[[2]]
  ncluster.sizes <- Lp$ncluster.sizes[[2]]
  n.s <- Lp$cluster.size.ns[[2]]

  # dependent 'y' level-2 ('Z') only variables?
  between.y.idx <- Lp$between.y.idx[[2]]

  # extract (the many) sample statistics from YLp
  sample.wb <- YLp[[2]]$sample.wb
  sample.YYres.wb1 <- YLp[[2]]$sample.YYres.wb1
  sample.XX.wb1 <- YLp[[2]]$sample.XX.wb1
  sample.wb2 <- YLp[[2]]$sample.wb2
  sample.YYres.wb2 <- YLp[[2]]$sample.YYres.wb2
  sample.YresX.wb2 <- YLp[[2]]$sample.YresX.wb2
  sample.XX.wb2 <- YLp[[2]]$sample.XX.wb2
  sample.clz.Y2.res <- YLp[[2]]$sample.clz.Y2.res
  sample.clz.Y2.XX <- YLp[[2]]$sample.clz.Y2.XX
  sample.clz.Y2.B <- YLp[[2]]$sample.clz.Y2.B
  if (length(between.y.idx) > 0L) {
    sample.clz.ZZ.res <- YLp[[2]]$sample.clz.ZZ.res
    sample.clz.ZZ.XX <- YLp[[2]]$sample.clz.ZZ.XX
    sample.clz.ZZ.B <- YLp[[2]]$sample.clz.ZZ.B
    sample.clz.YZ.res <- YLp[[2]]$sample.clz.YZ.res
    sample.clz.YZ.XX <- YLp[[2]]$sample.clz.YZ.XX
    sample.clz.YresXZ <- YLp[[2]]$sample.clz.YresXZ # zero?
    sample.clz.XWZres <- YLp[[2]]$sample.clz.XWZres
  }

  # reconstruct S.PW
  wb1.diff <- sample.wb - beta.wb
  Y1Y1.wb.res <- (sample.YYres.wb1 +
    t(wb1.diff) %*% sample.XX.wb1 %*% (wb1.diff))

  # this one is weighted -- not the same as crossprod(Y2w.res)
  wb2.diff <- sample.wb2 - beta.wb
  Y2Y2w.res <- (sample.YYres.wb2 +
    sample.YresX.wb2 %*% (wb2.diff) +
    t(wb2.diff) %*% t(sample.YresX.wb2) +
    t(wb2.diff) %*% sample.XX.wb2 %*% (wb2.diff))

  S.PW <- (Y1Y1.wb.res - Y2Y2w.res) / sum(cluster.size - 1)

  # common parts:
  sigma.w.inv <- lav_matrix_symmetric_inverse(
    S = sigma.w,
    logdet = TRUE
  )
  sigma.w.logdet <- attr(sigma.w.inv, "logdet")
  if (length(between.y.idx) > 0L) {
    sigma.zz.inv <- lav_matrix_symmetric_inverse(
      S = sigma.zz,
      logdet = TRUE
    )
    sigma.zz.logdet <- attr(sigma.zz.inv, "logdet")
    sigma.yz.zi <- sigma.yz %*% sigma.zz.inv
    sigma.zi.zy <- t(sigma.yz.zi)
    sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy
  } else {
    sigma.b.z <- sigma.b
  }

  # minus two times the log-likelihood
  DIST <- numeric(ncluster.sizes)
  LOGDET <- numeric(ncluster.sizes)
  CONST <- numeric(ncluster.sizes)
  for (clz in seq_len(ncluster.sizes)) {
    # cluster size
    nj <- cluster.sizes[clz]

    # data between
    nj.idx <- which(cluster.size == nj)

    y2.diff <- sample.clz.Y2.B[[clz]] - beta.wb
    Y2Yc.yy <- (sample.clz.Y2.res[[clz]] +
      t(y2.diff) %*% sample.clz.Y2.XX[[clz]] %*% (y2.diff))

    if (length(between.y.idx) > 0L) {
      zz.diff <- sample.clz.ZZ.B[[clz]] - beta.z
      Y2Yc.zz <- (sample.clz.ZZ.res[[clz]] +
        t(zz.diff) %*% sample.clz.ZZ.XX[[clz]] %*% (zz.diff))
      Y2Yc.yz <- (sample.clz.YZ.res[[clz]] +
        sample.clz.YresXZ[[clz]] %*% zz.diff + # zero?
t(y2.diff) %*% sample.clz.XWZres[[clz]] + t(y2.diff) %*% sample.clz.YZ.XX[[clz]] %*% zz.diff) } # construct sigma.j sigma.j <- (nj * sigma.b.z) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse( S = sigma.j, logdet = TRUE ) sigma.j.logdet <- attr(sigma.j.inv, "logdet") if (length(between.y.idx) > 0L) { sigma.ji.yz.zi <- sigma.j.inv %*% sigma.yz.zi # part 1 -- zz Vinv.11 <- sigma.zz.inv + nj * (sigma.zi.zy %*% sigma.ji.yz.zi) q.zz <- sum(Vinv.11 * Y2Yc.zz) # part 2 -- yz q.yz <- -nj * sum(sigma.ji.yz.zi * Y2Yc.yz) } else { q.zz <- q.yz <- sigma.zz.logdet <- 0 } # part 5 -- yyc q.yyc <- -nj * sum(sigma.j.inv * Y2Yc.yy) if (log2pi) { P <- nj * nrow(sigma.w) + nrow(sigma.zz) CONST[clz] <- P * LOG.2PI } LOGDET[clz] <- sigma.zz.logdet + sigma.j.logdet DIST[clz] <- q.zz + 2 * q.yz - q.yyc } # q.yya + q.yyb q.W <- sum(cluster.size - 1) * sum(sigma.w.inv * S.PW) # logdet within part L.W <- sum(cluster.size - 1) * sigma.w.logdet # -2*times logl (without the constant) (for optimization) loglik <- sum(LOGDET * n.s) + sum(DIST) + q.W + L.W if (log2pi) { loglik <- loglik + sum(CONST * n.s) } # functions below compute -2 * logl if (!minus.two) { loglik <- loglik / (-2) } loglik } # first derivative -2*logl wrt Beta.W, Beta.B, Sigma.W, Sigma.B lav_mvreg_cluster_dlogl_2l_samplestats <- function(YLp = NULL, Lp = NULL, Res.Sigma.W = NULL, Res.Int.W = NULL, Res.Pi.W = NULL, Res.Sigma.B = NULL, Res.Int.B = NULL, Res.Pi.B = NULL, out = NULL, # 2l return.list = FALSE, Sinv.method = "eigen") { # map implied to 2l matrices if (is.null(out)) { out <- lav_mvreg_cluster_implied22l( Lp = Lp, implied = NULL, Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W, Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B, Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B ) } sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz beta.w <- out$beta.w beta.b <- out$beta.b beta.z <- out$beta.z beta.wb <- out$beta.wb # check for beta.wb if (is.null(out$beta.wb)) { beta.wb <- rbind(beta.w, beta.b[-1, , drop = FALSE]) beta.wb[1, ] <- beta.wb[1, , drop = FALSE] + beta.b[1, , drop = FALSE] } # Lp nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.sizes <- Lp$cluster.sizes[[2]] ncluster.sizes <- Lp$ncluster.sizes[[2]] n.s <- Lp$cluster.size.ns[[2]] within.x.idx <- Lp$within.x.idx[[1]] between.y.idx <- Lp$between.y.idx[[2]] w1.idx <- seq_len(length(within.x.idx) + 1L) b1.idx <- c(1L, seq_len(nrow(beta.wb))[-w1.idx]) # extract (the many) sample statistics from YLp sample.wb <- YLp[[2]]$sample.wb sample.YYres.wb1 <- YLp[[2]]$sample.YYres.wb1 sample.XX.wb1 <- YLp[[2]]$sample.XX.wb1 sample.wb2 <- YLp[[2]]$sample.wb2 sample.YYres.wb2 <- YLp[[2]]$sample.YYres.wb2 sample.YresX.wb2 <- YLp[[2]]$sample.YresX.wb2 sample.XX.wb2 <- YLp[[2]]$sample.XX.wb2 sample.clz.Y2.res <- YLp[[2]]$sample.clz.Y2.res sample.clz.Y2.XX <- YLp[[2]]$sample.clz.Y2.XX sample.clz.Y2.B <- YLp[[2]]$sample.clz.Y2.B if (length(between.y.idx) > 0L) { sample.clz.ZZ.res <- YLp[[2]]$sample.clz.ZZ.res sample.clz.ZZ.XX <- YLp[[2]]$sample.clz.ZZ.XX sample.clz.ZZ.B <- YLp[[2]]$sample.clz.ZZ.B sample.clz.YZ.res <- YLp[[2]]$sample.clz.YZ.res sample.clz.YZ.XX <- YLp[[2]]$sample.clz.YZ.XX sample.clz.YresXZ <- YLp[[2]]$sample.clz.YresXZ # zero? 
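    # (explanatory note) these are the same precomputed cluster-size-specific
    # sample statistics used by lav_mvreg_cluster_loglik_samplestats_2l()
    # above; the gradient below can therefore be evaluated from summary
    # statistics alone, without revisiting the raw data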
sample.clz.XWZres <- YLp[[2]]$sample.clz.XWZres } # reconstruct S.PW wb1.diff <- sample.wb - beta.wb Y1Y1.wb.res <- (sample.YYres.wb1 + t(wb1.diff) %*% sample.XX.wb1 %*% (wb1.diff)) # this one is weighted -- not the same as crossprod(Y2w.res) wb2.diff <- sample.wb2 - beta.wb Y2Y2w.res <- (sample.YYres.wb2 + sample.YresX.wb2 %*% (wb2.diff) + t(wb2.diff) %*% t(sample.YresX.wb2) + t(wb2.diff) %*% sample.XX.wb2 %*% (wb2.diff)) S.PW <- (Y1Y1.wb.res - Y2Y2w.res) / sum(cluster.size - 1) # common parts: sigma.w.inv <- lav_matrix_symmetric_inverse(S = sigma.w) G.beta.w <- matrix(0, ncluster.sizes, length(beta.w)) G.beta.b <- matrix(0, ncluster.sizes, length(beta.b)) G.beta.wb <- matrix(0, ncluster.sizes, length(beta.wb)) G.sigma.w1 <- matrix(0, ncluster.sizes, length(lav_matrix_vech(sigma.w))) G.sigma.b <- matrix(0, ncluster.sizes, length(lav_matrix_vech(sigma.b))) if (length(between.y.idx) > 0L) { G.beta.z <- matrix(0, ncluster.sizes, length(beta.z)) G.sigma.zz <- matrix(0, ncluster.sizes, length(lav_matrix_vech(sigma.zz))) G.sigma.yz <- matrix(0, ncluster.sizes, length(sigma.yz)) sigma.zz.inv <- lav_matrix_symmetric_inverse(S = sigma.zz) sigma.yz.zi <- sigma.yz %*% sigma.zz.inv sigma.zi.zy <- t(sigma.yz.zi) sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] y2.diff <- sample.clz.Y2.B[[clz]] - beta.wb XX.y2.diff <- sample.clz.Y2.XX[[clz]] %*% y2.diff Y2Yc.yy <- sample.clz.Y2.res[[clz]] + crossprod(y2.diff, XX.y2.diff) zz.diff <- sample.clz.ZZ.B[[clz]] - beta.z Y2Yc.zz <- (sample.clz.ZZ.res[[clz]] + t(zz.diff) %*% sample.clz.ZZ.XX[[clz]] %*% (zz.diff)) Y2Yc.yz <- (sample.clz.YZ.res[[clz]] + sample.clz.YresXZ[[clz]] %*% zz.diff + # zero? t(y2.diff) %*% sample.clz.XWZres[[clz]] + t(y2.diff) %*% sample.clz.YZ.XX[[clz]] %*% zz.diff) # construct sigma.j sigma.j <- (nj * sigma.b.z) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse(S = sigma.j) sigma.ji.yz.zi <- sigma.j.inv %*% sigma.yz.zi sigma.zi.zy.ji <- t(sigma.ji.yz.zi) sigma.ji.yz <- sigma.j.inv %*% sigma.yz ns.sigma.j.inv <- n.s[clz] * sigma.j.inv ns.sigma.zz.inv <- n.s[clz] * sigma.zz.inv ns.sigma.yz <- n.s[clz] * sigma.yz ns.sigma.ji.yz.zi <- n.s[clz] * sigma.ji.yz.zi # common parts ZZ.zi.yz.ji <- Y2Yc.zz %*% sigma.zi.zy.ji ji.YZ.zi <- sigma.j.inv %*% Y2Yc.yz %*% sigma.zz.inv jYZj.yy <- sigma.j.inv %*% Y2Yc.yy %*% sigma.j.inv jYZj.yz <- tcrossprod(ji.YZ.zi, sigma.ji.yz) jYZj.zz <- sigma.ji.yz.zi %*% ZZ.zi.yz.ji jYZj <- nj * (jYZj.yy + jYZj.zz - jYZj.yz - t(jYZj.yz)) # SIGMA.W (between part) g.sigma.w1 <- ns.sigma.j.inv - jYZj tmp <- g.sigma.w1 * 2 diag(tmp) <- diag(g.sigma.w1) G.sigma.w1[clz, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * g.sigma.w1 tmp <- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.sigma.b[clz, ] <- lav_matrix_vech(tmp) # SIGMA.ZZ YZ1 <- ZZ.zi.yz.ji %*% sigma.yz YZ2 <- crossprod(Y2Yc.yz, sigma.ji.yz) tmp <- (t(sigma.yz) %*% g.sigma.w1 %*% sigma.yz - 1 / nj * Y2Yc.zz - t(YZ1) - YZ1 + t(YZ2) + YZ2) g.sigma.zz <- (ns.sigma.zz.inv + nj * sigma.zz.inv %*% tmp %*% sigma.zz.inv) tmp <- g.sigma.zz * 2 diag(tmp) <- diag(g.sigma.zz) G.sigma.zz[clz, ] <- lav_matrix_vech(tmp) # SIGMA.ZY tmp1 <- crossprod(ZZ.zi.yz.ji, sigma.zz.inv) tmp2 <- ns.sigma.ji.yz.zi tmp3 <- ji.YZ.zi tmp4 <- jYZj %*% sigma.yz.zi g.sigma.yz <- 2 * nj * (tmp1 - tmp2 - tmp3 + tmp4) G.sigma.yz[clz, ] <- lav_matrix_vec(g.sigma.yz) # BETA.Z A <- (sigma.zz.inv + nj * (sigma.zi.zy.ji %*% sigma.yz.zi)) # symm! 
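      # (explanatory note, under the partitioned-inverse identity) A appears
      # to equal the zz-block of the inverted joint covariance matrix for a
      # cluster of size nj (cf. Vinv.11 in the loglik function above);
      # B, defined next, is the corresponding yz cross-block weight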
B <- nj * (sigma.zi.zy.ji) tmp.z <- (sample.clz.ZZ.XX[[clz]] %*% zz.diff %*% A - (t(sample.clz.YresXZ[[clz]]) + t(sample.clz.YZ.XX[[clz]]) %*% y2.diff) %*% t(B)) G.beta.z[clz, ] <- as.vector(-2 * tmp.z) # BETA.W (between part only) + BETA.B tmp <- (sample.clz.XWZres[[clz]] + sample.clz.YZ.XX[[clz]] %*% zz.diff) out.b <- tmp %*% sigma.zi.zy.ji - XX.y2.diff %*% sigma.j.inv out.w <- out.b + XX.y2.diff %*% sigma.w.inv tmp.b <- out.b[b1.idx, , drop = FALSE] tmp.w <- out.w[w1.idx, , drop = FALSE] G.beta.b[clz, ] <- as.vector(2 * nj * tmp.b) G.beta.w[clz, ] <- as.vector(2 * nj * tmp.w) } # clz d.beta.w1 <- matrix(colSums(G.beta.w), nrow(beta.w), ncol(beta.w)) d.beta.b <- matrix(colSums(G.beta.b), nrow(beta.b), ncol(beta.b)) d.sigma.w1 <- lav_matrix_vech_reverse(colSums(G.sigma.w1)) d.sigma.b <- lav_matrix_vech_reverse(colSums(G.sigma.b)) # z d.beta.z <- matrix(colSums(G.beta.z), nrow(beta.z), ncol(beta.z)) d.sigma.zz <- lav_matrix_vech_reverse(colSums(G.sigma.zz)) d.sigma.yz <- matrix(colSums(G.sigma.yz), nrow(sigma.yz), ncol(sigma.yz)) } # between.y.idx else { # no beween.y.idx for (clz in seq_len(ncluster.sizes)) { # cluster size nj <- cluster.sizes[clz] y2.diff <- sample.clz.Y2.B[[clz]] - beta.wb XX.y2.diff <- sample.clz.Y2.XX[[clz]] %*% y2.diff Y2Yc.yy <- sample.clz.Y2.res[[clz]] + crossprod(y2.diff, XX.y2.diff) # construct sigma.j sigma.j <- (nj * sigma.b) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse(S = sigma.j) # common part jYYj <- nj * sigma.j.inv %*% Y2Yc.yy %*% sigma.j.inv # SIGMA.W (between part) g.sigma.w1 <- (n.s[clz] * sigma.j.inv) - jYYj tmp <- g.sigma.w1 * 2 diag(tmp) <- diag(g.sigma.w1) G.sigma.w1[clz, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * g.sigma.w1 tmp <- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.sigma.b[clz, ] <- lav_matrix_vech(tmp) # BETA.W (between part only) + BETA.B out.b <- -1 * XX.y2.diff %*% sigma.j.inv out.w <- out.b + XX.y2.diff %*% sigma.w.inv tmp.b <- out.b[b1.idx, , drop = FALSE] tmp.w <- out.w[w1.idx, , drop = FALSE] G.beta.b[clz, ] <- as.vector(2 * nj * tmp.b) G.beta.w[clz, ] <- as.vector(2 * nj * tmp.w) } # cl d.beta.w1 <- matrix(colSums(G.beta.w), nrow(beta.w), ncol(beta.w)) d.beta.b <- matrix(colSums(G.beta.b), nrow(beta.b), ncol(beta.b)) d.sigma.w1 <- lav_matrix_vech_reverse(colSums(G.sigma.w1)) d.sigma.b <- lav_matrix_vech_reverse(colSums(G.sigma.b)) # z d.beta.z <- matrix(0, 0L, 0L) d.sigma.zz <- matrix(0, 0L, 0L) d.sigma.yz <- matrix(0, 0L, 0L) } # no-between-y # Sigma.W (bis) d.sigma.w2 <- sum(cluster.size - 1) * (sigma.w.inv - sigma.w.inv %*% S.PW %*% sigma.w.inv) tmp <- d.sigma.w2 * 2 diag(tmp) <- diag(d.sigma.w2) d.sigma.w2 <- tmp d.sigma.w <- d.sigma.w1 + d.sigma.w2 # beta.w (bis) d.beta.w2 <- -2 * (sample.XX.wb1 %*% (sample.wb - beta.wb))[w1.idx, , drop = FALSE] %*% sigma.w.inv d.beta.w <- d.beta.w1 + d.beta.w2 # rearrange dimplied <- lav_mvreg_cluster_2l2implied(Lp, sigma.w = d.sigma.w, sigma.b = d.sigma.b, sigma.zz = d.sigma.zz, sigma.yz = d.sigma.yz, beta.w = d.beta.w, beta.b = d.beta.b, beta.z = d.beta.z ) if (return.list) { return(dimplied) } # as a single vector out <- c( drop(dimplied$res.int[[1]]), lav_matrix_vec(dimplied$res.slopes[[1]]), lav_matrix_vech(dimplied$res.cov[[1]]), drop(dimplied$res.int[[2]]), lav_matrix_vec(dimplied$res.slopes[[2]]), lav_matrix_vech(dimplied$res.cov[[2]]) ) out } # cluster-wise scores -2*logl wrt Beta.W, Beta.B, Sigma.W, Sigma.B lav_mvreg_cluster_scores_2l <- function(Y1 = NULL, YLp = NULL, Lp = NULL, Res.Sigma.W = NULL, Res.Int.W = NULL, Res.Pi.W = NULL, Res.Sigma.B = NULL, 
Res.Int.B = NULL, Res.Pi.B = NULL, out = NULL, # 2l Sinv.method = "eigen") { # map implied to 2l matrices if (is.null(out)) { out <- lav_mvreg_cluster_implied22l( Lp = Lp, implied = NULL, Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W, Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B, Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B ) } sigma.w <- out$sigma.w sigma.b <- out$sigma.b sigma.zz <- out$sigma.zz sigma.yz <- out$sigma.yz beta.w <- out$beta.w beta.b <- out$beta.b beta.z <- out$beta.z beta.wb <- out$beta.wb # check for beta.wb if (is.null(out$beta.wb)) { beta.wb <- rbind(beta.w, beta.b[-1, , drop = FALSE]) beta.wb[1, ] <- beta.wb[1, , drop = FALSE] + beta.b[1, , drop = FALSE] } # Lp nclusters <- Lp$nclusters[[2]] cluster.size <- Lp$cluster.size[[2]] cluster.idx <- Lp$cluster.idx[[2]] within.x.idx <- Lp$within.x.idx[[1]] between.idx <- Lp$between.idx[[2]] between.y.idx <- Lp$between.y.idx[[2]] between.x.idx <- Lp$between.x.idx[[2]] y1.idx <- Lp$ov.y.idx[[1]] x1.idx <- c(within.x.idx, between.x.idx) # in that order # residuals for 'Y' Y1.wb <- Y1[, y1.idx, drop = FALSE] if (length(x1.idx) > 0L) { EXO.wb <- cbind(1, Y1[, x1.idx, drop = FALSE]) Y1.wb.hat <- EXO.wb %*% beta.wb Y1.wb.res <- Y1.wb - Y1.wb.hat } else { Y1.wb.res <- Y1.wb } # residuals 'Y' (level 2) Y2 <- YLp[[2]]$Y2 if (length(x1.idx) > 0L) { EXO.wb2 <- cbind(1, Y2[, x1.idx, drop = FALSE]) Y2w.res <- Y2[, y1.idx, drop = FALSE] - EXO.wb2 %*% beta.wb } else { EXO.wb2 <- matrix(1, nrow(Y2), 1L) Y2w.res <- Y2[, y1.idx, drop = FALSE] } # residual 'Z' (level 2) if (length(between.y.idx) > 0L) { if (length(between.x.idx) > 0L) { EXO.z <- cbind(1, Y2[, between.x.idx, drop = FALSE]) Y2.z <- Y2[, between.y.idx, drop = FALSE] Y2z.res <- Y2.z - EXO.z %*% beta.z # sample.z # XX.z <- crossprod(EXO.z) # sample.z <- try(solve(XX.z, crossprod(EXO.z, Y2.z))) # if(inherits(sample.z, "try-error")) { # sample.z <- MASS::ginv(XX.z) %*% crossprod(EXO.z, Y2.z) # } # sample.wb2 # sample.wb2 <- YLp[[2]]$sample.wb2 } else { Y2z.res <- Y2[, between.y.idx, drop = FALSE] } } # common parts: sigma.w.inv <- lav_matrix_symmetric_inverse(S = sigma.w) G.beta.w1 <- matrix(0, nclusters, length(beta.w)) G.beta.b <- matrix(0, nclusters, length(beta.b)) G.beta.wb <- matrix(0, nclusters, length(beta.wb)) G.sigma.w1 <- matrix(0, nclusters, length(lav_matrix_vech(sigma.w))) G.sigma.b <- matrix(0, nclusters, length(lav_matrix_vech(sigma.b))) if (length(between.y.idx) > 0L) { G.beta.z <- matrix(0, nclusters, length(beta.z)) G.sigma.zz <- matrix(0, nclusters, length(lav_matrix_vech(sigma.zz))) G.sigma.yz <- matrix(0, nclusters, length(sigma.yz)) sigma.zz.inv <- lav_matrix_symmetric_inverse(S = sigma.zz) sigma.yz.zi <- sigma.yz %*% sigma.zz.inv sigma.zi.zy <- t(sigma.yz.zi) sigma.b.z <- sigma.b - sigma.yz %*% sigma.zi.zy for (cl in seq_len(nclusters)) { # cluster size nj <- cluster.size[cl] # data within for the cluster (centered) Y1m <- Y1.wb.res[cluster.idx == cl, , drop = FALSE] yc <- Y2w.res[cl, ] # data between zc <- Y2z.res[cl, ] Y2Yc.yy <- tcrossprod(Y2w.res[cl, ]) Y2Yc.zz <- tcrossprod(Y2z.res[cl, ]) Y2Yc.yz <- tcrossprod(Y2w.res[cl, ], Y2z.res[cl, ]) # construct sigma.j sigma.j <- (nj * sigma.b.z) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse(S = sigma.j) sigma.ji.yz.zi <- sigma.j.inv %*% sigma.yz.zi sigma.zi.zy.ji <- t(sigma.ji.yz.zi) sigma.ji.yz <- sigma.j.inv %*% sigma.yz # common parts ZZ.zi.yz.ji <- Y2Yc.zz %*% sigma.zi.zy.ji ji.YZ.zi <- sigma.j.inv %*% Y2Yc.yz %*% sigma.zz.inv jYZj.yy <- sigma.j.inv %*% Y2Yc.yy %*% sigma.j.inv jYZj.yz <- 
tcrossprod(ji.YZ.zi, sigma.ji.yz) jYZj.zz <- sigma.ji.yz.zi %*% ZZ.zi.yz.ji jYZj <- nj * (jYZj.yy + jYZj.zz - jYZj.yz - t(jYZj.yz)) # SIGMA.W (between part) g.sigma.w1 <- sigma.j.inv - jYZj tmp <- g.sigma.w1 * 2 diag(tmp) <- diag(g.sigma.w1) G.sigma.w1[cl, ] <- lav_matrix_vech(tmp) # SIGMA.W (within part) # g.sigma.w2 <- ( (nj-1) * sigma.w.inv # - sigma.w.inv %*% (crossprod(Y1m) - nj*Y2Yc.yy) %*% sigma.w.inv ) # tmp <- g.sigma.w2*2; diag(tmp) <- diag(g.sigma.w2) # G.sigma.w2[cl,] <- lav_matrix_vech(tmp) # G.sigma.w[cl,] <- G.sigma.w1[cl,] + G.sigma.w2[cl,] # SIGMA.B g.sigma.b <- nj * g.sigma.w1 tmp <- g.sigma.b * 2 diag(tmp) <- diag(g.sigma.b) G.sigma.b[cl, ] <- lav_matrix_vech(tmp) # SIGMA.ZZ YZ1 <- ZZ.zi.yz.ji %*% sigma.yz YZ2 <- crossprod(Y2Yc.yz, sigma.ji.yz) tmp <- (t(sigma.yz) %*% g.sigma.w1 %*% sigma.yz - (1 / nj * Y2Yc.zz + t(YZ1) + YZ1 - t(YZ2) - YZ2)) g.sigma.zz <- (sigma.zz.inv + nj * sigma.zz.inv %*% tmp %*% sigma.zz.inv) tmp <- g.sigma.zz * 2 diag(tmp) <- diag(g.sigma.zz) G.sigma.zz[cl, ] <- lav_matrix_vech(tmp) # SIGMA.ZY # g.sigma.yz <- 2 * nj * ( # (sigma.j.inv %*% # (sigma.yz.zi %*% Y2Yc.zz - sigma.yz - Y2Yc.yz) # + jYZj %*% sigma.yz) %*% sigma.zz.inv ) tmp1 <- crossprod(ZZ.zi.yz.ji, sigma.zz.inv) tmp2 <- sigma.ji.yz.zi tmp3 <- ji.YZ.zi tmp4 <- jYZj %*% sigma.yz.zi g.sigma.yz <- 2 * nj * (tmp1 - tmp2 - tmp3 + tmp4) G.sigma.yz[cl, ] <- lav_matrix_vec(g.sigma.yz) # BETA.Z # here, we avoid the (sample.z - beta.z) approach exo.z <- cbind(1, Y2[cl, between.x.idx, drop = FALSE]) tmp1 <- (sigma.zz.inv + nj * (sigma.zi.zy.ji %*% sigma.yz.zi)) %*% zc tmp2 <- nj * (sigma.zi.zy.ji) %*% yc tmp.z <- crossprod(exo.z, drop(tmp1 - tmp2)) G.beta.z[cl, ] <- as.vector(-2 * tmp.z) # BETA.W # exo.w <- cbind(1, # Y1[cluster.idx == cl, within.x.idx, drop = FALSE]) # G.beta.w[cl,] <- as.vector( 2 * t(exo.w) %*% ( # matrix(1, nj, 1) %x% (zc %*% sigma.zi.zy.ji - # yc %*% sigma.j.inv + # yc %*% sigma.w.inv) - # Y1m %*% sigma.w.inv) ) # BETA.W (between part only) exo2.w <- cbind(1, Y2[cl, within.x.idx, drop = FALSE]) tmp2 <- (zc %*% sigma.zi.zy.ji - yc %*% sigma.j.inv + yc %*% sigma.w.inv) G.beta.w1[cl, ] <- as.vector(2 * nj * crossprod(exo2.w, tmp2)) # BETA.W (within part only) # exo.w <- cbind(1, # Y1[cluster.idx == cl, within.x.idx, drop = FALSE]) # tmp1 <- - Y1m %*% sigma.w.inv # G.beta.ww <- as.vector( 2 * crossprod(exo.w, tmp1) ) # G.beta.w[cl,] <- G.beta.w1 + G.beta.ww # G.beta.w2[cl,] <- G.beta.ww # BETA.B exo2.b <- cbind(1, Y2[cl, between.x.idx, drop = FALSE]) tmp <- (zc %*% sigma.zi.zy.ji - yc %*% sigma.j.inv) G.beta.b[cl, ] <- as.vector(2 * nj * crossprod(exo2.b, tmp)) } # cl } # between.y.idx else { # no beween.y.idx for (cl in seq_len(nclusters)) { # cluster size nj <- cluster.size[cl] # data within for the cluster (centered) Y1m <- Y1.wb.res[cluster.idx == cl, , drop = FALSE] yc <- Y2w.res[cl, ] # data between Y2Yc.yy <- tcrossprod(Y2w.res[cl, ]) # construct sigma.j sigma.j <- (nj * sigma.b) + sigma.w sigma.j.inv <- lav_matrix_symmetric_inverse(S = sigma.j) # common part jYYj <- nj * sigma.j.inv %*% Y2Yc.yy %*% sigma.j.inv # SIGMA.W # g.sigma.w <- ( (nj-1) * sigma.w.inv # - sigma.w.inv %*% (crossprod(Y1m) - nj*Y2Yc.yy) %*% sigma.w.inv # + sigma.j.inv - jYYj ) # tmp <- g.sigma.w*2; diag(tmp) <- diag(g.sigma.w) # G.sigma.w[cl,] <- lav_matrix_vech(tmp) # SIGMA.W (between part) g.sigma.w1 <- sigma.j.inv - jYYj tmp <- g.sigma.w1 * 2 diag(tmp) <- diag(g.sigma.w1) G.sigma.w1[cl, ] <- lav_matrix_vech(tmp) # SIGMA.B g.sigma.b <- nj * (sigma.j.inv - jYYj) tmp <- g.sigma.b * 2 diag(tmp) <- 
diag(g.sigma.b) G.sigma.b[cl, ] <- lav_matrix_vech(tmp) # BETA.W (between part only) exo2.w <- cbind(1, Y2[cl, within.x.idx, drop = FALSE]) tmp2 <- (-yc %*% sigma.j.inv + yc %*% sigma.w.inv) G.beta.w1[cl, ] <- as.vector(2 * nj * crossprod(exo2.w, tmp2)) # BETA.B exo2.b <- cbind(1, Y2[cl, between.x.idx, drop = FALSE]) tmp <- -yc %*% sigma.j.inv G.beta.b[cl, ] <- as.vector(2 * nj * crossprod(exo2.b, tmp)) } # cl } # no-between-y # beta.w (bis) # d.beta.w2 <- -2 * t(EXO.wb[,1:(length(within.x.idx) + 1L), drop = FALSE]) %*% Y1.wb.res %*% sigma.w.inv Y1.wb.res.i <- Y1.wb.res %*% sigma.w.inv w1.idx <- seq_len(length(within.x.idx) + 1L) a1.idx <- rep(w1.idx, times = ncol(Y1.wb.res.i)) b1.idx <- rep(seq_len(ncol(Y1.wb.res.i)), each = length(w1.idx)) TMP <- EXO.wb[, a1.idx, drop = FALSE] * Y1.wb.res.i[, b1.idx, drop = FALSE] G.beta.w2 <- -2 * rowsum.default(TMP, cluster.idx, reorder = FALSE, na.rm = TRUE ) G.beta.w <- G.beta.w1 + G.beta.w2 # Sigma.W (bis) # d.sigma.w2 <- sum(cluster.size - 1) * ( sigma.w.inv # - sigma.w.inv %*% S.PW %*% sigma.w.inv ) # tmp <- d.sigma.w2*2; diag(tmp) <- diag(d.sigma.w2) # d.sigma.w2 <- tmp # g.sigma.w2 <- ( (nj-1) * sigma.w.inv # - sigma.w.inv %*% (crossprod(Y1m) - nj*Y2Yc.yy) %*% sigma.w.inv ) Y1a.res <- Y1.wb.res - Y2w.res[cluster.idx, , drop = FALSE] Y1a.res.i <- Y1a.res %*% sigma.w.inv idx1 <- lav_matrix_vech_col_idx(nrow(sigma.w)) idx2 <- lav_matrix_vech_row_idx(nrow(sigma.w)) SW2 <- matrix(lav_matrix_vech(sigma.w.inv), nrow = nclusters, length(lav_matrix_vech(sigma.w.inv)), byrow = TRUE ) SW2 <- SW2 * (cluster.size - 1) TMP <- Y1a.res.i[, idx1, drop = FALSE] * Y1a.res.i[, idx2, drop = FALSE] TMP2 <- rowsum.default(TMP, cluster.idx, reorder = FALSE, na.rm = TRUE) G.sigma.w2 <- 2 * (SW2 - TMP2) diagh.idx <- lav_matrix_diagh_idx(nrow(sigma.w)) G.sigma.w2[, diagh.idx] <- G.sigma.w2[, diagh.idx, drop = FALSE] / 2 G.sigma.w <- G.sigma.w1 + G.sigma.w2 # rearrange columns to Res.Int.W, Res.Pi.W, Res.Sigma.W, # Res.Int.B, Res.Pi.B, Res.Sigma.B # ov.idx per level ov.idx <- Lp$ov.idx # 'tilde' matrices: ALL variables within and between p.tilde <- length(unique(c(ov.idx[[1]], ov.idx[[2]]))) p.tilde.star <- p.tilde * (p.tilde + 1) / 2 B.tilde <- lav_matrix_vech_reverse(seq_len(p.tilde.star)) # only 'y' ov.y.idx <- Lp$ov.y.idx # two levels only (for now) ov.y.idx1 <- ov.y.idx[[1]] ov.y.idx2 <- ov.y.idx[[2]] # WITHIN (is easy) BETA.W.idx <- matrix(seq_len(length(beta.w)), nrow(beta.w), ncol(beta.w)) BETA.B.idx <- matrix(seq_len(length(beta.b)), nrow(beta.b), ncol(beta.b)) Res.Int.W <- G.beta.w[, BETA.W.idx[1L, ], drop = FALSE] Res.Pi.W <- G.beta.w[, lav_matrix_vecr(BETA.W.idx[-1L, ]), drop = FALSE] Res.Sigma.W <- G.sigma.w # Sigma.B Sigma.B.tilde <- matrix(0, nclusters, p.tilde.star) col.idx <- lav_matrix_vech(B.tilde[ov.y.idx1, ov.y.idx1, drop = FALSE]) Sigma.B.tilde[, col.idx] <- G.sigma.b # Int.B BETA.B.tilde <- matrix(seq_len(nrow(beta.b) * p.tilde), nrow(beta.b), p.tilde) Int.B <- matrix(0, nclusters, p.tilde) Int.B[, ov.y.idx1] <- G.beta.b[, BETA.B.idx[1L, ]] # Pi.B Pi.B <- matrix(0, nclusters, p.tilde * (nrow(beta.b) - 1L)) col.idx <- lav_matrix_vecr(BETA.B.tilde[-1L, ov.y.idx1, drop = FALSE]) Pi.B[, col.idx] <- G.beta.b[, lav_matrix_vecr(BETA.B.idx[-1L, ]), drop = FALSE] if (length(between.y.idx) > 0L) { # Sigma.B: add yz/zz parts col.idx <- lav_matrix_vec(B.tilde[ov.y.idx1, between.y.idx, drop = FALSE]) Sigma.B.tilde[, col.idx] <- G.sigma.yz col.idx <- lav_matrix_vech(B.tilde[between.y.idx, between.y.idx, drop = FALSE ]) Sigma.B.tilde[, col.idx] <- G.sigma.zz # 
Int.B: add z-part BETA.Z.idx <- matrix(seq_len(length(beta.z)), nrow(beta.z), ncol(beta.z)) Int.B[, between.y.idx] <- G.beta.z[, BETA.Z.idx[1L, ], drop = FALSE] # Pi.B: add beta.z col.idx <- lav_matrix_vecr(BETA.B.tilde[-1L, between.y.idx, drop = FALSE]) Pi.B[, col.idx] <- G.beta.z[, lav_matrix_vecr(BETA.Z.idx[-1L, ]), drop = FALSE] } # only extract ov.y.idx2 for BETWEEN col.idx <- lav_matrix_vech(B.tilde[ov.y.idx2, ov.y.idx2, drop = FALSE]) Res.Sigma.B <- Sigma.B.tilde[, col.idx, drop = FALSE] Res.Int.B <- Int.B[, ov.y.idx2, drop = FALSE] col.idx <- lav_matrix_vecr(BETA.B.tilde[-1, ov.y.idx2]) Res.Pi.B <- Pi.B[, col.idx, drop = FALSE] SCORES <- cbind( Res.Int.W, Res.Pi.W, Res.Sigma.W, Res.Int.B, Res.Pi.B, Res.Sigma.B ) SCORES } # first-order information: outer crossprod of scores per cluster lav_mvreg_cluster_information_firstorder <- function(Y1 = NULL, YLp = NULL, Lp = NULL, Res.Sigma.W = NULL, Res.Int.W = NULL, Res.Pi.W = NULL, Res.Sigma.B = NULL, Res.Int.B = NULL, Res.Pi.B = NULL, divide.by.two = FALSE, Sinv.method = "eigen") { N <- NROW(Y1) SCORES <- lav_mvreg_cluster_scores_2l( Y1 = Y1, YLp = YLp, Lp = Lp, Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W, Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B, Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B, Sinv.method = Sinv.method ) # divide by 2 (if we want scores wrt objective function) if (divide.by.two) { SCORES <- SCORES / 2 } # unit information information <- crossprod(SCORES) / Lp$nclusters[[2]] information } lavaan/R/lav_efa_utils.R0000644000176200001440000000231014627656441014670 0ustar liggesusers# utility function related to EFA # generate 'efa' syntax for a single block of factors lav_syntax_efa <- function(ov.names = NULL, nfactors = 1L, twolevel = FALSE) { if (twolevel) { tmp <- lav_syntax_efa(ov.names = ov.names, nfactors = nfactors) model <- c("level: 1", tmp, "level: 2", tmp) } else { model <- character(nfactors) for (f in seq_len(nfactors)) { txt <- paste('efa("efa")*f', f, " =~ ", paste(ov.names, collapse = " + "), sep = "" ) model[f] <- txt } } model } # extract *standardized* loadings from efaList lav_efa_get_loadings <- function(object, ...) 
{ # kill object$loadings if present object[["loadings"]] <- NULL out <- lapply(object, function(x) { STD <- lavTech(x, "std", add.class = TRUE, add.labels = TRUE, list.by.group = FALSE ) lambda.idx <- which(names(STD) == "lambda") LAMBDA <- STD[lambda.idx] names(LAMBDA) <- NULL # if only single block, drop list if (length(LAMBDA) == 1L) { LAMBDA <- LAMBDA[[1]] } else { names(LAMBDA) <- x@Data@block.label } LAMBDA }) # drop list if only a single model if (length(out) == 1L) { out <- out[[1]] } out } lavaan/R/lav_partable_cache.R0000644000176200001440000000126214627656441015637 0ustar liggesusers# store pta in attributes of partable lav_partable_set_cache <- function(partable, pta = NULL, force = FALSE) { if (!force && !is.null(attr(partable, "vnames")) && !is.null(attr(partable, "nvar"))) { return(partable) # cache already OK } if (is.null(pta)) { if (force) attr(partable, "vnames") <- NULL pta <- lav_partable_attributes(partable) } for (n in names(pta)) { attr(partable, n) <- pta[[n]] } partable } lav_partable_remove_cache <- function(partable) { attributelist <- names(attributes(partable)) for (n in attributelist) { if (n != "ovda" && n != "names") attr(partable, n) <- NULL } partable } lavaan/R/ctr_modelcov.R0000644000176200001440000000136614627656440014544 0ustar liggesusers# takes a model in lavaan syntax and the user's data and returns the covariance # matrix of observed variables. Useful so that the user can do things like # diagnose errors in the cov matrix, use cov2cor to look at the correlation # matrix, try and invert the sample covariance matrix, etc. # update 5/27/2011 JEB # changelog: using sem and inspect to get output. # This way, all arguments such as groups, etc, can be used # update 3 june 2011 YR: removed se="none" (since now implied by do.fit=FALSE) # update 13 dec 2011 YR: changed name (to avoid confusion with the # model-implied cov) inspectSampleCov <- function(model, data, ...) { fit <- sem(model, data = data, ..., do.fit = FALSE) inspect(fit, "sampstat") } lavaan/R/lav_model_wls.R0000644000176200001440000000544414627656441014715 0ustar liggesusers# compute WLS.est (as a list per group) lav_model_wls_est <- function(lavmodel = NULL, GLIST = NULL, lavimplied = NULL) { nblocks <- lavmodel@nblocks meanstructure <- lavmodel@meanstructure if (.hasSlot(lavmodel, "correlation")) { correlation <- lavmodel@correlation } else { correlation <- FALSE } categorical <- lavmodel@categorical group.w.free <- lavmodel@group.w.free num.idx <- lavmodel@num.idx # model-implied statistics if (is.null(lavimplied)) { lavimplied <- lav_model_implied(lavmodel, GLIST = GLIST) } WLS.est <- vector("list", length = nblocks) for (g in 1:nblocks) { if (categorical) { # order of elements is important here: # 1. thresholds + means (interleaved) # 2. slopes (if any, columnwise per exo) # 3. variances (if any) # 4. correlations (no diagonal!) 
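      # illustrative sketch (hypothetical 3-variable model, no exo): with
      # two ordered items (3 thresholds each) and one numeric item, the
      # statistics would roughly be stacked as: the 6 thresholds with the
      # numeric mean interleaved in its variable's position, then the
      # numeric variance, then the 3 correlations (lower diagonal)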
if (lavmodel@conditional.x) { wls.est <- c( lavimplied$res.th[[g]], lav_matrix_vec(lavimplied$res.slopes[[g]]), diag(lavimplied$res.cov[[g]])[num.idx[[g]]], lav_matrix_vech(lavimplied$res.cov[[g]], diagonal = FALSE ) ) } else { wls.est <- c( lavimplied$th[[g]], diag(lavimplied$cov[[g]])[num.idx[[g]]], lav_matrix_vech(lavimplied$cov[[g]], diagonal = FALSE ) ) } } else { # CONTINUOUS DIAG <- TRUE if (correlation) { DIAG <- FALSE } if (lavmodel@conditional.x && lavmodel@nexo[g] > 0L) { # order = vec(Beta), where first row are intercepts # cbind(res.int, res.slopes) is t(Beta) # so we need vecr if (meanstructure) { wls.est <- c( lav_matrix_vecr( cbind( lavimplied$res.int[[g]], lavimplied$res.slopes[[g]] ) ), lav_matrix_vech(lavimplied$res.cov[[g]], diagonal = DIAG ) ) } else { wls.est <- c( lav_matrix_vecr(lavimplied$res.slopes[[g]]), lav_matrix_vech(lavimplied$res.cov[[g]], diagonal = DIAG ) ) } } else { if (meanstructure) { wls.est <- c( lavimplied$mean[[g]], lav_matrix_vech(lavimplied$cov[[g]], diagonal = DIAG ) ) } else { wls.est <- lav_matrix_vech(lavimplied$cov[[g]], diagonal = DIAG ) } } # conditional.x = FALSE } # categorical = FALSE if (group.w.free) { wls.est <- c(lavimplied$group.w[[g]], wls.est) } WLS.est[[g]] <- wls.est } WLS.est } # Note: lav_model_wls_v() is replaced by lav_model_h1_information() in 0.6-1 lavaan/R/lav_model_compute.R0000644000176200001440000005755314627656441015574 0ustar liggesuserscomputeSigmaHat <- function(lavmodel = NULL, GLIST = NULL, extra = FALSE, delta = TRUE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nmat <- lavmodel@nmat nvar <- lavmodel@nvar nblocks <- lavmodel@nblocks representation <- lavmodel@representation # return a list Sigma.hat <- vector("list", length = nblocks) for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { Sigma.hat[[g]] <- computeSigmaHat.LISREL( MLIST = MLIST, delta = delta ) } else if (representation == "RAM") { Sigma.hat[[g]] <- lav_ram_sigmahat(MLIST = MLIST, delta = delta) } else { lav_msg_stop(gettext( "only LISREL and RAM representation has been implemented for now")) } if (lav_debug()) print(Sigma.hat[[g]]) if (extra) { # check if matrix is positive definite ev <- eigen(Sigma.hat[[g]], symmetric = TRUE, only.values = TRUE)$values if (any(ev < sqrt(.Machine$double.eps)) || sum(ev) == 0) { Sigma.hat.inv <- MASS::ginv(Sigma.hat[[g]]) Sigma.hat.log.det <- log(.Machine$double.eps) attr(Sigma.hat[[g]], "po") <- FALSE attr(Sigma.hat[[g]], "inv") <- Sigma.hat.inv attr(Sigma.hat[[g]], "log.det") <- Sigma.hat.log.det } else { ## since we already do an 'eigen' decomposition, we should ## 'reuse' that information, instead of doing a new cholesky? 
# EV <- eigen(Sigma.hat[[g]], symmetric = TRUE) # Sigma.hat.inv <- tcrossprod(EV$vectors / rep(EV$values, # each = length(EV$values)), EV$vectors) # Sigma.hat.log.det <- sum(log(EV$values)) ## --> No, chol() is much (x2) times faster Sigma.hat.inv <- inv.chol(Sigma.hat[[g]], logdet = TRUE) Sigma.hat.log.det <- attr(Sigma.hat.inv, "logdet") attr(Sigma.hat[[g]], "po") <- TRUE attr(Sigma.hat[[g]], "inv") <- Sigma.hat.inv attr(Sigma.hat[[g]], "log.det") <- Sigma.hat.log.det } } } # nblocks Sigma.hat } ## only if conditional.x = TRUE ## compute the (larger) unconditional 'joint' covariance matrix (y,x) ## ## Sigma (Joint ) = [ (S11, S12), ## (S21, S22) ] where ## S11 = Sigma.res + PI %*% cov.x %*% t(PI) ## S12 = PI %*% cov.x ## S21 = cov.x %*% t(PI) ## S22 = cov.x computeSigmaHatJoint <- function(lavmodel = NULL, GLIST = NULL, extra = FALSE, delta = TRUE) { stopifnot(lavmodel@conditional.x) # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nmat <- lavmodel@nmat nvar <- lavmodel@nvar nblocks <- lavmodel@nblocks representation <- lavmodel@representation # return a list Sigma.hat <- vector("list", length = nblocks) for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { res.Sigma <- computeSigmaHat.LISREL(MLIST = MLIST, delta = delta) res.int <- computeMuHat.LISREL(MLIST = MLIST) res.slopes <- computePI.LISREL(MLIST = MLIST) S.xx <- MLIST$cov.x S.yy <- res.Sigma + res.slopes %*% S.xx %*% t(res.slopes) S.yx <- res.slopes %*% S.xx S.xy <- S.xx %*% t(res.slopes) Sigma.hat[[g]] <- rbind(cbind(S.yy, S.yx), cbind(S.xy, S.xx)) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } if (lav_debug()) print(Sigma.hat[[g]]) if (extra) { # check if matrix is positive definite ev <- eigen(Sigma.hat[[g]], symmetric = TRUE, only.values = TRUE)$values if (any(ev < .Machine$double.eps) || sum(ev) == 0) { Sigma.hat.inv <- MASS::ginv(Sigma.hat[[g]]) Sigma.hat.log.det <- log(.Machine$double.eps) attr(Sigma.hat[[g]], "po") <- FALSE attr(Sigma.hat[[g]], "inv") <- Sigma.hat.inv attr(Sigma.hat[[g]], "log.det") <- Sigma.hat.log.det } else { ## FIXME ## since we already do an 'eigen' decomposition, we should ## 'reuse' that information, instead of doing a new cholesky Sigma.hat.inv <- inv.chol(Sigma.hat[[g]], logdet = TRUE) Sigma.hat.log.det <- attr(Sigma.hat.inv, "logdet") attr(Sigma.hat[[g]], "po") <- TRUE attr(Sigma.hat[[g]], "inv") <- Sigma.hat.inv attr(Sigma.hat[[g]], "log.det") <- Sigma.hat.log.det } } } # nblocks Sigma.hat } computeMuHat <- function(lavmodel = NULL, GLIST = NULL) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nmat <- lavmodel@nmat nblocks <- lavmodel@nblocks representation <- lavmodel@representation meanstructure <- lavmodel@meanstructure # return a list Mu.hat <- vector("list", length = nblocks) for (g in 1:nblocks) { # which mm belong to group g? 
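    # (explanatory note) nmat holds the number of model matrices per block,
    # so cumsum(c(0, nmat))[g] is the offset of block g within GLIST; e.g.,
    # (assumed values for illustration) nmat = c(3, 3) gives GLIST[1:3]
    # for block 1 and GLIST[4:6] for block 2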
mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (!meanstructure) { Mu.hat[[g]] <- numeric(lavmodel@nvar[g]) } else if (representation == "LISREL") { Mu.hat[[g]] <- computeMuHat.LISREL(MLIST = MLIST) } else if (representation == "RAM") { Mu.hat[[g]] <- lav_ram_muhat(MLIST = MLIST) } else { lav_msg_stop(gettext( "only RAM and LISREL representation has been implemented for now")) } } # nblocks Mu.hat } ## only if conditional.x = TRUE ## compute the (larger) unconditional 'joint' mean vector (y,x) ## ## Mu (Joint ) = [ Mu.y, Mu.x ] where ## Mu.y = res.int + PI %*% M.x ## Mu.x = M.x computeMuHatJoint <- function(lavmodel = NULL, GLIST = NULL) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nmat <- lavmodel@nmat nblocks <- lavmodel@nblocks representation <- lavmodel@representation meanstructure <- lavmodel@meanstructure # return a list Mu.hat <- vector("list", length = nblocks) for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] if (!meanstructure) { Mu.hat[[g]] <- numeric(lavmodel@nvar[g]) } else if (representation == "LISREL") { MLIST <- GLIST[mm.in.group] res.int <- computeMuHat.LISREL(MLIST = MLIST) res.slopes <- computePI.LISREL(MLIST = MLIST) M.x <- MLIST$mean.x Mu.y <- res.int + res.slopes %*% M.x Mu.x <- M.x Mu.hat[[g]] <- c(Mu.y, Mu.x) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } } # nblocks Mu.hat } # TH.star = DELTA.star * (th.star - pi0.star) # see Muthen 1984 eq 11 computeTH <- function(lavmodel = NULL, GLIST = NULL, delta = TRUE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation th.idx <- lavmodel@th.idx # return a list TH <- vector("list", length = nblocks) # compute TH for each group for (g in 1:nblocks) { if (length(th.idx[[g]]) == 0) { TH[[g]] <- numeric(0L) next } # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] if (representation == "LISREL") { TH[[g]] <- computeTH.LISREL( MLIST = GLIST[mm.in.group], th.idx = th.idx[[g]], delta = delta ) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } } TH } # PI = slope structure # see Muthen 1984 eq 12 computePI <- function(lavmodel = NULL, GLIST = NULL, delta = TRUE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation conditional.x <- lavmodel@conditional.x # return a list PI <- vector("list", length = nblocks) # compute TH for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (!conditional.x) { PI.g <- numeric(lavmodel@nvar[g]) } else if (representation == "LISREL") { PI.g <- computePI.LISREL(MLIST = MLIST, delta = delta) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } PI[[g]] <- PI.g } PI } # GW = group weight computeGW <- function(lavmodel = NULL, GLIST = NULL) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation group.w.free <- lavmodel@group.w.free # return a list GW <- vector("list", length = nblocks) # compute GW for each group for (g in 1:nblocks) { # which mm belong to group g? 
mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g]
    MLIST <- GLIST[mm.in.group]

    if (!group.w.free) {
      GW.g <- 0.0 # FIXME
    } else if (representation == "LISREL") {
      GW.g <- as.numeric(MLIST$gw[1, 1])
    } else {
      lav_msg_stop(gettext(
        "only representation LISREL has been implemented for now"))
    }
    GW[[g]] <- GW.g
  }

  # transform to proportions
  # gw <- unlist(GW)
  # gw <- exp(gw) / sum(exp(gw))
  # for(g in 1:nblocks) {
  #     GW[[g]] <- gw[g]
  # }

  GW
}

# *unconditional* variance/covariance matrix of Y
# - same as Sigma.hat (if all Y are continuous)
# - if also Gamma, cov.x is used (only if categorical)
computeVY <- function(lavmodel = NULL, GLIST = NULL, diagonal.only = FALSE) {
  # state or final?
  if (is.null(GLIST)) GLIST <- lavmodel@GLIST
  nblocks <- lavmodel@nblocks
  nmat <- lavmodel@nmat
  representation <- lavmodel@representation

  # return a list
  VY <- vector("list", length = nblocks)

  # compute VY for each group
  for (g in 1:nblocks) {
    # which mm belong to group g?
    mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g]
    MLIST <- GLIST[mm.in.group]

    if (representation == "LISREL") {
      VY.g <- computeVY.LISREL(MLIST = MLIST)
    } else if (representation == "RAM") {
      # does not work for categorical setting yet
      stopifnot(!lavmodel@categorical)
      # does not work if conditional.x = TRUE
      stopifnot(!lavmodel@conditional.x)
      VY.g <- lav_ram_sigmahat(MLIST = MLIST)
    } else {
      lav_msg_stop(gettext(
        "only RAM and LISREL representation has been implemented for now"))
    }
    if (diagonal.only) {
      VY[[g]] <- diag(VY.g)
    } else {
      VY[[g]] <- VY.g
    }
  }

  VY
}

# V(ETA): latent variances/covariances
computeVETA <- function(lavmodel = NULL, GLIST = NULL,
                        remove.dummy.lv = FALSE) {
  # state or final?
  if (is.null(GLIST)) GLIST <- lavmodel@GLIST
  nblocks <- lavmodel@nblocks
  nmat <- lavmodel@nmat
  representation <- lavmodel@representation

  # return a list
  VETA <- vector("list", length = nblocks)

  # compute VETA for each group
  for (g in 1:nblocks) {
    # which mm belong to group g?
    mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g]
    MLIST <- GLIST[mm.in.group]

    if (representation == "LISREL") {
      VETA.g <- computeVETA.LISREL(MLIST = MLIST)
      if (remove.dummy.lv) {
        # remove all dummy latent variables
        lv.idx <- c(
          lavmodel@ov.y.dummy.lv.idx[[g]],
          lavmodel@ov.x.dummy.lv.idx[[g]]
        )
        if (!is.null(lv.idx)) {
          VETA.g <- VETA.g[-lv.idx, -lv.idx, drop = FALSE]
        }
      }
    } else if (representation == "RAM") {
      VETA.g <- lav_ram_veta(MLIST = MLIST)
    } else {
      lav_msg_stop(gettext(
        "only LISREL and RAM representation has been implemented for now"))
    }
    VETA[[g]] <- VETA.g
  }

  VETA
}

# V(ETA|x_i): latent variances/covariances, conditional on x_i
# - this is always (I-B)^-1 PSI (I-B)^-T, after REMOVING lv dummies
computeVETAx <- function(lavmodel = NULL, GLIST = NULL) {
  # state or final?
  if (is.null(GLIST)) GLIST <- lavmodel@GLIST
  nblocks <- lavmodel@nblocks
  nmat <- lavmodel@nmat
  representation <- lavmodel@representation

  # return a list
  ETA <- vector("list", length = nblocks)

  # compute ETA for each group
  for (g in 1:nblocks) {
    # which mm belong to group g?
    mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g]
    MLIST <- GLIST[mm.in.group]

    if (representation == "LISREL") {
      lv.idx <- c(
        lavmodel@ov.y.dummy.lv.idx[[g]],
        lavmodel@ov.x.dummy.lv.idx[[g]]
      )
      ETA.g <- computeVETAx.LISREL(
        MLIST = MLIST,
        lv.dummy.idx = lv.idx
      )
    } else {
      lav_msg_stop(gettext(
        "only representation LISREL has been implemented for now"))
    }
    ETA[[g]] <- ETA.g
  }

  ETA
}

# COV: observed+latent variances/covariances
computeCOV <- function(lavmodel = NULL, GLIST = NULL,
                       remove.dummy.lv = FALSE, delta = TRUE) {
  # state or final?
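  # (explanatory note) the "state or final?" idiom used throughout this
  # file: when GLIST is NULL, the final (estimated) model matrices stored
  # in lavmodel@GLIST are used; otherwise the caller supplies a candidate
  # GLIST, e.g. the current state during optimization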
if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list COV <- vector("list", length = nblocks) # compute COV for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { COV.g <- computeCOV.LISREL(MLIST = MLIST, delta = delta) if (remove.dummy.lv) { # remove all dummy latent variables lv.idx <- c( lavmodel@ov.y.dummy.lv.idx[[g]], lavmodel@ov.x.dummy.lv.idx[[g]] ) if (!is.null(lv.idx)) { # offset for ov lambda.names <- lavmodel@dimNames[[which(names(GLIST) == "lambda")[g]]][[1L]] lv.idx <- lv.idx + length(lambda.names) COV.g <- COV.g[-lv.idx, -lv.idx, drop = FALSE] } } } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } COV[[g]] <- COV.g } COV } # E(ETA): expectation (means) of latent variables (return vector) computeEETA <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL, remove.dummy.lv = FALSE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list EETA <- vector("list", length = nblocks) # compute E(ETA) for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { EETA.g <- computeEETA.LISREL(MLIST, mean.x = lavsamplestats@mean.x[[g]], sample.mean = lavsamplestats@mean[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]] ) if (remove.dummy.lv) { # remove dummy lv.dummy.idx <- c( lavmodel@ov.y.dummy.lv.idx[[g]], lavmodel@ov.x.dummy.lv.idx[[g]] ) if (length(lv.dummy.idx) > 0L) { EETA.g <- EETA.g[-lv.dummy.idx] } } } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } EETA[[g]] <- EETA.g } EETA } # E(ETA|x_i): conditional expectation (means) of latent variables # for a given value of x_i (instead of E(x_i)) computeEETAx <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL, eXo = NULL, nobs = NULL, remove.dummy.lv = FALSE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list EETAx <- vector("list", length = nblocks) # compute E(ETA) for each group for (g in 1:nblocks) { # which mm belong to group g? 
mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] EXO <- eXo[[g]] if (is.null(EXO)) { # create empty matrix EXO <- matrix(0, nobs[[g]], 0L) } if (representation == "LISREL") { EETAx.g <- computeEETAx.LISREL(MLIST, eXo = EXO, N = nobs[[g]], sample.mean = lavsamplestats@mean[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]] ) if (remove.dummy.lv) { # remove dummy lv.dummy.idx <- c( lavmodel@ov.y.dummy.lv.idx[[g]], lavmodel@ov.x.dummy.lv.idx[[g]] ) if (length(lv.dummy.idx) > 0L) { EETAx.g <- EETAx.g[, -lv.dummy.idx, drop = FALSE] } } } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } EETAx[[g]] <- EETAx.g } EETAx } # return 'regular' LAMBDA computeLAMBDA <- function(lavmodel = NULL, GLIST = NULL, handle.dummy.lv = TRUE, remove.dummy.lv = FALSE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list LAMBDA <- vector("list", length = nblocks) # compute LAMBDA for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { if (handle.dummy.lv) { ov.y.dummy.ov.idx <- lavmodel@ov.y.dummy.ov.idx[[g]] ov.x.dummy.ov.idx <- lavmodel@ov.x.dummy.ov.idx[[g]] ov.y.dummy.lv.idx <- lavmodel@ov.y.dummy.lv.idx[[g]] ov.x.dummy.lv.idx <- lavmodel@ov.x.dummy.lv.idx[[g]] } else { ov.y.dummy.ov.idx <- NULL ov.x.dummy.ov.idx <- NULL ov.y.dummy.lv.idx <- NULL ov.x.dummy.lv.idx <- NULL } LAMBDA.g <- computeLAMBDA.LISREL( MLIST = MLIST, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx, remove.dummy.lv = remove.dummy.lv ) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } LAMBDA[[g]] <- LAMBDA.g } LAMBDA } # THETA: observed (residual) variances computeTHETA <- function(lavmodel = NULL, GLIST = NULL, fix = TRUE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list THETA <- vector("list", length = nblocks) # compute THETA for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { if (fix) { THETA.g <- computeTHETA.LISREL( MLIST = MLIST, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]] ) } else { THETA.g <- computeTHETA.LISREL(MLIST = MLIST) } } else if (representation == "RAM") { ov.idx <- as.integer(MLIST$ov.idx[1, ]) THETA.g <- MLIST$S[ov.idx, ov.idx, drop = FALSE] } else { lav_msg_stop(gettext( "only LISREL and RAM representation has been implemented for now")) } THETA[[g]] <- THETA.g } THETA } # NU: observed intercepts computeNU <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL) { # state or final? 
if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list NU <- vector("list", length = nblocks) # compute NU for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { NU.g <- computeNU.LISREL( MLIST = MLIST, sample.mean = lavsamplestats@mean[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]] ) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } NU[[g]] <- as.matrix(NU.g) } NU } # E(Y): expectation (mean) of observed variables # returns vector 1 x nvar computeEY <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL, delta = TRUE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST nblocks <- lavmodel@nblocks nmat <- lavmodel@nmat representation <- lavmodel@representation # return a list EY <- vector("list", length = nblocks) # compute E(Y) for each group for (g in 1:nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0, nmat))[g] MLIST <- GLIST[mm.in.group] if (representation == "LISREL") { EY.g <- computeEY.LISREL( MLIST = MLIST, mean.x = lavsamplestats@mean.x[[g]], sample.mean = lavsamplestats@mean[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], delta = delta ) } else { lav_msg_stop(gettext( "only representation LISREL has been implemented for now")) } EY[[g]] <- EY.g } EY } # E(Y | ETA, x_i): conditional expectation (means) of observed variables # for a given value of x_i AND eta_i computeYHAT <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL, eXo = NULL, nobs = NULL, ETA = NULL, duplicate = FALSE, delta = TRUE) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST # ngroups, not nblocks! ngroups <- lavsamplestats@ngroups # return a list YHAT <- vector("list", length = ngroups) # compute YHAT for each group for (g in seq_len(ngroups)) { # which mm belong to group g? # FIXME: what if more than g blocks??? 
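# note: YHAT is computed per *group* (not per block), because the case-wise
# inputs eXo/ETA are organized by group; with multilevel data
# (nblocks > ngroups) the index arithmetic below would need the block
# number instead -- hence the FIXME above.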
mm.in.group <- 1:lavmodel@nmat[g] + cumsum(c(0L, lavmodel@nmat))[g] MLIST <- GLIST[mm.in.group] if (is.null(eXo[[g]]) && duplicate) { Nobs <- nobs[[g]] } else { Nobs <- 1L } if (lavmodel@representation == "LISREL") { if (lavmodel@conditional.x) { YHAT[[g]] <- computeEYetax.LISREL( MLIST = MLIST, eXo = eXo[[g]], ETA = ETA[[g]], N = Nobs, sample.mean = lavsamplestats@mean[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], delta = delta ) } else { # unconditional case YHAT[[g]] <- computeEYetax3.LISREL( MLIST = MLIST, ETA = ETA[[g]], sample.mean = lavsamplestats@mean[[g]], mean.x = lavsamplestats@mean.x[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], delta = delta ) # impute back ov.y values that are NOT indicators } } else { lav_msg_stop(gettextf("representation %s not supported yet.", lavmodel@representation)) } } YHAT } lavaan/R/lav_lavaan_step03_data.R0000644000176200001440000001112414627656441016351 0ustar liggesuserslav_lavaan_step03_data <- function(slotData = NULL, # nolint lavoptions = NULL, ov.names = NULL, ov.names.y = NULL, group = NULL, data = NULL, cluster = NULL, ov.names.x = NULL, ov.names.l = NULL, ordered = NULL, sampling.weights = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, slotParTable = NULL, # nolint ngroups = NULL, dotdotdot = NULL, flat.model = NULL, model = NULL, NACOV = NULL, # nolint WLS.V = NULL) { # nolint # # # # # # # # # # # # # 3. lavdata # # # # # # # # # # # # # # if slotData not null # copy slotData to lavdata # else # create lavdata via function lavData, setting ov.names to ov.names.y # if lavoptions$conditional.x # if lavdata$data.type is "none" # set lavoptions$do.fit to FALSE # if flat.model$est not null set lavoptions$start to "est", else set # it to "simple" # set lavoptions$se and lavoptions$test to "none" # else # if lavdata$data.type is "moment" # if estimator one of MLM, MLMV, MLR, ULSM, ULSMV, ULSMVS and NACOV # is NULL: *** error *** # if estimator one of WLS, WLSM, WLSMV, WLSMVS, DWLS and WLS.V is # NULL: *** error *** # if lavoptions$se = bootstrap: *** error *** # if slotPartable not NULL and model is lavaan-object, check equality # ngroups and lavdata$ngroups # --> *** error *** if not if (!is.null(slotData)) { lavdata <- slotData } else { if (lav_verbose()) { cat("lavdata ...") } # FIXME: ov.names should always contain both y and x! tmp.ov.names <- if (lavoptions$conditional.x) { ov.names.y } else { ov.names } lavdata <- lavData( data = data, group = group, cluster = cluster, ov.names = tmp.ov.names, ov.names.x = ov.names.x, ov.names.l = ov.names.l, ordered = ordered, sampling.weights = sampling.weights, sample.cov = sample.cov, sample.mean = sample.mean, sample.th = sample.th, sample.nobs = sample.nobs, lavoptions = lavoptions ) if (lav_verbose()) { cat(" done.\n") } } # what have we learned from the data? 
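# lavdata@data.type is one of "full" (raw data), "moment" (only summary
# statistics such as sample.cov/sample.mean were supplied), or "none"
# (no data at all; e.g., the model is only inspected or simulated from,
# which is why do.fit is switched off below).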
if (lavdata@data.type == "none") { lavoptions$do.fit <- FALSE # check if 'model' was a fitted parameter table lavoptions$start <- ifelse(is.null(flat.model$est), "simple", "est") lavoptions$se <- "none" lavoptions$test <- "none" } else if (lavdata@data.type == "moment") { # check user-specified options first if (!is.null(dotdotdot$estimator)) { if (any(dotdotdot$estimator == c( "MLM", "MLMV", "MLR", "MLR", "ULSM", "ULSMV", "ULSMVS" )) && is.null(NACOV)) { lav_msg_stop(gettextf( "estimator %s requires full data or user-provided NACOV", dotdotdot$estimator)) } else if (any(dotdotdot$estimator == c( "WLS", "WLSM", "WLSMV", "WLSMVS", "DWLS" )) && is.null(WLS.V)) { lav_msg_stop(gettextf( "estimator %s requires full data or user-provided WLS.V and NACOV", dotdotdot$estimator)) } } # catch here some options that will not work with moments if (lavoptions$se == "bootstrap") { lav_msg_stop(gettext("bootstrapping requires full data")) } # more needed? } # sanity check if (!is.null(slotParTable) || inherits(model, "lavaan")) { if (ngroups != lavdata@ngroups) { lav_msg_stop(gettext( "mismatch between number of groups in data and number of groups in model.")) } } if (lav_verbose()) { print(lavdata) } if (lav_debug()) { print(str(lavdata)) } # if lavdata@nlevels > 1L, adapt start option (for now) # until we figure out how to handle groups+blocks # if(lavdata@nlevels > 1L) { # lavoptions$start <- "simple" # } list( lavdata = lavdata, lavoptions = lavoptions ) } lavaan/R/lav_lavaanList_simulate.R0000644000176200001440000000543414627656441016730 0ustar liggesusers# lavSimulate: fit the *same* model, on simulated datasets # YR - 4 July 2016 lavSimulate <- function(pop.model = NULL, # population model model = NULL, # user model dataFunction = simulateData, dataFunction.args = list( model = pop.model, sample.nobs = 1000L ), ndat = 1000L, cmd = "sem", cmd.pop = "sem", ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL) { # dotdotdot dotdotdot <- list(...) # dotdotdot for fit.pop dotdotdot.pop <- dotdotdot dotdotdot.pop$verbose <- FALSE dotdotdot.pop$debug <- FALSE dotdotdot.pop$data <- NULL dotdotdot.pop$sample.cov <- NULL # 'fit' population model without data, to get 'true' parameters fit.pop <- do.call(cmd.pop, args = c(list(model = pop.model), dotdotdot.pop) ) # check model object if (is.null(model)) { model <- fit.pop@ParTable } # per default, use 'true' values as starting values if (is.null(dotdotdot$start)) { dotdotdot$start <- fit.pop } # no warnings during/after the simulations # add 'warn = FALSE' to args # generate simulations fit <- do.call("lavaanList", args = c(list( model = model, dataFunction = dataFunction, dataFunction.args = dataFunction.args, ndat = ndat, cmd = cmd, store.slots = store.slots, FUN = FUN, show.progress = show.progress, store.failed = store.failed, parallel = parallel, ncpus = ncpus, cl = cl ), dotdotdot)) # flag this is a simulation fit@meta$lavSimulate <- TRUE # NOTE!!! 
# if the model != pop.model, we may need to 'reorder' the # 'true' parameters, so they correspond to the 'model' parameters p2.id <- lav_partable_map_id_p1_in_p2( p1 = fit@ParTable, p2 = fit.pop@ParTable, stopifnotfound = FALSE ) est1 <- fit@ParTable$est na.idx <- which(is.na(p2.id)) if (length(na.idx) > 0L) { lav_msg_warn(gettext( "some estimated parameters were not mentioned in the population model; partable user model idx = ", lav_msg_view(na.idx, "none"))) # replace NA by '1' (override later!) p2.id[na.idx] <- 1L } est.pop <- fit.pop@ParTable$est[p2.id] # by default, the 'unknown' population values are set to 0.0 if (length(na.idx) > 0L) { est.pop[na.idx] <- 0 } # store 'true' parameters in meta$est.true fit@meta$est.true <- est.pop fit } lavaan/R/lav_test_satorra_bentler.R0000644000176200001440000006034214627656441017153 0ustar liggesusers# - 0.6-13: fix multiple-group UG^2 bug (reported by Gronneberg, Foldnes and # Moss) when Satterthwaite = TRUE, ngroups > 1, and eq constraints. # Use ug2.old.approach = TRUE to get the old result lav_test_satorra_bentler <- function(lavobject = NULL, lavsamplestats = NULL, lavmodel = NULL, lavimplied = NULL, lavoptions = NULL, lavdata = NULL, TEST.unscaled = NULL, E.inv = NULL, Delta = NULL, WLS.V = NULL, Gamma = NULL, test = "satorra.bentler", mimic = "lavaan", method = "original", ug2.old.approach = FALSE, return.u = FALSE, return.ugamma = FALSE) { TEST <- list() if (!is.null(lavobject)) { lavsamplestats <- lavobject@SampleStats lavmodel <- lavobject@Model lavoptions <- lavobject@Options lavimplied <- lavobject@implied lavdata <- lavobject@Data TEST$standard <- lavobject@test[[1]] } else { TEST$standard <- TEST.unscaled } npar <- lavmodel@nx.free # ug2.old.approach if (missing(ug2.old.approach)) { if (!is.null(lavoptions$ug2.old.approach)) { ug2.old.approach <- lavoptions$ug2.old.approach } else { ug2.old.approach <- FALSE } } # E.inv ok? if (length(lavoptions$information) == 1L && length(lavoptions$h1.information) == 1L && length(lavoptions$observed.information) == 1L) { E.inv.recompute <- FALSE } else if ( (lavoptions$information[1] == lavoptions$information[2]) && (lavoptions$h1.information[1] == lavoptions$h1.information[2]) && (lavoptions$information[2] == "expected" || (lavoptions$observed.information[1] == lavoptions$observed.information[2]))) { E.inv.recompute <- FALSE } else { E.inv.recompute <- TRUE # change information options lavoptions$information[1] <- lavoptions$information[2] lavoptions$h1.information[1] <- lavoptions$h1.information[2] lavoptions$observed.information[1] <- lavoptions$observed.information[2] } if (!is.null(E.inv) && !is.null(WLS.V) && !is.null(Delta)) { E.inv.recompute <- FALSE # user-provided } # check test if (!all(test %in% c( "satorra.bentler", "scaled.shifted", "mean.var.adjusted" ))) { lav_msg_warn(gettext( "test must be one of `satorra.bentler', `scaled.shifted' or `mean.var.adjusted'; will use `satorra.bentler' only")) test <- "satorra.bentler" } if (return.u) { method <- "original" } # check method if (!all(method %in% c("original", "orthogonal.complement", "ABA"))) { lav_msg_warn(gettext("method must be one of `original', `ABA', `orthogonal.complement'; will use `ABA'")) method <- "original" } # do we have E.inv, Delta, WLS.V? 
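# reminder of the ingredients used below (see the trace functions later in
# this file): Delta = d sigma / d theta (model derivatives per group),
# WLS.V = the weight matrix V, E.inv = inverted (augmented) information;
# together they enter UGamma = Gamma %*% (V - V Delta E.inv Delta' V).
# they are recomputed only if the user did not supply them (or if the
# information options changed).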
if (npar > 0L && (is.null(E.inv) || is.null(Delta) || is.null(WLS.V) || E.inv.recompute)) { if (mimic == "Mplus" && lavoptions$estimator == "ML") { E <- lav_model_information_expected_MLM( lavmodel = lavmodel, augmented = FALSE, inverted = FALSE, lavsamplestats = lavsamplestats, extra = TRUE ) } else { E <- lav_model_information( lavmodel = lavmodel, lavimplied = lavimplied, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, extra = TRUE ) } E.inv <- try(lav_model_information_augment_invert(lavmodel, information = E, inverted = TRUE ), silent = TRUE) if (inherits(E.inv, "try-error")) { if (return.ugamma) { lav_msg_warn(gettext( "could not invert information matrix needed for UGamma")) return(NULL) } else if (return.u) { lav_msg_warn(gettext( "could not invert information matrix needed for UfromUGamma")) return(NULL) } else { TEST$standard$stat <- as.numeric(NA) TEST$standard$stat.group <- rep(as.numeric(NA), lavdata@ngroups) TEST$standard$pvalue <- as.numeric(NA) TEST[[test[1]]] <- c(TEST$standard, scaling.factor = as.numeric(NA), shift.parameter = as.numeric(NA), label = character(0) ) lav_msg_warn(gettext("could not invert information matrix needed for robust test statistic")) TEST[[test[1]]]$test <- test[1] # to prevent lavTestLRT error when robust test is detected for some but not all models return(TEST) } } Delta <- attr(E, "Delta") WLS.V <- attr(E, "WLS.V") } # catch df == 0 if ((TEST$standard$df == 0L || TEST$standard$df < 0) && !return.u && !return.ugamma) { TEST[[test[1]]] <- c(TEST$standard, scaling.factor = as.numeric(NA), label = character(0) ) TEST[[test[1]]]$test <- test[1] # to prevent lavTestLRT error when robust test is detected for some but not all models return(TEST) } # Gamma if (is.null(Gamma)) { Gamma <- lavsamplestats@NACOV # still NULL? (perhaps estimator = ML) if (is.null(Gamma[[1]])) { if (!is.null(lavobject)) { Gamma <- lav_object_gamma(lavobject, model.based = FALSE) } else { Gamma <- lav_object_gamma( lavobject = NULL, lavdata = lavdata, lavoptions = lavoptions, lavsamplestats = lavsamplestats, lavh1 = NULL, lavimplied = NULL, ADF = TRUE, model.based = FALSE ) } } } if (mimic == "Mplus" && lavmodel@categorical) { for (g in 1:lavsamplestats@ngroups) { Ng <- lavsamplestats@nobs[[g]] Gamma[[g]] <- Gamma[[g]] / Ng * (Ng - 1L) } } # ngroups ngroups <- lavsamplestats@ngroups # mean and variance adjusted? 
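# overview of the corrections computed below (cf. Satorra & Bentler):
#   satorra.bentler  : c   = tr(UG)/df                (df unchanged)
#   mean.var.adjusted: df* = tr(UG)^2/tr(UG %*% UG),  c = tr(UG)/df*
#   scaled.shifted   : a   = sqrt(df/tr(UG %*% UG)),  b = df - a*tr(UG)
# which is why tr(UG %*% UG) (Satterthwaite) is only needed for the
# latter two tests.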
Satterthwaite <- FALSE if (any(test %in% c("mean.var.adjusted", "scaled.shifted"))) { Satterthwaite <- TRUE } if (npar == 0) { # catch npar == 0 (eg baseline model if correlation structure) trace.UGamma <- trace.UGamma2 <- U.all <- UG <- as.numeric(NA) fg <- unlist(lavsamplestats@nobs) / lavsamplestats@ntotal Gamma.f <- Gamma for (g in 1:ngroups) { Gamma.f[[g]] <- 1 / fg[g] * Gamma[[g]] } Gamma.all <- lav_matrix_bdiag(Gamma.f) UG <- Gamma.all trace.UGamma <- sum(diag(Gamma.all)) trace.UGamma2 <- sum(UG * t(UG)) out <- list( trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, UGamma = UG, UfromUGamma = U.all ) } else if (method == "original") { out <- lav_test_satorra_bentler_trace_original( Gamma = Gamma, Delta = Delta, WLS.V = WLS.V, E.inv = E.inv, ngroups = ngroups, nobs = lavsamplestats@nobs, ntotal = lavsamplestats@ntotal, return.u = return.u, return.ugamma = return.ugamma, ug2.old.approach = ug2.old.approach, Satterthwaite = Satterthwaite ) } else if (method == "orthogonal.complement") { out <- lav_test_satorra_bentler_trace_complement( Gamma = Gamma, Delta = Delta, WLS.V = WLS.V, lavmodel = lavmodel, ngroups = ngroups, nobs = lavsamplestats@nobs, ntotal = lavsamplestats@ntotal, return.ugamma = return.ugamma, ug2.old.approach = ug2.old.approach, Satterthwaite = Satterthwaite ) } else if (method == "ABA") { out <- lav_test_satorra_bentler_trace_ABA( Gamma = Gamma, Delta = Delta, WLS.V = WLS.V, E.inv = E.inv, ngroups = ngroups, nobs = lavsamplestats@nobs, ntotal = lavsamplestats@ntotal, return.ugamma = return.ugamma, ug2.old.approach = ug2.old.approach, Satterthwaite = Satterthwaite ) } else { lav_msg_stop(gettextf("method `%s' not supported", method)) } trace.UGamma <- out$trace.UGamma trace.UGamma2 <- out$trace.UGamma2 if ("satorra.bentler" %in% test) { # same df df.scaled <- TEST$standard$df # scaling factor scaling.factor <- trace.UGamma / df.scaled if (scaling.factor < 0) scaling.factor <- as.numeric(NA) # scaled test statistic per group stat.group <- TEST$standard$stat.group / scaling.factor # scaled test statistic global stat <- sum(stat.group) # label if (mimic == "Mplus") { if (lavoptions$estimator == "ML") { label <- "Satorra-Bentler correction (Mplus variant)" } else if (lavoptions$estimator == "DWLS") { label <- "Satorra-Bentler correction (WLSM)" } else if (lavoptions$estimator == "ULS") { label <- "Satorra-Bentler correction (ULSM)" } } else { label <- "Satorra-Bentler correction" } TEST$satorra.bentler <- list( test = "satorra.bentler", stat = stat, stat.group = stat.group, df = df.scaled, pvalue = 1 - pchisq(stat, df.scaled), trace.UGamma = trace.UGamma, scaling.factor = scaling.factor, scaled.test.stat = TEST$standard$stat, scaled.test = TEST$standard$test, label = label ) } if ("mean.var.adjusted" %in% test) { if (mimic == "Mplus") { df.scaled <- floor(trace.UGamma^2 / trace.UGamma2 + 0.5) } else { # more precise, fractional df df.scaled <- trace.UGamma^2 / trace.UGamma2 } # scaling factor scaling.factor <- trace.UGamma / df.scaled if (scaling.factor < 0) scaling.factor <- as.numeric(NA) if (ug2.old.approach) { # scaled test statistic per group stat.group <- TEST$standard$stat.group / scaling.factor # scaled test statistic global stat <- sum(stat.group) } else { # scaled test statistic per group stat.group <- TEST$standard$stat.group / scaling.factor # scaled test statistic global stat <- TEST$standard$stat / scaling.factor } # label if (mimic == "Mplus") { if (lavoptions$estimator == "ML") { label <- "mean and variance adjusted correction (MLMV)" } else if 
(lavoptions$estimator == "DWLS") { label <- "mean and variance adjusted correction (WLSMV)" } else if (lavoptions$estimator == "ULS") { label <- "mean and variance adjusted correction (ULSMV)" } } else { label <- "mean and variance adjusted correction" } TEST$mean.var.adjusted <- list( test = "mean.var.adjusted", stat = stat, stat.group = stat.group, df = df.scaled, pvalue = 1 - pchisq(stat, df.scaled), trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, scaling.factor = scaling.factor, scaled.test.stat = TEST$standard$stat, scaled.test = TEST$standard$test, label = label ) } if ("scaled.shifted" %in% test) { # this is the T3 statistic as used by Mplus 6 and higher # see 'Simple Second Order Chi-Square Correction' 2010 # www.statmodel.com # same df df.scaled <- TEST$standard$df # scaling factor fg <- unlist(lavsamplestats@nobs) / lavsamplestats@ntotal a <- sqrt(df.scaled / trace.UGamma2) scaling.factor <- 1 / a if (scaling.factor < 0) scaling.factor <- as.numeric(NA) if (ug2.old.approach) { # scaling factor shift.parameter <- fg * (df.scaled - a * trace.UGamma) # scaled test statistic per group stat.group <- (TEST$standard$stat.group * a + shift.parameter) # scaled test statistic global stat <- sum(stat.group) } else { shift.parameter <- df.scaled - a * trace.UGamma stat <- TEST$standard$stat * a + shift.parameter stat.group <- TEST$standard$stat.group * a + fg * shift.parameter } # label if (mimic == "Mplus") { if (lavoptions$estimator == "ML") { label <- "simple second-order correction (MLMV)" } else if (lavoptions$estimator == "DWLS") { label <- "simple second-order correction (WLSMV)" } else if (lavoptions$estimator == "ULS") { label <- "simple second-order correction (ULSMV)" } } else { label <- "simple second-order correction" } TEST$scaled.shifted <- list( test = "scaled.shifted", stat = stat, stat.group = stat.group, df = df.scaled, pvalue = 1 - pchisq(stat, df.scaled), trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, scaling.factor = scaling.factor, shift.parameter = shift.parameter, scaled.test.stat = TEST$standard$stat, scaled.test = TEST$standard$test, label = label ) } if (return.ugamma) { TEST$UGamma <- out$UGamma } if (return.u) { TEST$UfromUGamma <- out$UfromUGamma } TEST } # using the `classical' formula # UG = Gamma * [V - V Delta E.inv Delta' V'] lav_test_satorra_bentler_trace_original <- function(Gamma = NULL, Delta = NULL, WLS.V = NULL, E.inv = NULL, ngroups = NULL, nobs = NULL, ntotal = NULL, return.u = FALSE, return.ugamma = FALSE, ug2.old.approach = FALSE, Satterthwaite = FALSE) { # this is what we did <0.6-13: everything per group if (ug2.old.approach) { UfromUGamma <- UG <- vector("list", ngroups) trace.UGamma <- trace.UGamma2 <- rep(as.numeric(NA), ngroups) for (g in 1:ngroups) { fg <- nobs[[g]] / ntotal Gamma.g <- Gamma[[g]] / fg ## ?? 
check this Delta.g <- Delta[[g]] if (is.matrix(WLS.V[[g]])) { WLS.Vg <- WLS.V[[g]] * fg } else { WLS.Vg <- diag(WLS.V[[g]]) * fg } U <- (WLS.Vg - WLS.Vg %*% Delta[[g]] %*% E.inv %*% t(Delta[[g]]) %*% WLS.Vg) trace.UGamma[g] <- sum(U * Gamma.g) if (return.u) { UfromUGamma[[g]] <- U } UG <- NULL if (Satterthwaite || return.ugamma) { UG.group <- U %*% Gamma.g trace.UGamma2[g] <- sum(UG.group * t(UG.group)) UG[[g]] <- UG.group } } # g # sum over groups trace.UGamma <- sum(trace.UGamma) trace.UGamma2 <- sum(trace.UGamma2) U.all <- UfromUGamma # group-specific } else { trace.UGamma <- trace.UGamma2 <- U.all <- UG <- as.numeric(NA) fg <- unlist(nobs) / ntotal if (Satterthwaite || return.ugamma || return.u) { # for trace.UGamma2, we can no longer compute the trace per group V.g <- WLS.V for (g in 1:ngroups) { if (is.matrix(WLS.V[[g]])) { V.g[[g]] <- fg[g] * WLS.V[[g]] } else { V.g[[g]] <- fg[g] * diag(WLS.V[[g]]) } } V.all <- lav_matrix_bdiag(V.g) Gamma.f <- Gamma for (g in 1:ngroups) { Gamma.f[[g]] <- 1 / fg[g] * Gamma[[g]] } Gamma.all <- lav_matrix_bdiag(Gamma.f) Delta.all <- do.call("rbind", Delta) U.all <- V.all - V.all %*% Delta.all %*% E.inv %*% t(Delta.all) %*% V.all UG <- U.all %*% Gamma.all trace.UGamma <- sum(U.all * Gamma.all) trace.UGamma2 <- sum(UG * t(UG)) } else { # we only need trace.UGamma - this can be done group-specific trace.UGamma.group <- numeric(ngroups) for (g in 1:ngroups) { Gamma.g <- Gamma[[g]] / fg[g] Delta.g <- Delta[[g]] if (is.matrix(WLS.V[[g]])) { WLS.Vg <- WLS.V[[g]] * fg[g] } else { WLS.Vg <- diag(WLS.V[[g]]) * fg[g] } U <- (WLS.Vg - WLS.Vg %*% Delta[[g]] %*% E.inv %*% t(Delta[[g]]) %*% WLS.Vg) trace.UGamma.group[g] <- sum(U * Gamma.g) } trace.UGamma <- sum(trace.UGamma.group) } } list( trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, UGamma = UG, UfromUGamma = U.all ) } # using the orthogonal complement of Delta: Delta.c # UG = [ (Delta.c' W Delta.c)^{-1} (Delta.c' Gamma Delta.c) lav_test_satorra_bentler_trace_complement <- function(Gamma = NULL, Delta = NULL, WLS.V = NULL, lavmodel = NULL, ngroups = NULL, nobs = NULL, ntotal = NULL, return.ugamma = FALSE, ug2.old.approach = FALSE, Satterthwaite = FALSE) { # this is what we did <0.6-13: everything per group # does not work when ngroups > 1 + equality constraints if (ug2.old.approach) { UG <- vector("list", ngroups) trace.UGamma <- trace.UGamma2 <- rep(as.numeric(NA), ngroups) for (g in 1:ngroups) { fg <- nobs[[g]] / ntotal Gamma.g <- Gamma[[g]] / fg ## ?? check this Delta.g <- Delta[[g]] if (is.matrix(WLS.V[[g]])) { WLS.Vg <- WLS.V[[g]] * fg } else { WLS.Vg <- diag(WLS.V[[g]]) * fg } # handle equality constraints # FIXME: inequality constraints are ignored! 
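# background: eq.constraints.K (presumably) maps the reduced/free parameter
# vector onto the full one, so Delta %*% K contains derivatives with
# respect to the free parameters only; without this step the orthogonal
# complement computed below would have the wrong rank when equality
# constraints are active.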
if (lavmodel@eq.constraints) { Delta.g <- Delta.g %*% lavmodel@eq.constraints.K } else if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { Delta.g <- Delta.g %*% lavmodel@ceq.simple.K } # orthogonal complement of Delta.g Delta.c <- lav_matrix_orthogonal_complement(Delta.g) ### FIXME: compute WLS.W directly, instead of using solve(WLS.V) tmp1 <- solve(t(Delta.c) %*% solve(WLS.Vg) %*% Delta.c) tmp2 <- t(Delta.c) %*% Gamma.g %*% Delta.c trace.UGamma[g] <- sum(tmp1 * tmp2) UG <- NULL if (Satterthwaite || return.ugamma) { UG.group <- tmp1 %*% tmp2 trace.UGamma2[g] <- sum(UG.group * t(UG.group)) UG[[g]] <- UG.group } } # sum over groups trace.UGamma <- sum(trace.UGamma) trace.UGamma2 <- sum(trace.UGamma2) } else { trace.UGamma <- trace.UGamma2 <- UG <- as.numeric(NA) fg <- unlist(nobs) / ntotal V.g <- WLS.V for (g in 1:ngroups) { if (is.matrix(WLS.V[[g]])) { V.g[[g]] <- fg[g] * WLS.V[[g]] } else { V.g[[g]] <- fg[g] * diag(WLS.V[[g]]) } } V.all <- lav_matrix_bdiag(V.g) Gamma.f <- Gamma for (g in 1:ngroups) { Gamma.f[[g]] <- 1 / fg[g] * Gamma[[g]] } Gamma.all <- lav_matrix_bdiag(Gamma.f) Delta.all <- do.call("rbind", Delta) # handle equality constraints # FIXME: inequality constraints are ignored! if (lavmodel@eq.constraints) { Delta.all <- Delta.all %*% lavmodel@eq.constraints.K } else if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { Delta.all <- Delta.all %*% lavmodel@ceq.simple.K } # orthogonal complement of Delta.g Delta.c <- lav_matrix_orthogonal_complement(Delta.all) tmp1 <- solve(t(Delta.c) %*% solve(V.all) %*% Delta.c) tmp2 <- t(Delta.c) %*% Gamma.all %*% Delta.c UG <- tmp1 %*% tmp2 trace.UGamma <- sum(tmp1 * tmp2) trace.UGamma2 <- sum(UG * t(UG)) } list( trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, UGamma = UG ) } # using the ABA form # UG = Gamma %*% [V - V %*% Delta %*% E.inv %*% tDelta %*% V] # = Gamma %*% V - Gamma %*% V %*% Delta %*% E.inv %*% tDelta %*% V # = Gamma %*% A1 - Gamma %*% A1 %*% Delta %*% E.inv %*% tDelta %*% A1 # (define AGA1 := A1 %*% Gamma %*% A1) # Note this is not identical to 'B1', (model-based) first-order information # # = A1.inv %*% A1 %*% Gamma %*% A1 - # A1.inv %*% A1 %*% Gamma %*% A1 %*% Delta %*% E.inv %*% tDelta %*% A1 # # = A1.inv %*% AGA1 - # A1.inv %*% AGA1 %*% Delta %*% E.inv %*% tDelta %*% A1 # # if only the trace is needed, we can use reduce the rhs (after the minus) # to AGA1 %*% Delta %*% E.inv %*% tDelta (eliminating A1 and A1.inv) # we write it like this to highlight the connection with MLR # lav_test_satorra_bentler_trace_ABA <- function(Gamma = NULL, Delta = NULL, WLS.V = NULL, E.inv = NULL, ngroups = NULL, nobs = NULL, ntotal = NULL, return.ugamma = FALSE, ug2.old.approach = FALSE, Satterthwaite = FALSE) { # this is what we did <0.6-13: everything per group if (ug2.old.approach) { UfromUGamma <- UG <- vector("list", ngroups) trace.UGamma <- trace.UGamma2 <- rep(as.numeric(NA), ngroups) for (g in 1:ngroups) { fg <- nobs[[g]] / ntotal Gamma.g <- Gamma[[g]] / fg ## ?? check this Delta.g <- Delta[[g]] # diagonal WLS.V? we check for this since 0.5-17 diagonal <- FALSE if (is.matrix(WLS.V[[g]])) { A1 <- WLS.V[[g]] * fg AGA1 <- A1 %*% Gamma.g %*% A1 } else { diagonal <- TRUE a1 <- WLS.V[[g]] * fg # numeric vector! 
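# for diagonal V stored as a vector a1, A1 %*% Gamma %*% A1 reduces to an
# elementwise product with the outer product of a1. Tiny check
# (illustration only):
#   a1 <- 1:2; G <- matrix(1, 2, 2)
#   all.equal(diag(a1) %*% G %*% diag(a1), G * tcrossprod(a1))  # TRUE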
AGA1 <- Gamma.g * tcrossprod(a1) } # note: we have AGA1 at the end, to avoid ending up with # a transposed matrix (both parts are non-symmetric) if (diagonal) { UG <- t(Gamma.g * a1) - (Delta.g %*% tcrossprod(E.inv, Delta.g) %*% AGA1) } else { UG <- (Gamma.g %*% A1) - (Delta.g %*% tcrossprod(E.inv, Delta.g) %*% AGA1) } trace.UGamma[g] <- sum(diag(UG)) if (Satterthwaite) { trace.UGamma2[g] <- sum(UG * t(UG)) } } # sum over groups trace.UGamma <- sum(trace.UGamma) trace.UGamma2 <- sum(trace.UGamma2) } else { trace.UGamma <- trace.UGamma2 <- UG <- as.numeric(NA) fg <- unlist(nobs) / ntotal if (Satterthwaite || return.ugamma) { # for trace.UGamma2, we can no longer compute the trace per group V.g <- WLS.V for (g in 1:ngroups) { if (is.matrix(WLS.V[[g]])) { V.g[[g]] <- fg[g] * WLS.V[[g]] } else { V.g[[g]] <- fg[g] * diag(WLS.V[[g]]) } } V.all <- lav_matrix_bdiag(V.g) Gamma.f <- Gamma for (g in 1:ngroups) { Gamma.f[[g]] <- 1 / fg[g] * Gamma[[g]] } Gamma.all <- lav_matrix_bdiag(Gamma.f) Delta.all <- do.call("rbind", Delta) AGA1 <- V.all %*% Gamma.all %*% V.all UG <- (Gamma.all %*% V.all) - (Delta.all %*% tcrossprod(E.inv, Delta.all) %*% AGA1) trace.UGamma <- sum(diag(UG)) trace.UGamma2 <- sum(UG * t(UG)) } else { trace.UGamma.group <- numeric(ngroups) for (g in 1:ngroups) { fg <- nobs[[g]] / ntotal Gamma.g <- Gamma[[g]] / fg ## ?? check this Delta.g <- Delta[[g]] # diagonal WLS.V? we check for this since 0.5-17 diagonal <- FALSE if (is.matrix(WLS.V[[g]])) { A1 <- WLS.V[[g]] * fg AGA1 <- A1 %*% Gamma.g %*% A1 } else { diagonal <- TRUE a1 <- WLS.V[[g]] * fg # numeric vector! AGA1 <- Gamma.g * tcrossprod(a1) } # note: we have AGA1 at the end, to avoid ending up with # a transposed matrix (both parts are non-symmetric) if (diagonal) { UG <- t(Gamma.g * a1) - (Delta.g %*% tcrossprod(E.inv, Delta.g) %*% AGA1) } else { UG <- (Gamma.g %*% A1) - (Delta.g %*% tcrossprod(E.inv, Delta.g) %*% AGA1) } trace.UGamma.group[g] <- sum(diag(UG)) } # g trace.UGamma <- sum(trace.UGamma.group) } } if (!return.ugamma) { UG <- NULL } list( trace.UGamma = trace.UGamma, trace.UGamma2 = trace.UGamma2, UGamma = UG ) } lavaan/R/lav_partable_full.R0000644000176200001440000001465514627656441015550 0ustar liggesusers# create `full' parameter table, containing (almost) all parameters # that could be free # # main motivation: univariate scores tests (modification indices) # lav_partable_full <- function(partable = NULL, strict.exo = FALSE, free = FALSE, start = FALSE) { # check minimum requirements: lhs, op, rhs stopifnot( !is.null(partable$lhs), !is.null(partable$op), !is.null(partable$rhs) ) # lavpta? 
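# lav_partable_attributes() returns (among other things) $vnames (the
# per-block variable name lists: ov, ov.nox, lv, ...), $nblocks, $ngroups,
# $nlevels and flags such as $meanstructure -- everything needed below to
# enumerate the candidate parameters without re-deriving it from the
# partable itself.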
lavpta <- lav_partable_attributes(partable) # meanstructure if (!is.null(lavpta$meanstructure)) { meanstructure <- lavpta$meanstructure } else { # old object meanstructure <- any(partable$op == "~1") } # number of blocks nblocks <- lavpta$nblocks ngroups <- lavpta$ngroups nlevels <- lavpta$nlevels lhs <- rhs <- op <- character(0L) block <- group <- level <- integer(0L) # new in 0.6-3: GROUP.values <- lav_partable_group_values(partable) LEVEL.values <- lav_partable_level_values(partable) if (is.character(GROUP.values[1])) { group <- character(0L) } if (is.character(LEVEL.values[1L])) { level <- character(0L) } # block number b <- 0L for (g in 1:ngroups) { for (l in 1:nlevels) { # block b <- b + 1L ov.names <- lavpta$vnames$ov[[b]] ov.names.nox <- lavpta$vnames$ov.nox[[b]] ov.names.x <- lavpta$vnames$ov.x[[b]] ov.names.ind <- lavpta$vnames$ov.ind[[b]] ov.names.ord <- lavpta$vnames$ov.ord[[b]] lv.names <- lavpta$vnames$lv[[b]] # eqs.y, eqs.x eqs.names <- unique(c( lavpta$vnames$eqs.y[[b]], lavpta$vnames$eqs.x[[b]] )) if (length(eqs.names) > 0L) { eqs.y <- eqs.names if (strict.exo) { x.idx <- which(eqs.names %in% ov.names.x) if (length(x.idx) > 0L) { eqs.y <- eqs.names[-x.idx] } } eqs.x <- eqs.names } else { eqs.y <- character(0L) eqs.x <- character(0L) } # 1 "=~" l.lhs <- rep(lv.names, each = length(ov.names.nox)) l.rhs <- rep(ov.names.nox, times = length(lv.names)) # remove factor ~ eqs.y combinations, if any # because they also appear as a regression bad.idx <- which(l.lhs %in% lv.names & l.rhs %in% eqs.y) if (length(bad.idx) > 0L) { l.lhs <- l.lhs[-bad.idx] l.rhs <- l.rhs[-bad.idx] } l.op <- rep("=~", length(l.lhs)) # 2a. "~~" ov ## FIXME: ov.names.nox or ov.names?? # if(strict.exo) { OV <- ov.names.nox # } else { # OV <- ov.names # } nx <- length(OV) idx <- lower.tri(matrix(0, nx, nx), diag = TRUE) ov.lhs <- rep(OV, each = nx)[idx] # fill upper.tri ov.rhs <- rep(OV, times = nx)[idx] ov.op <- rep("~~", length(ov.lhs)) # remove dummy indicators that correlate with 'proper' # indicators; new in 0.6-14; fixed in 0.6-16 ov.other <- ov.names[!ov.names %in% c( ov.names.ind, ov.names.x, eqs.x, eqs.y )] if (length(ov.other) > 0L) { bad.idx <- which((ov.lhs %in% ov.names & ov.rhs %in% ov.other) | (ov.lhs %in% ov.other & ov.rhs %in% ov.names)) if (length(bad.idx) > 0L) { ov.lhs <- ov.lhs[-bad.idx] ov.rhs <- ov.rhs[-bad.idx] ov.op <- ov.op[-bad.idx] } } # exo ~~ if (!strict.exo && length(ov.names.x) > 0L) { OV <- ov.names.x nx <- length(OV) idx <- lower.tri(matrix(0, nx, nx), diag = TRUE) more.lhs <- rep(OV, each = nx)[idx] # fill upper.tri more.rhs <- rep(OV, times = nx)[idx] ov.lhs <- c(ov.lhs, more.lhs) ov.rhs <- c(ov.rhs, more.rhs) ov.op <- c(ov.op, rep("~~", length(more.lhs))) } # 2b. "~~" lv nx <- length(lv.names) idx <- lower.tri(matrix(0, nx, nx), diag = TRUE) lv.lhs <- rep(lv.names, each = nx)[idx] # fill upper.tri lv.rhs <- rep(lv.names, times = nx)[idx] lv.op <- rep("~~", length(lv.lhs)) # 3 regressions? r.lhs <- r.rhs <- r.op <- character(0) if (length(eqs.names) > 0L) { r.lhs <- rep(eqs.y, each = length(eqs.x)) r.rhs <- rep(eqs.x, times = length(eqs.y)) # remove self-arrows idx <- which(r.lhs == r.rhs) if (length(idx) > 0L) { r.lhs <- r.lhs[-idx] r.rhs <- r.rhs[-idx] } # remove indicator ~ factor if they exist bad.idx <- which(r.lhs %in% ov.names.ind & r.rhs %in% lv.names) if (length(bad.idx) > 0L) { r.lhs <- r.lhs[-bad.idx] r.rhs <- r.rhs[-bad.idx] } r.op <- rep("~", length(r.rhs)) } # 4. 
intercepts int.lhs <- int.rhs <- int.op <- character(0) if (meanstructure) { if (strict.exo) { int.lhs <- c(ov.names.nox, lv.names) } else { int.lhs <- c(ov.names, lv.names) } int.rhs <- rep("", length(int.lhs)) int.op <- rep("~1", length(int.lhs)) } # 5. thresholds th.lhs <- th.rhs <- th.op <- character(0) if (length(ov.names.ord) > 0L) { th.names <- lavpta$vnames$th[[b]] tmp <- strsplit(th.names, "\\|") th.lhs <- sapply(tmp, function(x) x[1]) th.rhs <- sapply(tmp, function(x) x[2]) th.op <- rep("|", length(th.lhs)) } # 6. scaling parameters delta.lhs <- delta.rhs <- delta.op <- character(0) if (ngroups > 1L && length(ov.names.ord) > 0L) { delta.lhs <- ov.names.ord delta.rhs <- ov.names.ord delta.op <- rep("~*~", length(delta.lhs)) } # combine this.lhs <- c( l.lhs, ov.lhs, lv.lhs, r.lhs, int.lhs, th.lhs, delta.lhs ) this.rhs <- c( l.rhs, ov.rhs, lv.rhs, r.rhs, int.rhs, th.rhs, delta.rhs ) this.op <- c( l.op, ov.op, lv.op, r.op, int.op, th.op, delta.op ) n.el <- length(this.lhs) lhs <- c(lhs, this.lhs) rhs <- c(rhs, this.rhs) op <- c(op, this.op) block <- c(block, rep(b, n.el)) group <- c(group, rep(GROUP.values[g], n.el)) level <- c(level, rep(LEVEL.values[l], n.el)) } # level } # group LIST <- data.frame( lhs = lhs, op = op, rhs = rhs, block = block, group = group, level = level, stringsAsFactors = FALSE ) if (free) { LIST$free <- rep(0L, nrow(LIST)) } if (start) { LIST$start <- rep(0, nrow(LIST)) } LIST } lavaan/R/lav_efa_summary.R0000644000176200001440000003175014627656441015237 0ustar liggesusers# summary information for a single (lavaan) efa model # # workflow: # - summary() first calls summary.efaList() # - for each model, summary.efaList() calls lav_object_summary() with # efa = TRUE and efa.args # - for each model, lav_object_summary() calls # lav_efa_summary(object, efa.args = efa.args) to populate the $efa slot # efa summary for a single lavaan object lav_efa_summary <- function(object, efa.args = list( lambda = TRUE, theta = TRUE, psi = TRUE, eigenvalues = TRUE, sumsq.table = TRUE, lambda.structure = FALSE, fs.determinacy = FALSE, se = FALSE, zstat = FALSE, pvalue = FALSE )) { stopifnot(inherits(object, "lavaan")) nblocks <- object@Model@nblocks orthogonal.flag <- object@Options$rotation.args$orthogonal # get standardized solution LAMBDA <- THETA <- PSI <- NULL STD <- lavTech(object, "std", add.class = TRUE, add.labels = TRUE, list.by.group = FALSE ) lambda.idx <- which(names(STD) == "lambda") theta.idx <- which(names(STD) == "theta") psi.idx <- which(names(STD) == "psi") # LAMBDA LAMBDA <- STD[lambda.idx] names(LAMBDA) <- NULL # THETA THETA <- STD[theta.idx] # make THETA diagonal THETA <- lapply(seq_len(nblocks), function(b) { tmp <- diag(THETA[[b]]) class(tmp) <- c("lavaan.vector", "numeric") tmp }) # PSI PSI <- STD[psi.idx] names(PSI) <- NULL # eigenvalues correlation matrix std.ov <- object@Options$rotation.args$std.ov COV <- object@h1$implied$cov # h1 if (std.ov) { COV <- lapply(COV, cov2cor) } eigvals <- NULL if (efa.args$eigenvalues) { eigvals <- lapply(seq_len(nblocks), function(b) { tmp <- eigen(COV[[b]], only.values = TRUE)$values names(tmp) <- paste("ev", 1:nrow(LAMBDA[[b]]), sep = "") class(tmp) <- c("lavaan.vector", "numeric") tmp }) } fs.determinacy <- NULL # Note: these 'determinacy' values are only properly defined for the # 'regression' factor scores! (If we would apply the same formulas # for Bartlett factor scores, we would obtain 1's! 
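# the computation below: with factor structure matrix S = LAMBDA %*% PSI,
# the determinacies are sqrt(diag(t(S) %*% R^-1 %*% S)), i.e. the
# correlations between the factors and their regression factor scores;
# values close to 1 indicate well-determined scores.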
if (efa.args$fs.determinacy) { fs.determinacy <- lapply(seq_len(nblocks), function(b) { COR <- cov2cor(COV[[b]]) # just in case COR.inv <- try(solve(COR), silent = TRUE) if (inherits(COR.inv, "try-error")) { return(rep(as.numeric(NA), nrow(PSI[[b]]))) } fs <- LAMBDA[[b]] %*% PSI[[b]] # factor structure out <- sqrt(diag(t(fs) %*% COR.inv %*% fs)) class(out) <- c("lavaan.vector", "numeric") out }) } # sum-of-squares table sumsq.table <- NULL if (efa.args$sumsq.table) { sumsq.table <- lapply(seq_len(nblocks), function(b) { nvar <- nrow(LAMBDA[[b]]) nfactor <- ncol(LAMBDA[[b]]) # sum of squares: # - if orthogonal, this is really the sum of the squared factor # loadings # - if oblique, we need to take the correlation into account sumsq <- diag(PSI[[b]] %*% crossprod(LAMBDA[[b]])) # reorder if (nfactor > 1L) { # determine order order.idx <- sort.int(sumsq, decreasing = TRUE, index.return = TRUE)$ix # re-order from large to small sumsq <- sumsq[order.idx] } # Proportion 'explained' (= proportion of total sumsq) # note: sum(sumsq) == sum(communalities) propexpl <- sumsq / sum(sumsq) # Proportion var (= sumsq/nvar) propvar <- sumsq / nrow(LAMBDA[[b]]) # Cumulative var cumvar <- cumsum(propvar) # construct table tmp <- rbind(sumsq, propexpl, propvar, cumvar) # total + colnames if (nfactor > 1L) { # add total column tmp <- cbind(tmp, rowSums(tmp)) tmp[4, ncol(tmp)] <- tmp[3, ncol(tmp)] colnames(tmp) <- c( colnames(LAMBDA[[b]])[order.idx], "total" ) } else { colnames(tmp) <- colnames(LAMBDA[[b]])[1] } # rownames if (nfactor == 1L) { ssq.label <- "Sum of squared loadings" } else if (orthogonal.flag) { ssq.label <- "Sum of sq (ortho) loadings" } else { ssq.label <- "Sum of sq (obliq) loadings" } rownames(tmp) <- c( ssq.label, "Proportion of total", "Proportion var", "Cumulative var" ) # class class(tmp) <- c("lavaan.matrix", "matrix") tmp }) } # sumsq.table # (factor) structure coefficients if (efa.args$lambda.structure) { lambda.structure <- lapply(seq_len(nblocks), function(b) { tmp <- LAMBDA[[b]] %*% PSI[[b]] class(tmp) <- c("lavaan.matrix", "matrix") tmp }) } else { lambda.structure <- NULL } # standard errors (if any) lambda.se <- theta.se <- psi.se <- NULL lambda.zstat <- theta.zstat <- psi.zstat <- NULL lambda.pval <- theta.pval <- psi.pval <- NULL if (object@Options$se != "none") { SE <- lavTech(object, "std.se", add.class = TRUE, add.labels = TRUE, list.by.group = FALSE ) se.flag <- (efa.args$se || efa.args$zstat || efa.args$pvalue) # ALWAYS use lambda.se if (efa.args$lambda) { lambda.se <- SE[lambda.idx] names(lambda.se) <- NULL } # theta.se if (se.flag && efa.args$theta) { theta.se <- SE[theta.idx] # make theta.se diagonal theta.se <- lapply(seq_len(nblocks), function(b) { tmp <- diag(theta.se[[b]]) class(tmp) <- c("lavaan.vector", "numeric") tmp }) } # ALWAYS use psi.se if (efa.args$psi) { psi.se <- SE[psi.idx] names(psi.se) <- NULL } # compute zstat if (efa.args$zstat || efa.args$pvalue) { if (efa.args$lambda) { lambda.zstat <- lapply(seq_len(nblocks), function(b) { tmp.se <- lambda.se[[b]] tmp.se[tmp.se < sqrt(.Machine$double.eps)] <- as.numeric(NA) tmp <- LAMBDA[[b]] / tmp.se class(tmp) <- c("lavaan.matrix", "matrix") tmp }) } if (efa.args$theta) { theta.zstat <- lapply(seq_len(nblocks), function(b) { tmp.se <- theta.se[[b]] tmp.se[tmp.se < sqrt(.Machine$double.eps)] <- as.numeric(NA) tmp <- THETA[[b]] / tmp.se class(tmp) <- c("lavaan.vector", "numeric") tmp }) } if (efa.args$psi) { psi.zstat <- lapply(seq_len(nblocks), function(b) { tmp.se <- psi.se[[b]] tmp.se[tmp.se < 
sqrt(.Machine$double.eps)] <- as.numeric(NA) tmp <- PSI[[b]] / tmp.se class(tmp) <- c( "lavaan.matrix.symmetric", "matrix" ) tmp }) } } # compute pval if (efa.args$pvalue) { if (efa.args$lambda) { lambda.pval <- lapply(seq_len(nblocks), function(b) { tmp <- 2 * (1 - pnorm(abs(lambda.zstat[[b]]))) class(tmp) <- c("lavaan.matrix", "matrix") tmp }) } if (efa.args$theta) { theta.pval <- lapply(seq_len(nblocks), function(b) { tmp <- 2 * (1 - pnorm(abs(theta.zstat[[b]]))) class(tmp) <- c("lavaan.vector", "numeric") tmp }) } if (efa.args$psi) { psi.pval <- lapply(seq_len(nblocks), function(b) { tmp <- 2 * (1 - pnorm(abs(psi.zstat[[b]]))) class(tmp) <- c( "lavaan.matrix.symmetric", "matrix" ) tmp }) } } } # se/zstat/pvalue # block.label block.label <- object@Data@block.label # we remove them here; we may have needed them for other parts if (!efa.args$lambda) { LAMBDA <- NULL } if (!efa.args$theta) { THETA <- NULL } if (!efa.args$psi) { PSI <- NULL } if (!efa.args$se) { # always keep lambda.se and psi.se (for the signif stars) theta.se <- NULL } if (!efa.args$zstat) { lambda.zstat <- theta.zstat <- psi.zstat <- NULL } res <- list( nblocks = nblocks, block.label = block.label, std.ov = std.ov, eigvals = eigvals, sumsq.table = sumsq.table, orthogonal = object@Options$rotation.args$orthogonal, lambda.structure = lambda.structure, fs.determinacy = fs.determinacy, lambda = LAMBDA, theta = THETA, psi = PSI, lambda.se = lambda.se, lambda.zstat = lambda.zstat, lambda.pvalue = lambda.pval, psi.se = psi.se, psi.zstat = psi.zstat, psi.pvalue = psi.pval, theta.se = theta.se, theta.zstat = theta.zstat, theta.pvalue = theta.pval ) res } # summary efaList summary.efaList <- function(object, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1, alpha.level = 0.01, lambda = TRUE, theta = TRUE, psi = TRUE, fit.table = TRUE, fs.determinacy = FALSE, eigenvalues = TRUE, sumsq.table = TRUE, lambda.structure = FALSE, se = FALSE, zstat = FALSE, pvalue = FALSE, ...) 
{ # kill object$loadings if present object[["loadings"]] <- NULL # unclass the object y <- unclass(object) # construct efa.args efa.args <- list( lambda = lambda, theta = theta, psi = psi, eigenvalues = eigenvalues, sumsq.table = sumsq.table, lambda.structure = lambda.structure, fs.determinacy = fs.determinacy, se = se, zstat = zstat, pvalue = pvalue ) # extract useful info from first model out <- lav_object_summary(y[[1]], header = TRUE, estimates = FALSE, efa = FALSE ) # header information lavaan.version <- out$header$lavaan.version converged.flag <- all(sapply(y, lavInspect, "converged")) # estimator estimator <- out$optim$estimator estimator.args <- out$optim$estimator.args # rotation rotation <- out$rotation$rotation rotation.args <- out$rotation$rotation.args # data lavdata <- out$data # main part: lav_object_summary information per model RES <- lapply(y, lav_object_summary, header = FALSE, fit.measures = FALSE, estimates = TRUE, efa = TRUE, efa.args = efa.args ) # number of factors (for ALL blocks) nfactors <- sapply(y, function(x) x@pta$nfac[[1]]) # fit.measures Table <- NULL if (fit.table) { # first, create standard table FIT <- fitMeasures(object, fit.measures = "default") NAMES <- rownames(FIT) idx <- integer(0L) # AIC/BIC if (all(c("aic", "bic", "bic2") %in% NAMES)) { this.idx <- match(c("aic", "bic", "bic2"), NAMES) idx <- c(idx, this.idx) } # chi-square if (all(c("chisq.scaled", "df.scaled", "pvalue.scaled") %in% NAMES)) { this.idx <- match( c("chisq.scaled", "df.scaled", "pvalue.scaled"), NAMES ) idx <- c(idx, this.idx) } else { this.idx <- match(c("chisq", "df", "pvalue"), NAMES) idx <- c(idx, this.idx) } # CFI if ("cfi.robust" %in% NAMES && !all(is.na(FIT["cfi.robust", ]))) { this.idx <- match("cfi.robust", NAMES) idx <- c(idx, this.idx) } else if ("cfi.scaled" %in% NAMES) { this.idx <- match("cfi.scaled", NAMES) idx <- c(idx, this.idx) } else if ("cfi" %in% NAMES) { this.idx <- match("cfi", NAMES) idx <- c(idx, this.idx) } # RMSEA if ("rmsea.robust" %in% NAMES && !all(is.na(FIT["rmsea.robust", ]))) { this.idx <- match("rmsea.robust", NAMES) idx <- c(idx, this.idx) } else if ("rmsea.scaled" %in% NAMES) { this.idx <- match("rmsea.scaled", NAMES) idx <- c(idx, this.idx) } else if ("rmsea" %in% NAMES) { this.idx <- match("rmsea", NAMES) idx <- c(idx, this.idx) } # table with fitmeasures if (length(idx) > 0L) { Table <- t(FIT[idx, , drop = FALSE]) tmp <- NAMES[idx] # strip '.scaled' tmp <- gsub(".scaled", "", tmp) # replace 'robust' by 'r' (if any) tmp <- gsub(".robust", "", tmp) # rename "bic2" -> "sabic" bic2.idx <- which(tmp == "bic2") if (length(bic2.idx) > 0L) { tmp[bic2.idx] <- "sabic" } colnames(Table) <- tmp } else { Table <- matrix(0, nrow = nfactors, ncol = 0L) } rownames(Table) <- paste("nfactors = ", nfactors, sep = "") class(Table) <- c("lavaan.matrix", "matrix") } # create return object out <- list( lavaan.version = lavaan.version, converged.flag = converged.flag, estimator = estimator, estimator.args = estimator.args, rotation = rotation, rotation.args = rotation.args, lavdata = lavdata, fit.table = Table, nfactors = nfactors, model.list = RES ) # add nd, cutoff, dot.cutoff, ... 
# ... as attributes (for printing)
attr(out, "nd") <- nd
attr(out, "cutoff") <- cutoff
attr(out, "dot.cutoff") <- dot.cutoff
attr(out, "alpha.level") <- alpha.level
# create class
class(out) <- c("efaList.summary", "list")
out
}
lavaan/R/lav_fit_utils.R
# utility functions needed to compute various (robust) fit measures:
#
# - lav_fit_catml_dwls (for 'robust' RMSEA/CFI if data is categorical)
# - lav_fit_fiml_corrected (correct RMSEA/CFI if data is incomplete)

# compute scaling-factor (c.hat3) for fit.dwls, using fit.catml ingredients
# see:
# Savalei, V. (2021) Improving fit indices in SEM with categorical data.
# Multivariate Behavioral Research, 56(3), 390-407.
#
# YR Dec 2022: first version
# YR Jan 2023: catml_dwls should check if the input 'correlation' matrix
#              is positive-definite (or not)
lav_fit_catml_dwls <- function(lavobject, check.pd = TRUE) {
  # empty list
  empty.list <- list(
    XX3 = as.numeric(NA), df3 = as.numeric(NA),
    c.hat3 = as.numeric(NA), XX3.scaled = as.numeric(NA),
    XX3.null = as.numeric(NA), df3.null = as.numeric(NA),
    c.hat3.null = as.numeric(NA)
  )

  # limitations
  if (!lavobject@Model@categorical ||
      lavobject@Options$conditional.x ||
      length(unlist(lavobject@pta$vnames$ov.num)) > 0L) {
    return(empty.list)
  } else {
    lavdata <- lavobject@Data
    lavsamplestats <- lavobject@SampleStats
  }

  # check if the input matrix (or matrices) are all positive definite
  # (perhaps later, we can rely on 'smoothing', but not for now)
  pd.flag <- TRUE
  if (check.pd) {
    for (g in seq_len(lavdata@ngroups)) {
      COR <- lavsamplestats@cov[[g]]
      ev <- eigen(COR, symmetric = TRUE, only.values = TRUE)$values
      if (any(ev < .Machine$double.eps^(1 / 2))) {
        # non-pd!
        pd.flag <- FALSE
        # should we give a warning here? (not for now)
        # warning("lavaan WARNING: robust RMSEA/CFI could not be computed because the input correlation matrix is not positive-definite")
        # what should we do? return NA (for now)
        return(empty.list)
      }
    }
  }

  # 'refit' using estimator = "catML"
  fit.catml <- try(lav_object_catml(lavobject), silent = TRUE)
  if (inherits(fit.catml, "try-error")) {
    return(empty.list)
  }
  XX3 <- fit.catml@test[[1]]$stat
  df3 <- fit.catml@test[[1]]$df

  # compute 'k'
  V <- lavTech(fit.catml, "wls.v") # NT-ML weight matrix
  W.dwls <- lavTech(lavobject, "wls.v") # DWLS weight matrix
  Gamma <- lavTech(lavobject, "gamma") # acov of polychorics
  Delta <- lavTech(lavobject, "delta")
  E.inv <- lavTech(lavobject, "inverted.information")
  fg <- unlist(lavsamplestats@nobs) / lavsamplestats@ntotal

  # Fixme: as we only need the trace, perhaps we could do this
  # group-specific?
(see lav_test_satorra_bentler_trace_original) V.g <- V W.dwls.g <- W.dwls Gamma.f <- Gamma Delta.g <- Delta for (g in seq_len(lavdata@ngroups)) { ntotal <- nrow(Gamma[[g]]) nvar <- lavobject@Model@nvar[[g]] pstar <- nvar * (nvar - 1) / 2 rm.idx <- seq_len(ntotal - pstar) # reduce Delta.g[[g]] <- Delta[[g]][-rm.idx, , drop = FALSE] # reduce and weight W.dwls.g[[g]] <- fg[g] * W.dwls[[g]][-rm.idx, -rm.idx] V.g[[g]] <- fg[g] * V[[g]] # should already have the right dims Gamma.f[[g]] <- 1 / fg[g] * Gamma[[g]][-rm.idx, -rm.idx] } # create 'big' matrices W.dwls.all <- lav_matrix_bdiag(W.dwls.g) V.all <- lav_matrix_bdiag(V.g) Gamma.all <- lav_matrix_bdiag(Gamma.f) Delta.all <- do.call("rbind", Delta.g) # compute trace WiU.all <- diag(nrow(W.dwls.all)) - Delta.all %*% E.inv %*% t(Delta.all) %*% W.dwls.all ks <- sum(diag(t(WiU.all) %*% V.all %*% WiU.all %*% Gamma.all)) # convert to lavaan 'scaling.factor' c.hat3 <- ks / df3 XX3.scaled <- XX3 / c.hat3 # baseline model XX3.null <- fit.catml@baseline$test[[1]]$stat if (is.null(XX3.null)) { XX3.null <- as.numeric(NA) df3.null <- as.numeric(NA) kbs <- as.numeric(NA) c.hat3.null <- as.numeric(NA) } else { df3.null <- fit.catml@baseline$test[[1]]$df kbs <- sum(diag(Gamma.all)) c.hat3.null <- kbs / df3.null } # return values list( XX3 = XX3, df3 = df3, c.hat3 = c.hat3, XX3.scaled = XX3.scaled, XX3.null = XX3.null, df3.null = df3.null, c.hat3.null = c.hat3.null ) } # compute ingredients to compute FIML-Corrected RMSEA/CFI # see: # Zhang X, Savalei V. (2022). New computations for RMSEA and CFI # following FIML and TS estimation with missing data. Psychological Methods. lav_fit_fiml_corrected <- function(lavobject, version = "V3") { version <- toupper(version) if (!version %in% c("V3", "V6")) { lav_msg_stop(gettext("only FIML-C(V3) and FIML-C(V6) are available.")) } # empty list empty.list <- list( XX3 = as.numeric(NA), df3 = as.numeric(NA), c.hat3 = as.numeric(NA), XX3.scaled = as.numeric(NA), XX3.null = as.numeric(NA), df3.null = as.numeric(NA), c.hat3.null = as.numeric(NA) ) # limitations if (lavobject@Options$conditional.x || lavobject@Data@nlevels > 1L || !.hasSlot(lavobject, "h1") || is.null(lavobject@h1$implied$cov[[1]])) { return(empty.list) } else { lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats h1 <- lavTech(lavobject, "h1", add.labels = TRUE) COV.tilde <- lapply(h1, "[[", "cov") MEAN.tilde <- lapply(h1, "[[", "mean") sample.nobs <- unlist(lavsamplestats@nobs) } # 'refit' using 'tilde' (=EM/saturated) sample statistics fit.tilde <- try(lavaan( model = parTable(lavobject), sample.cov = COV.tilde, sample.mean = MEAN.tilde, sample.nobs = sample.nobs, sample.cov.rescale = FALSE, information = "observed", optim.method = "none", se = "none", test = "standard", baseline = FALSE, check.post = FALSE ), silent = TRUE) if (inherits(fit.tilde, "try-error")) { return(empty.list) } XX3 <- fit.tilde@test[[1]]$stat df3 <- fit.tilde@test[[1]]$df # compute 'k' # V3/V6: always use h1.information = "unstructured"!! 
lavobject@Options$h1.information <- c("unstructured", "unstructured") lavobject@Options$observed.information <- c("h1", "h1") fit.tilde@Options$h1.information <- c("unstructured", "unstructured") Wm <- Wm.g <- lav_model_h1_information_observed(lavobject) Jm <- lav_model_h1_information_firstorder(lavobject) Wc <- Wc.g <- lav_model_h1_information_observed(fit.tilde) if (version == "V3") { Gamma.f <- vector("list", length = lavdata@ngroups) } Delta <- lavTech(lavobject, "delta") E.inv <- lavTech(lavobject, "inverted.information") # Wmi <- Wmi.g <- lapply(Wm, solve) ## <- how wrote this? (I did) Wmi <- Wmi.g <- try(lapply(Wm, lav_matrix_symmetric_inverse), silent = TRUE ) if (inherits(Wmi, "try-error")) { return(empty.list) } fg <- unlist(lavsamplestats@nobs) / lavsamplestats@ntotal # Fixme: as we only need the trace, perhaps we could do this # group-specific? (see lav_test_satorra_bentler_trace_original) for (g in seq_len(lavdata@ngroups)) { # group weight Wc.g[[g]] <- fg[g] * Wc[[g]] Wm.g[[g]] <- fg[g] * Wm[[g]] Wmi.g[[g]] <- 1 / fg[g] * Wmi[[g]] # Gamma if (version == "V3") { Gamma.g <- Wmi[[g]] %*% Jm[[g]] %*% Wmi[[g]] Gamma.f[[g]] <- 1 / fg[g] * Gamma.g } } # create 'big' matrices Wc.all <- lav_matrix_bdiag(Wc.g) Wm.all <- lav_matrix_bdiag(Wm.g) Wmi.all <- lav_matrix_bdiag(Wmi.g) Delta.all <- do.call("rbind", Delta) # compute trace U <- Wm.all - Wm.all %*% Delta.all %*% E.inv %*% t(Delta.all) %*% Wm.all # V3 or V6? if (version == "V3") { Gamma.all <- lav_matrix_bdiag(Gamma.f) k.fimlc <- sum(diag(U %*% Wmi.all %*% Wc.all %*% Wmi.all %*% U %*% Gamma.all)) } else { # V6 k.fimlc <- sum(diag(Wc.all %*% Wmi.all %*% U %*% Wmi.all)) } # convert to lavaan 'scaling.factor' c.hat3 <- k.fimlc / df3 XX3.scaled <- XX3 / c.hat3 # collect temp results out <- list( XX3 = XX3, df3 = df3, c.hat3 = c.hat3, XX3.scaled = XX3.scaled, XX3.null = as.numeric(NA), df3.null = as.numeric(NA), c.hat3.null = as.numeric(NA) ) # baseline model fitB <- try(lav_object_independence(lavobject), silent = TRUE) if (inherits(fitB, "try-error")) { return(out) } # 'refit' using 'tilde' (=EM/saturated) sample statistics fitB.tilde <- try(lavaan( model = parTable(fitB), sample.cov = COV.tilde, sample.mean = MEAN.tilde, sample.nobs = sample.nobs, sample.cov.rescale = FALSE, information = "observed", optim.method = "none", se = "none", test = "standard", baseline = FALSE, check.post = FALSE ), silent = TRUE) if (inherits(fitB.tilde, "try-error")) { return(out) } XX3.null <- fitB.tilde@test[[1]]$stat df3.null <- fitB.tilde@test[[1]]$df fitB@Options$h1.information <- c("unstructured", "unstructured") fitB@Options$observed.information <- c("h1", "h1") E.invB <- lavTech(fitB, "inverted.information") DeltaB <- lavTech(fitB, "Delta") DeltaB.all <- do.call("rbind", DeltaB) # trace baseline model UB <- Wm.all - Wm.all %*% DeltaB.all %*% E.invB %*% t(DeltaB.all) %*% Wm.all # V3 or V6? 
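# V3 keeps the full sandwich trace tr(U W^-1 Wc W^-1 U Gamma), while V6
# drops Gamma and uses the cheaper tr(Wc W^-1 U W^-1); see Zhang & Savalei
# (2022), cited at the top of this section, for both versions.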
if (version == "V3") { kb.fimlc <- sum(diag(UB %*% Wmi.all %*% Wc.all %*% Wmi.all %*% UB %*% Gamma.all)) } else { # V6 kb.fimlc <- sum(diag(Wc.all %*% Wmi.all %*% UB %*% Wmi.all)) } # convert to lavaan 'scaling.factor' c.hat3.null <- kb.fimlc / df3.null # return values list( XX3 = XX3, df3 = df3, c.hat3 = c.hat3, XX3.scaled = XX3.scaled, XX3.null = XX3.null, df3.null = df3.null, c.hat3.null = c.hat3.null ) } lavaan/R/lav_matrix.R0000644000176200001440000014140714627656441014234 0ustar liggesusers# Magnus & Neudecker (1999) style matrix operations # YR - 11 may 2011: initial version # YR - 19 okt 2014: rename functions using lav_matrix_ prefix # vec operator # # the vec operator (for 'vectorization') transforms a matrix into # a vector by stacking the *columns* of the matrix one underneath the other # # M&N book: page 30 # # note: we do not coerce to 'double/numeric' storage-mode (like as.numeric) lav_matrix_vec <- function(A) { as.vector(A) } # vecr operator # # the vecr operator ransforms a matrix into # a vector by stacking the *rows* of the matrix one underneath the other lav_matrix_vecr <- function(A) { # faster way?? # nRow <- NROW(A); nCol <- NCOL(A) # idx <- (seq_len(nCol) - 1L) * nRow + rep(seq_len(nRow), each = nCol) lav_matrix_vec(t(A)) } # vech # # the vech operator (for 'half vectorization') transforms a *symmetric* matrix # into a vector by stacking the *columns* of the matrix one underneath the # other, but eliminating all supradiagonal elements # # see Henderson & Searle, 1979 # # M&N book: page 48-49 # lav_matrix_vech <- function(S, diagonal = TRUE) { ROW <- row(S) COL <- col(S) if (diagonal) S[ROW >= COL] else S[ROW > COL] } # the vechr operator transforms a *symmetric* matrix # into a vector by stacking the *rows* of the matrix one after the # other, but eliminating all supradiagonal elements lav_matrix_vechr <- function(S, diagonal = TRUE) { S[lav_matrix_vechr_idx(n = NCOL(S), diagonal = diagonal)] } # the vechu operator transforms a *symmetric* matrix # into a vector by stacking the *columns* of the matrix one after the # other, but eliminating all infradiagonal elements lav_matrix_vechu <- function(S, diagonal = TRUE) { S[lav_matrix_vechu_idx(n = NCOL(S), diagonal = diagonal)] } # the vechru operator transforms a *symmetric* matrix # into a vector by stacking the *rows* of the matrix one after the # other, but eliminating all infradiagonal elements # # same as vech (but using upper-diagonal elements) lav_matrix_vechru <- function(S, diagonal = TRUE) { S[lav_matrix_vechru_idx(n = NCOL(S), diagonal = diagonal)] } # return the *vector* indices of the lower triangular elements of a # symmetric matrix of size 'n' lav_matrix_vech_idx <- function(n = 1L, diagonal = TRUE) { n <- as.integer(n) if (n < 100L) { ROW <- matrix(seq_len(n), n, n) COL <- matrix(seq_len(n), n, n, byrow = TRUE) if (diagonal) which(ROW >= COL) else which(ROW > COL) } else { # ldw version if (diagonal) { unlist(lapply( seq_len(n), function(j) (j - 1L) * n + seq.int(j, n) )) } else { unlist(lapply( seq_len(n - 1L), function(j) (j - 1L) * n + seq.int(j + 1L, n) )) } } } # return the *row* indices of the lower triangular elements of a # symmetric matrix of size 'n' lav_matrix_vech_row_idx <- function(n = 1L, diagonal = TRUE) { n <- as.integer(n) if (diagonal) { unlist(lapply(seq_len(n), seq.int, n)) } else { 1 + unlist(lapply(seq_len(n - 1), seq.int, n - 1)) } } # return the *col* indices of the lower triangular elements of a # symmetric matrix of size 'n' lav_matrix_vech_col_idx <- function(n = 1L, diagonal 
= TRUE) { n <- as.integer(n) if (!diagonal) { n <- n - 1L } rep.int(seq_len(n), times = rev(seq_len(n))) } # return the *vector* indices of the lower triangular elements of a # symmetric matrix of size 'n' -- ROW-WISE lav_matrix_vechr_idx <- function(n = 1L, diagonal = TRUE) { n <- as.integer(n) if (n < 100L) { ROW <- matrix(seq_len(n), n, n) COL <- matrix(seq_len(n), n, n, byrow = TRUE) tmp <- matrix(seq_len(n * n), n, n, byrow = TRUE) if (diagonal) tmp[ROW <= COL] else tmp[ROW < COL] } else { if (diagonal) { unlist(lapply( seq_len(n), function(j) seq.int(1, j) * n - (n - j) )) } else { unlist(lapply( seq_len(n - 1L), function(j) seq.int(1, j) * n - (n - j) + 1 )) } } } # return the *vector* indices of the upper triangular elements of a # symmetric matrix of size 'n' -- COLUMN-WISE lav_matrix_vechu_idx <- function(n = 1L, diagonal = TRUE) { n <- as.integer(n) if (n < 100L) { ROW <- matrix(seq_len(n), n, n) COL <- matrix(seq_len(n), n, n, byrow = TRUE) if (diagonal) which(ROW <= COL) else which(ROW < COL) } else { if (diagonal) { unlist(lapply(seq_len(n), function(j) seq.int(j) + (j - 1) * n)) } else { unlist(lapply(seq_len(n - 1L), function(j) seq.int(j) + j * n)) } } } # return the *vector* indices of the upper triangular elements of a # symmetric matrix of size 'n' -- ROW-WISE lav_matrix_vechru_idx <- function(n = 1L, diagonal = TRUE) { n <- as.integer(n) if (n < 100L) { # FIXME!! make this more efficient (without creating 3 n*n matrices!) ROW <- matrix(seq_len(n), n, n) COL <- matrix(seq_len(n), n, n, byrow = TRUE) tmp <- matrix(seq_len(n * n), n, n, byrow = TRUE) if (diagonal) tmp[ROW >= COL] else tmp[ROW > COL] } else { # ldw version if (diagonal) { unlist(lapply( seq_len(n), function(j) seq.int(j - 1, n - 1) * n + j )) } else { unlist(lapply( seq_len(n - 1L), function(j) seq.int(j, n - 1) * n + j )) } } } # vech.reverse and vechru.reverse (aka `upper2full') # # given the output of vech(S) --or vechru(S) which is identical-- # reconstruct S lav_matrix_vech_reverse <- lav_matrix_vechru_reverse <- lav_matrix_upper2full <- function(x, diagonal = TRUE) { # guess dimensions if (diagonal) { p <- (sqrt(1 + 8 * length(x)) - 1) / 2 } else { p <- (sqrt(1 + 8 * length(x)) + 1) / 2 } S <- numeric(p * p) S[lav_matrix_vech_idx(p, diagonal = diagonal)] <- x S[lav_matrix_vechru_idx(p, diagonal = diagonal)] <- x attr(S, "dim") <- c(p, p) S } # vechr.reverse and vechu.reverse (aka `lower2full') # # given the output of vechr(S) --or vechu(S) which is identical-- # reconstruct S lav_matrix_vechr_reverse <- lav_matrix_vechu_reverse <- lav_matrix_lower2full <- function(x, diagonal = TRUE) { # guess dimensions if (diagonal) { p <- (sqrt(1 + 8 * length(x)) - 1) / 2 } else { p <- (sqrt(1 + 8 * length(x)) + 1) / 2 } stopifnot(p == round(p, 0)) S <- numeric(p * p) S[lav_matrix_vechr_idx(p, diagonal = diagonal)] <- x S[lav_matrix_vechu_idx(p, diagonal = diagonal)] <- x attr(S, "dim") <- c(p, p) S } # return the *vector* indices of the diagonal elements of a symmetric # matrix of size 'n' lav_matrix_diag_idx <- function(n = 1L) { # if(n < 1L) return(integer(0L)) 1L + (seq_len(n) - 1L) * (n + 1L) } # return the *vector* indices of the diagonal elements of the LOWER part # of a symmetric matrix of size 'n' lav_matrix_diagh_idx <- function(n = 1L) { if (n < 1L) { return(integer(0L)) } if (n == 1L) { return(1L) } c(1L, cumsum(n:2L) + 1L) } # return the *vector* indices of the ANTI diagonal elements of a symmetric # matrix of size 'n' lav_matrix_antidiag_idx <- function(n = 1L) { if (n < 1L) { return(integer(0L)) } 1L +
seq_len(n) * (n - 1L) } # return the *vector* indices of 'idx' elements in a vech() matrix # # eg if n = 4 and type == "and" and idx = c(2,4) # we create matrix A = # [,1] [,2] [,3] [,4] # [1,] FALSE FALSE FALSE FALSE # [2,] FALSE TRUE FALSE TRUE # [3,] FALSE FALSE FALSE FALSE # [4,] FALSE TRUE FALSE TRUE # # and the result is c(5,7,10) # # eg if n = 4 and type == "or" and idx = c(2,4) # we create matrix A = # [,1] [,2] [,3] [,4] # [1,] FALSE TRUE FALSE TRUE # [2,] TRUE TRUE TRUE TRUE # [3,] FALSE TRUE FALSE TRUE # [4,] TRUE TRUE TRUE TRUE # # and the result is c(2, 4, 5, 6, 7, 9, 10) # lav_matrix_vech_which_idx <- function(n = 1L, diagonal = TRUE, idx = integer(0L), type = "and", add.idx.at.start = FALSE) { if (length(idx) == 0L) { return(integer(0L)) } n <- as.integer(n) A <- matrix(FALSE, n, n) if (type == "and") { A[idx, idx] <- TRUE } else if (type == "or") { A[idx, ] <- TRUE A[, idx] <- TRUE } pstar.idx <- which(lav_matrix_vech(A, diagonal = diagonal)) if (add.idx.at.start) { pstar.idx <- c(idx, pstar.idx + n) } pstar.idx } # similar to lav_matrix_vech_which_idx(), but # - only 'type = and' # - order of idx matters! lav_matrix_vech_match_idx <- function(n = 1L, diagonal = TRUE, idx = integer(0L)) { if (length(idx) == 0L) { return(integer(0L)) } n <- as.integer(n) pstar <- n * (n + 1) / 2 A <- lav_matrix_vech_reverse(seq_len(pstar)) B <- A[idx, idx, drop = FALSE] lav_matrix_vech(B, diagonal = diagonal) } # check if square matrix is diagonal (no tolerance!) lav_matrix_is_diagonal <- function(A = NULL) { A <- as.matrix.default(A) stopifnot(nrow(A) == ncol(A)) diag(A) <- 0 all(A == 0) } # create the duplication matrix (D_n): it 'duplicates' the elements # in vech(S) to create vec(S) (where S is symmetric) # # D %*% vech(S) == vec(S) # # M&N book: pages 48-50 # # note: several flavors: dup1, dup2, dup3, ... # currently used: dup3 # first attempt # dup1: working on the vector indices only .dup1 <- function(n = 1L) { if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } if (n > 255L) { lav_msg_stop(gettext("n is too large")) } # dimensions n2 <- n * n nstar <- n * (n + 1) / 2 # THIS is the real bottleneck: allocating an ocean of zeroes... x <- numeric(n2 * nstar) # delta patterns r1 <- seq.int(from = n * n + 1, by = -(n - 1), length.out = n - 1) r2 <- seq.int(from = n - 1, by = n - 1, length.out = n - 1) r3 <- seq.int(from = 2 * n + 1, by = n, length.out = n - 1) # is there a more elegant way to do this? rr <- unlist(lapply( (n - 1):1, function(x) { c(rbind(r1[1:x], r2[1:x]), r3[n - x]) } )) idx <- c(1L, cumsum(rr) + 1L) # create matrix x[idx] <- 1.0 attr(x, "dim") <- c(n2, nstar) x } # second attempt # dup2: working on the row/col matrix indices # (but only create the matrix at the very end) .dup2 <- function(n = 1L) { if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } if (n > 255L) { lav_msg_stop(gettext("n is too large")) } nstar <- n * (n + 1) / 2 n2 <- n * n # THIS is the real bottleneck: allocating an ocean of zeroes... 
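    # note (illustrative, not from the original source): the n <= 255 guard
    # above keeps this allocation representable as a classic R vector; for
    # n = 255, n2 * nstar = 65025 * 32640 = 2,122,416,000 elements, just
    # below the 2^31 - 1 length limit, while n = 256 would need
    # 65536 * 32896 = 2,155,872,256 elements and overflow it (n = 255
    # already amounts to roughly 17 GB of doubles)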
x <- numeric(n2 * nstar) idx1 <- lav_matrix_vech_idx(n) + ((1L:nstar) - 1L) * n2 # vector indices idx2 <- lav_matrix_vechru_idx(n) + ((1L:nstar) - 1L) * n2 # vector indices x[idx1] <- 1.0 x[idx2] <- 1.0 attr(x, "dim") <- c(n2, nstar) x } # dup3: using col idx only # D7 <- dup(7L); x<- apply(D7, 1, function(x) which(x > 0)); matrix(x,7,7) .dup3 <- function(n = 1L) { if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } if (n > 255L) { lav_msg_stop(gettext("n is too large")) } nstar <- n * (n + 1) / 2 n2 <- n * n # THIS is the real bottleneck: allocating an ocean of zeroes... x <- numeric(n2 * nstar) tmp <- matrix(0L, n, n) tmp[lav_matrix_vech_idx(n)] <- 1:nstar tmp[lav_matrix_vechru_idx(n)] <- 1:nstar idx <- (1:n2) + (lav_matrix_vec(tmp) - 1L) * n2 x[idx] <- 1.0 attr(x, "dim") <- c(n2, nstar) x } # dup4: using Matrix package, returning a sparse matrix # .dup4 <- function(n = 1L) { # if ((n < 1L) || (round(n) != n)) { # stop("n must be a positive integer") # } # # if(n > 255L) { # stop("n is too large") # } # # nstar <- n * (n+1)/2 # #n2 <- n * n # # tmp <- matrix(0L, n, n) # tmp[lav_matrix_vech_idx(n)] <- 1:nstar # tmp[lav_matrix_vechru_idx(n)] <- 1:nstar # # x <- Matrix::sparseMatrix(i = 1:(n*n), j = vec(tmp), x = 1.0) # # x # } # default dup: lav_matrix_duplication <- .dup3 # duplication matrix for correlation matrices: # - it returns a matrix of size p^2 * (p*(p-1))/2 # - the columns corresponding to the diagonal elements have been removed lav_matrix_duplication_cor <- function(n = 1L) { out <- lav_matrix_duplication(n = n) diag.idx <- lav_matrix_diagh_idx(n = n) out[, -diag.idx, drop = FALSE] } # compute t(D) %*% A (without explicitly computing D) # sqrt(nrow(A)) is an integer # A is not symmetric, and not even square, only n^2 ROWS lav_matrix_duplication_pre <- function(A = matrix(0, 0, 0)) { # number of rows n2 <- NROW(A) # square nrow(A) only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE] u <- which(idx1 %in% idx2) OUT[u, ] <- OUT[u, ] / 2.0 OUT } # dupr_pre is faster... 
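# illustrative check of the duplication-matrix identities above (a sketch,
# not part of the original source; assumes the lav_matrix_* functions are
# accessible, e.g. via lavaan:::lav_matrix_duplication):
# S <- cov(matrix(rnorm(40), 10, 4))                  # 4 x 4 symmetric
# D <- lav_matrix_duplication(4L)                     # 16 x 10
# all.equal(as.vector(D %*% lav_matrix_vech(S)),
#           lav_matrix_vec(S))                        # TRUE
# A <- matrix(rnorm(16 * 3), 16, 3)
# all.equal(lav_matrix_duplication_pre(A), crossprod(D, A)) # TRUE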
lav_matrix_duplication_dup_pre2 <- function(A = matrix(0, 0, 0)) { # number of rows n2 <- NROW(A) # square nrow(A) only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- A[idx1, , drop = FALSE] u <- which(!idx1 %in% idx2) OUT[u, ] <- OUT[u, ] + A[idx2[u], ] OUT } # compute t(D) %*% A (without explicitly computing D) # sqrt(nrow(A)) is an integer # A is not symmetric, and not even square, only n^2 ROWS # correlation version: ignoring diagonal elements lav_matrix_duplication_cor_pre <- function(A = matrix(0, 0, 0)) { # number of rows n2 <- NROW(A) # square nrow(A) only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n, diagonal = FALSE) idx2 <- lav_matrix_vechru_idx(n, diagonal = FALSE) OUT <- A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE] u <- which(idx1 %in% idx2) OUT[u, ] <- OUT[u, ] / 2.0 OUT } # compute A %*% D (without explicitly computing D) # sqrt(ncol(A)) must be an integer # A is not symmetric, and not even square, only n^2 COLUMNS lav_matrix_duplication_post <- function(A = matrix(0, 0, 0)) { # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- A[, idx1, drop = FALSE] + A[, idx2, drop = FALSE] u <- which(idx1 %in% idx2) OUT[, u] <- OUT[, u] / 2.0 OUT } # compute A %*% D (without explicitly computing D) # sqrt(ncol(A)) must be an integer # A is not symmetric, and not even square, only n^2 COLUMNS # correlation version: ignoring the diagonal elements lav_matrix_duplication_cor_post <- function(A = matrix(0, 0, 0)) { # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n, diagonal = FALSE) idx2 <- lav_matrix_vechru_idx(n, diagonal = FALSE) OUT <- A[, idx1, drop = FALSE] + A[, idx2, drop = FALSE] u <- which(idx1 %in% idx2) OUT[, u] <- OUT[, u] / 2.0 OUT } # compute t(D) %*% A %*% D (without explicitly computing D) # A must be a square matrix and sqrt(ncol) an integer lav_matrix_duplication_pre_post <- function(A = matrix(0, 0, 0)) { # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(NROW(A) == n2, sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE] u <- which(idx1 %in% idx2) OUT[u, ] <- OUT[u, ] / 2.0 OUT <- OUT[, idx1, drop = FALSE] + OUT[, idx2, drop = FALSE] OUT[, u] <- OUT[, u] / 2.0 OUT } # compute t(D) %*% A %*% D (without explicitly computing D) # A must be a square matrix and sqrt(ncol) an integer # correlation version: ignoring diagonal elements lav_matrix_duplication_cor_pre_post <- function(A = matrix(0, 0, 0)) { # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(NROW(A) == n2, sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # dup idx idx1 <- lav_matrix_vech_idx(n, diagonal = FALSE) idx2 <- lav_matrix_vechru_idx(n, diagonal = FALSE) OUT <- A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE] u <- which(idx1 %in% idx2) OUT[u, ] <- OUT[u, ] / 2.0 OUT <- OUT[, idx1, drop = FALSE] + OUT[, idx2, drop = FALSE] OUT[, u] <- OUT[, u] / 2.0 OUT } # create the elimination matrix L_n: # it removes the duplicated elements in vec(A) to create vech(A) # even if A is not symmetric # # L 
%*% vec(A) == vech(A) lav_matrix_elimination <- function(n = 1L) { if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } if (n > 255L) { lav_msg_stop(gettext("n is too large")) } nstar <- n * (n + 1) / 2 n2 <- n * n # THIS is the real bottleneck: allocating an ocean of zeroes... L <- matrix(0, nrow = nstar, ncol = n2) L[ cbind(seq_len(nstar), lav_matrix_vech_idx(n)) ] <- 1 L } # compute L %*% A (without explicitly computing L) # sqrt(nrow(A)) is an integer # A is not symmetric, and not even square, only n^2 ROWS lav_matrix_elimination_pre <- function(A = matrix(0, 0, 0)) { # number of rows n2 <- NROW(A) # square nrow(A) only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # select vech idx rows idx <- lav_matrix_vech_idx(n) OUT <- A[idx, , drop = FALSE] OUT } # compute A %*% t(L) (without explicitly computing L) # sqrt(ncol(A)) is an integer # A is not symmetric, and not even square, only n^2 COLS lav_matrix_elimination_post <- function(A = matrix(0, 0, 0)) { # number of columns n2 <- NCOL(A) # square ncol(A) only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # select vech idx columns idx <- lav_matrix_vech_idx(n) OUT <- A[, idx, drop = FALSE] OUT } # compute L %*% A %*% t(L) (without explicitly computing L) # A must be a square matrix and sqrt(ncol) an integer lav_matrix_elimination_pre_post <- function(A = matrix(0, 0, 0)) { # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(NROW(A) == n2, sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # select vech idx rows/columns idx <- lav_matrix_vech_idx(n) OUT <- A[idx, idx, drop = FALSE] OUT } # create the generalized inverse of the duplication matrix (D^+_n): # it removes the duplicated elements in vec(S) to create vech(S) # if S is symmetric # # D^+ %*% vec(S) == vech(S) # # M&N book: page 49 # # D^+ == solve(t(D_n) %*% D_n) %*% t(D_n) # create first t(DUP.ginv) .dup_ginv1 <- function(n = 1L) { if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } if (n > 255L) { lav_msg_stop(gettext("n is too large")) } nstar <- n * (n + 1) / 2 n2 <- n * n # THIS is the real bottleneck: allocating an ocean of zeroes... x <- numeric(nstar * n2) tmp <- matrix(1:(n * n), n, n) idx1 <- lav_matrix_vech(tmp) + (0:(nstar - 1L)) * n2 x[idx1] <- 0.5 idx2 <- lav_matrix_vechru(tmp) + (0:(nstar - 1L)) * n2 x[idx2] <- 0.5 idx3 <- lav_matrix_diag_idx(n) + (lav_matrix_diagh_idx(n) - 1L) * n2 x[idx3] <- 1.0 attr(x, "dim") <- c(n2, nstar) x <- t(x) x } # create DUP.ginv without transpose .dup_ginv2 <- function(n = 1L) { if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } if (n > 255L) { lav_msg_stop(gettext("n is too large")) } nstar <- n * (n + 1) / 2 n2 <- n * n # THIS is the real bottleneck: allocating an ocean of zeroes...
x <- numeric(nstar * n2) x[(lav_matrix_vech_idx(n) - 1L) * nstar + 1:nstar] <- 0.5 x[(lav_matrix_vechru_idx(n) - 1L) * nstar + 1:nstar] <- 0.5 x[(lav_matrix_diag_idx(n) - 1L) * nstar + lav_matrix_diagh_idx(n)] <- 1.0 attr(x, "dim") <- c(nstar, n2) x } lav_matrix_duplication_ginv <- .dup_ginv2 # pre-multiply with D^+ # number of rows in A must be 'square' (n*n) lav_matrix_duplication_ginv_pre <- function(A = matrix(0, 0, 0)) { A <- as.matrix.default(A) # number of rows n2 <- NROW(A) # square nrow(A) only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) nstar <- n * (n + 1) / 2 idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- (A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE]) / 2 OUT } # post-multiply with t(D^+) # number of columns in A must be 'square' (n*n) lav_matrix_duplication_ginv_post <- function(A = matrix(0, 0, 0)) { A <- as.matrix.default(A) # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- (A[, idx1, drop = FALSE] + A[, idx2, drop = FALSE]) / 2 OUT } # pre AND post-multiply with D^+: D^+ %*% A %*% t(D^+) # for square matrices only, with ncol = nrow = n^2 lav_matrix_duplication_ginv_pre_post <- function(A = matrix(0, 0, 0)) { A <- as.matrix.default(A) # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(NROW(A) == n2, sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) idx1 <- lav_matrix_vech_idx(n) idx2 <- lav_matrix_vechru_idx(n) OUT <- (A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE]) / 2 OUT <- (OUT[, idx1, drop = FALSE] + OUT[, idx2, drop = FALSE]) / 2 OUT } # pre AND post-multiply with D^+: D^+ %*% A %*% t(D^+) # for square matrices only, with ncol = nrow = n^2 # - ignoring diagonal elements lav_matrix_duplication_ginv_cor_pre_post <- function(A = matrix(0, 0, 0)) { A <- as.matrix.default(A) # number of columns n2 <- NCOL(A) # square A only, n2 = n^2 stopifnot(NROW(A) == n2, sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) idx1 <- lav_matrix_vech_idx(n, diagonal = FALSE) idx2 <- lav_matrix_vechru_idx(n, diagonal = FALSE) OUT <- (A[idx1, , drop = FALSE] + A[idx2, , drop = FALSE]) / 2 OUT <- (OUT[, idx1, drop = FALSE] + OUT[, idx2, drop = FALSE]) / 2 OUT } # create the commutation matrix (K_mn) # the mn x mn commutation matrix is a permutation matrix which # transforms vec(A) into vec(A') # # K_mn %*% vec(A) == vec(A') # # (in Henderson & Searle 1979, it is called the vec-permutation matrix) # M&N book: pages 46-48 # # note: K_mn is a permutation matrix, so it is orthogonal: t(K_mn) = K_mn^-1 # K_nm %*% K_mn == I_mn # # it is called the 'commutation' matrix because it enables us to interchange # ('commute') the two matrices of a Kronecker product, eg # K_pm (A %x% B) K_nq == (B %x% A) # # important property: it allows us to transform a vec of a Kronecker product # into the Kronecker product of the vecs (if A is m x n and B is p x q): # vec(A %x% B) == (I_n %x% K_qm %x% I_p)(vec A %x% vec B) # first attempt .com1 <- function(m = 1L, n = 1L) { if ((m < 1L) || (round(m) != m)) { lav_msg_stop(gettext("m must be a positive integer")) } if ((n < 1L) || (round(n) != n)) { lav_msg_stop(gettext("n must be a positive integer")) } p <- m * n x <- numeric(p * p) pattern <- rep(c(rep((m + 1L) * n, (m - 1L)), n + 1L), n) idx <- c(1L, 1L + cumsum(pattern)[-p]) x[idx] <- 1.0 attr(x, "dim") <- c(p, p) x } lav_matrix_commutation <- .com1 # compute K_n %*% A without explicitly
computing K # K_n = K_nn, so sqrt(nrow(A)) must be an integer! # = permuting the rows of A lav_matrix_commutation_pre <- function(A = matrix(0, 0, 0)) { A <- as.matrix(A) # number of rows of A n2 <- nrow(A) # K_nn only (n2 = m * n) stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # compute row indices # row.idx <- as.integer(t(matrix(1:n2, n, n))) row.idx <- rep(1:n, each = n) + (0:(n - 1L)) * n OUT <- A[row.idx, , drop = FALSE] OUT } # compute A %*% K_n without explicitly computing K # K_n = K_nn, so sqrt(ncol(A)) must be an integer! # = permuting the columns of A lav_matrix_commutation_post <- function(A = matrix(0, 0, 0)) { A <- as.matrix(A) # number of columns of A n2 <- ncol(A) # K_nn only (n2 = m * n) stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # compute col indices # row.idx <- as.integer(t(matrix(1:n2, n, n))) col.idx <- rep(1:n, each = n) + (0:(n - 1L)) * n OUT <- A[, col.idx, drop = FALSE] OUT } # compute K_n %*% A %*% K_n without explicitly computing K # K_n = K_nn, so sqrt(ncol(A)) must be an integer! # = permuting both the rows AND columns of A lav_matrix_commutation_pre_post <- function(A = matrix(0, 0, 0)) { A <- as.matrix(A) # number of columns of A n2 <- NCOL(A) # K_nn only (n2 = m * n) stopifnot(sqrt(n2) == round(sqrt(n2))) # dimension n <- sqrt(n2) # compute col indices row.idx <- rep(1:n, each = n) + (0:(n - 1L)) * n col.idx <- row.idx OUT <- A[row.idx, col.idx, drop = FALSE] OUT } # compute K_mn %*% A without explicitly computing K # = permuting the rows of A lav_matrix_commutation_mn_pre <- function(A, m = 1L, n = 1L) { # number of rows of A mn <- NROW(A) stopifnot(mn == m * n) # compute row indices # row.idx <- as.integer(t(matrix(1:mn, m, n))) row.idx <- rep(1:m, each = n) + (0:(n - 1L)) * m OUT <- A[row.idx, , drop = FALSE] OUT } # N_n == 1/2 (I_n^2 + K_nn) # see MN page 48 # # N_n == D_n %*% D^+_n # lav_matrix_commutation_Nn <- function(n = 1L) { lav_msg_stop(gettext("not implemented yet")) } # (simplified) kronecker product for square matrices lav_matrix_kronecker_square <- function(A, check = TRUE) { dimA <- dim(A) n <- dimA[1L] n2 <- n * n if (check) { stopifnot(dimA[2L] == n) } # all possible combinations out <- tcrossprod(as.vector(A)) # break up in n*n pieces, and rearrange dim(out) <- c(n, n, n, n) out <- aperm(out, perm = c(3, 1, 4, 2)) # reshape again, to form n2 x n2 matrix dim(out) <- c(n2, n2) out } # (simplified) faster kronecker product for symmetric matrices # note: not faster, but the logic extends to vech versions lav_matrix_kronecker_symmetric <- function(S, check = TRUE) { dimS <- dim(S) n <- dimS[1L] n2 <- n * n if (check) { stopifnot(dimS[2L] == n) } # all possible combinations out <- tcrossprod(as.vector(S)) # break up in n*(n*n) pieces, and rearrange dim(out) <- c(n, n * n, n) out <- aperm(out, perm = c(3L, 2L, 1L)) # reshape again, to form n2 x n2 matrix dim(out) <- c(n2, n2) out } # shortcut for the idiom 't(S2) %*% (S %x% S) %*% S2' # where S is symmetric, and the rows of S2 correspond to # the elements of S # eg - S2 = DELTA (the jacobian dS/dtheta) lav_matrix_tS2_SxS_S2 <- function(S2, S, check = TRUE) { # size of S n <- NROW(S) if (check) { stopifnot(NROW(S2) == n * n) } A <- matrix(S %*% matrix(S2, n, ), n * n, ) A2 <- A[rep(1:n, each = n) + (0:(n - 1L)) * n, , drop = FALSE] crossprod(A, A2) } # shortcut for the idiom 't(D) %*% (S %x% S) %*% D' # where S is symmetric, and D is the duplication matrix # lav_matrix_tD_SxS_D <- function(S) { # TODO!! 
# } # square root of a positive definite symmetric matrix lav_matrix_symmetric_sqrt <- function(S = matrix(0, 0, 0)) { n <- NROW(S) # eigen decomposition, assume symmetric matrix S.eigen <- eigen(S, symmetric = TRUE) V <- S.eigen$vectors d <- S.eigen$values # 'fix' slightly negative tiny numbers d[d < 0] <- 0.0 # sqrt the eigenvalues and reconstruct S.sqrt <- V %*% diag(sqrt(d), n, n) %*% t(V) S.sqrt } # orthogonal complement of a matrix A # see Satorra (1992). Sociological Methodology, 22, 249-278, footnote 3: # # To compute such an orthogonal matrix, consider the p* x p* matrix P = I - # A(A'A)^-1A', which is idempotent of rank p* - q. Consider the singular value # decomposition P = HVH', where H is a p* x (p* - q) matrix of full column rank, # and V is a (p* - q) x (p* - q) diagonal matrix. It is obvious that H'A = 0; # hence, H is the desired orthogonal complement. This method of constructing an # orthogonal complement was proposed by Heinz Neudecker (1990, pers. comm.). # # update YR 21 okt 2014: # - note that A %*% solve(t(A) %*% A) %*% t(A) == tcrossprod(qr.Q(qr(A))) # - if we are using qr, we can as well use qr.Q to get the complement # lav_matrix_orthogonal_complement <- function(A = matrix(0, 0, 0)) { QR <- qr(A) ranK <- QR$rank # following Heinz Neudecker: # n <- nrow(A) # P <- diag(n) - tcrossprod(qr.Q(QR)) # OUT <- svd(P)$u[, seq_len(n - ranK), drop = FALSE] Q <- qr.Q(QR, complete = TRUE) # get rid of the first ranK columns OUT <- Q[, -seq_len(ranK), drop = FALSE] OUT } # construct block diagonal matrix from a list of matrices # ... can contain multiple arguments, which will be coerced to a list # or it can be a single list (of matrices) lav_matrix_bdiag <- function(...) { if (nargs() == 0L) { return(matrix(0, 0, 0)) } dots <- list(...) # create list of matrices if (is.list(dots[[1]])) { mlist <- dots[[1]] } else { mlist <- dots } if (length(mlist) == 1L) { return(mlist[[1]]) } # more than 1 matrix nmat <- length(mlist) nrows <- sapply(mlist, NROW) crows <- cumsum(nrows) ncols <- sapply(mlist, NCOL) ccols <- cumsum(ncols) trows <- sum(nrows) tcols <- sum(ncols) x <- numeric(trows * tcols) for (m in seq_len(nmat)) { if (m > 1L) { rcoffset <- trows * ccols[m - 1] + crows[m - 1] } else { rcoffset <- 0L } m.idx <- (rep((0:(ncols[m] - 1L)) * trows, each = nrows[m]) + rep(1:nrows[m], ncols[m]) + rcoffset) x[m.idx] <- mlist[[m]] } attr(x, "dim") <- c(trows, tcols) x } # trace of a single square matrix, or the trace of a product of (compatible) # matrices resulting in a single square matrix lav_matrix_trace <- function(..., check = TRUE) { if (nargs() == 0L) { return(as.numeric(NA)) } dots <- list(...) 
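  # note (illustrative, not from the original source): the branches below
  # avoid forming full matrix products via the identity
  # tr(A %*% B) == sum(A * t(B)); e.g. with A <- matrix(1:4, 2, 2) and
  # B <- matrix(5:8, 2, 2), both sum(diag(A %*% B)) and sum(A * t(B))
  # equal 69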
# create list of matrices if (is.list(dots[[1]])) { mlist <- dots[[1]] } else { mlist <- dots } # number of matrices nMat <- length(mlist) # single matrix if (nMat == 1L) { S <- mlist[[1]] if (check) { # check if square stopifnot(NROW(S) == NCOL(S)) } out <- sum(S[lav_matrix_diag_idx(n = NROW(S))]) } else if (nMat == 2L) { # dimension check is done by '*' out <- sum(mlist[[1]] * t(mlist[[2]])) } else if (nMat == 3L) { A <- mlist[[1]] B <- mlist[[2]] C <- mlist[[3]] # A, B, C # below is the logic; to be coded inline # DIAG <- numeric( NROW(A) ) # for(i in seq_len(NROW(A))) { # DIAG[i] <- sum( rep(A[i,], times = NCOL(B)) * # as.vector(B) * # rep(C[,i], each=NROW(B)) ) # } # out <- sum(DIAG) # FIXME: # dimension check is automatic B2 <- B %*% C out <- sum(A * t(B2)) } else { # nRows <- sapply(mlist, NROW) # nCols <- sapply(mlist, NCOL) # check if product is ok # stopifnot(all(nCols[seq_len(nMat-1L)] == nRows[2:nMat])) # check if product is square # stopifnot(nRows[1] == nCols[nMat]) M1 <- mlist[[1]] M2 <- mlist[[2]] for (m in 3L:nMat) { M2 <- M2 %*% mlist[[m]] } out <- sum(M1 * t(M2)) } out } # crossproduct, but handling NAs pairwise, if needed # otherwise, just call base::crossprod lav_matrix_crossprod <- function(A, B) { # single argument? if (missing(B)) { if (!anyNA(A)) { return(base::crossprod(A)) } B <- A # no missings? } else if (!anyNA(A) && !anyNA(B)) { return(base::crossprod(A, B)) } # A and B must be matrices if (!inherits(A, "matrix")) { A <- matrix(A) } if (!inherits(B, "matrix")) { B <- matrix(B) } out <- apply(B, 2L, function(x) colSums(A * x, na.rm = TRUE)) # only when A is a vector, and B is a matrix, we get back a vector # while the result should be a 1-row matrix if (!is.matrix(out)) { out <- t(matrix(out)) } out } # reduced row echelon form of A lav_matrix_rref <- function(A, tol = sqrt(.Machine$double.eps)) { # MATLAB documentation says rref uses: tol = max(size(A)) * eps * norm(A, inf) if (missing(tol)) { A.norm <- max(abs(apply(A, 1, sum))) tol <- max(dim(A)) * A.norm * .Machine$double.eps } # check if A is a matrix stopifnot(is.matrix(A)) # dimensions nRow <- NROW(A) nCol <- NCOL(A) pivot <- integer(0L) # catch empty matrix if (nRow == 0 && nCol == 0) { return(matrix(0, 0, 0)) } rowIndex <- colIndex <- 1 while (rowIndex <= nRow && colIndex <= nCol) { # look for largest (in absolute value) element in this column: i.below <- which.max(abs(A[rowIndex:nRow, colIndex])) i <- i.below + rowIndex - 1L p <- A[i, colIndex] # check if column is empty if (abs(p) <= tol) { A[rowIndex:nRow, colIndex] <- 0L # clean up colIndex <- colIndex + 1 } else { # store pivot column pivot <- c(pivot, colIndex) # do we need to swap rows? if (rowIndex != i) { A[c(rowIndex, i), colIndex:nCol] <- A[c(i, rowIndex), colIndex:nCol] } # scale pivot to be 1.0 A[rowIndex, colIndex:nCol] <- A[rowIndex, colIndex:nCol] / p # create zeroes below and above pivot other <- seq_len(nRow)[-rowIndex] A[other, colIndex:nCol] <- A[other, colIndex:nCol] - tcrossprod( A[other, colIndex], A[rowIndex, colIndex:nCol] ) # next row/col rowIndex <- rowIndex + 1 colIndex <- colIndex + 1 } } # rounding?
list(R = A, pivot = pivot) } # non-orthonormal (left) null space basis, using rref lav_matrix_orthogonal_complement2 <- function( A, tol = sqrt(.Machine$double.eps)) { # left A <- t(A) # compute rref out <- lav_matrix_rref(A = A, tol = tol) # number of free columns in R (if any) nfree <- NCOL(A) - length(out$pivot) if (nfree) { R <- out$R # remove all-zero rows zero.idx <- which(apply(R, 1, function(x) { all(abs(x) < tol) })) if (length(zero.idx) > 0) { R <- R[-zero.idx, , drop = FALSE] } FREE <- R[, -out$pivot, drop = FALSE] I <- diag(nfree) N <- rbind(-FREE, I) } else { N <- matrix(0, nrow = NCOL(A), ncol = 0L) } N } # inverse of a non-singular (not necessarily positive-definite) symmetric matrix # FIXME: error handling? lav_matrix_symmetric_inverse <- function(S, logdet = FALSE, Sinv.method = "eigen", zero.warn = FALSE) { # catch zero cols/rows zero.idx <- which(colSums(S) == 0 & diag(S) == 0 & rowSums(S) == 0) S.orig <- S if (length(zero.idx) > 0L) { if (zero.warn) { lav_msg_warn(gettext("matrix to be inverted contains zero cols/rows")) } S <- S[-zero.idx, -zero.idx, drop = FALSE] } P <- NCOL(S) if (P == 0L) { S.inv <- matrix(0, 0, 0) if (logdet) { attr(S.inv, "logdet") <- 0 } return(S.inv) } else if (P == 1L) { tmp <- S[1, 1] S.inv <- matrix(1 / tmp, 1, 1) if (logdet) { if (tmp > 0) { attr(S.inv, "logdet") <- log(tmp) } else { attr(S.inv, "logdet") <- -Inf } } } else if (P == 2L) { a11 <- S[1, 1] a12 <- S[1, 2] a21 <- S[2, 1] a22 <- S[2, 2] tmp <- a11 * a22 - a12 * a21 if (tmp == 0) { S.inv <- matrix(c(Inf, Inf, Inf, Inf), 2, 2) if (logdet) { attr(S.inv, "logdet") <- -Inf } } else { S.inv <- matrix(c(a22 / tmp, -a21 / tmp, -a12 / tmp, a11 / tmp), 2, 2) if (logdet) { if (tmp > 0) { attr(S.inv, "logdet") <- log(tmp) } else { attr(S.inv, "logdet") <- -Inf } } } } else if (Sinv.method == "eigen") { EV <- eigen(S, symmetric = TRUE) # V %*% diag(1/d) %*% V^{-1}, where V^{-1} = V^T S.inv <- tcrossprod( EV$vectors / rep(EV$values, each = length(EV$values)), EV$vectors ) # 0.5 version # S.inv <- tcrossprod(sweep(EV$vectors, 2L, # STATS = (1/EV$values), FUN="*"), EV$vectors) if (logdet) { if (all(EV$values >= 0)) { attr(S.inv, "logdet") <- sum(log(EV$values)) } else { attr(S.inv, "logdet") <- as.numeric(NA) } } } else if (Sinv.method == "solve") { S.inv <- solve.default(S) if (logdet) { ev <- eigen(S, symmetric = TRUE, only.values = TRUE) if (all(ev$values >= 0)) { attr(S.inv, "logdet") <- sum(log(ev$values)) } else { attr(S.inv, "logdet") <- as.numeric(NA) } } } else if (Sinv.method == "chol") { # this will break if S is not positive definite cS <- chol.default(S) S.inv <- chol2inv(cS) if (logdet) { diag.cS <- diag(cS) attr(S.inv, "logdet") <- sum(log(diag.cS * diag.cS)) } } else { lav_msg_stop(gettext("method must be either `eigen', `solve' or `chol'")) } if (length(zero.idx) > 0L) { logdet <- attr(S.inv, "logdet") tmp <- S.orig tmp[-zero.idx, -zero.idx] <- S.inv S.inv <- tmp attr(S.inv, "logdet") <- logdet attr(S.inv, "zero.idx") <- zero.idx } S.inv } # update inverse of A, after removing 1 or more rows (and corresponding # columns) from A # # - this is just an application of the inverse of partitioned matrices # - only removal for now # lav_matrix_inverse_update <- function(A.inv, rm.idx = integer(0L)) { ndel <- length(rm.idx) # rank-1 update if (ndel == 1L) { a <- A.inv[-rm.idx, rm.idx, drop = FALSE] b <- A.inv[rm.idx, -rm.idx, drop = FALSE] h <- A.inv[rm.idx, rm.idx] out <- A.inv[-rm.idx, -rm.idx, drop = FALSE] - (a %*% b) / h } # rank-n update else if (ndel < NCOL(A.inv)) { A <-
A.inv[-rm.idx, rm.idx, drop = FALSE] B <- A.inv[rm.idx, -rm.idx, drop = FALSE] H <- A.inv[rm.idx, rm.idx, drop = FALSE] out <- A.inv[-rm.idx, -rm.idx, drop = FALSE] - A %*% solve.default(H, B) # erase all col/rows... } else if (ndel == NCOL(A.inv)) { out <- matrix(0, 0, 0) } out } # update inverse of S, after removing 1 or more rows (and corresponding # columns) from S, a symmetric matrix # # - only removal for now! # lav_matrix_symmetric_inverse_update <- function(S.inv, rm.idx = integer(0L), logdet = FALSE, S.logdet = NULL) { ndel <- length(rm.idx) if (ndel == 0L) { out <- S.inv if (logdet) { attr(out, "logdet") <- S.logdet } } # rank-1 update else if (ndel == 1L) { h <- S.inv[rm.idx, rm.idx] a <- S.inv[-rm.idx, rm.idx, drop = FALSE] / sqrt(h) out <- S.inv[-rm.idx, -rm.idx, drop = FALSE] - tcrossprod(a) if (logdet) { attr(out, "logdet") <- S.logdet + log(h) } } # rank-n update else if (ndel < NCOL(S.inv)) { A <- S.inv[rm.idx, -rm.idx, drop = FALSE] H <- S.inv[rm.idx, rm.idx, drop = FALSE] out <- (S.inv[-rm.idx, -rm.idx, drop = FALSE] - crossprod(A, solve.default(H, A))) if (logdet) { # cH <- chol.default(Re(H)); diag.cH <- diag(cH) # H.logdet <- sum(log(diag.cH * diag.cH)) H.logdet <- log(det(H)) attr(out, "logdet") <- S.logdet + H.logdet } # erase all col/rows... } else if (ndel == NCOL(S.inv)) { out <- matrix(0, 0, 0) } else { lav_msg_stop(gettext("column indices exceed number of columns in S.inv")) } out } # update determinant of A, after removing 1 or more rows (and corresponding # columns) from A # lav_matrix_det_update <- function(det.A, A.inv, rm.idx = integer(0L)) { ndel <- length(rm.idx) # rank-1 update if (ndel == 1L) { h <- A.inv[rm.idx, rm.idx] out <- det.A * h } # rank-n update else if (ndel < NCOL(A.inv)) { H <- A.inv[rm.idx, rm.idx, drop = FALSE] det.H <- det(H) out <- det.A * det.H # erase all col/rows... } else if (ndel == NCOL(A.inv)) { out <- matrix(0, 0, 0) } out } # update determinant of S, after removing 1 or more rows (and corresponding # columns) from S, a symmetric matrix # lav_matrix_symmetric_det_update <- function(det.S, S.inv, rm.idx = integer(0L)) { ndel <- length(rm.idx) # rank-1 update if (ndel == 1L) { h <- S.inv[rm.idx, rm.idx] out <- det.S * h } # rank-n update else if (ndel < NCOL(S.inv)) { H <- S.inv[rm.idx, rm.idx, drop = FALSE] cH <- chol.default(H) diag.cH <- diag(cH) det.H <- prod(diag.cH * diag.cH) out <- det.S * det.H # erase all col/rows... } else if (ndel == NCOL(S.inv)) { out <- numeric(0L) } out } # update log determinant of S, after removing 1 or more rows (and corresponding # columns) from S, a symmetric matrix # lav_matrix_symmetric_logdet_update <- function(S.logdet, S.inv, rm.idx = integer(0L)) { ndel <- length(rm.idx) # rank-1 update if (ndel == 1L) { h <- S.inv[rm.idx, rm.idx] out <- S.logdet + log(h) } # rank-n update else if (ndel < NCOL(S.inv)) { H <- S.inv[rm.idx, rm.idx, drop = FALSE] cH <- chol.default(H) diag.cH <- diag(cH) H.logdet <- sum(log(diag.cH * diag.cH)) out <- S.logdet + H.logdet # erase all col/rows... } else if (ndel == NCOL(S.inv)) { out <- numeric(0L) } out } # compute `lambda': the smallest root of the determinantal equation # |M - lambda*P| = 0 (see Fuller 1987, p.125 or p.172) # # the function allows for zero rows/columns in P, by regressing them out # this approach was suggested to me by Wayne A.
Fuller, personal communication, # 12 Nov 2020 # lav_matrix_symmetric_diff_smallest_root <- function(M = NULL, P = NULL) { # check input (we will 'assume' they are square and symmetric) stopifnot(is.matrix(M), is.matrix(P)) # check if P is diagonal or not PdiagFlag <- FALSE tmp <- P diag(tmp) <- 0 if (all(abs(tmp) < sqrt(.Machine$double.eps))) { PdiagFlag <- TRUE } # diagonal elements of P nP <- nrow(P) diagP <- P[lav_matrix_diag_idx(nP)] # force diagonal elements of P to be nonnegative (warn?) neg.idx <- which(diagP < 0) if (length(neg.idx) > 0L) { lav_msg_warn(gettext( "some diagonal elements of P are negative (and set to zero)")) diag(P)[neg.idx] <- diagP[neg.idx] <- 0 } # check for (near)zero diagonal elements zero.idx <- which(abs(diagP) < sqrt(.Machine$double.eps)) # three cases: # 1. all elements are zero (P=0) -> lambda = 0 # 2. no elements are zero # 3. some elements are zero -> regress out # 1. all elements are zero if (length(zero.idx) == nP) { return(0.0) } # 2. no elements are zero else if (length(zero.idx) == 0L) { if (PdiagFlag) { Ldiag <- 1 / sqrt(diagP) LML <- t(Ldiag * M) * Ldiag } else { L <- solve(lav_matrix_symmetric_sqrt(P)) LML <- L %*% M %*% t(L) } # compute lambda lambda <- eigen(LML, symmetric = TRUE, only.values = TRUE)$values[nP] # 3. some elements are zero } else { # regress out M-block corresponding to zero diagonal elements in P # partition M accordingly: p = positive, n = negative M.pp <- M[-zero.idx, -zero.idx, drop = FALSE] M.pn <- M[-zero.idx, zero.idx, drop = FALSE] M.np <- M[zero.idx, -zero.idx, drop = FALSE] M.nn <- M[zero.idx, zero.idx, drop = FALSE] # create Mp.n Mp.n <- M.pp - M.pn %*% solve(M.nn) %*% M.np # extract positive part of P P.p <- P[-zero.idx, -zero.idx, drop = FALSE] # compute smallest root if (PdiagFlag) { diagPp <- diag(P.p) Ldiag <- 1 / sqrt(diagPp) LML <- t(Ldiag * Mp.n) * Ldiag } else { L <- solve(lav_matrix_symmetric_sqrt(P.p)) LML <- L %*% Mp.n %*% t(L) } lambda <- eigen(LML, symmetric = TRUE, only.values = TRUE )$values[nrow(P.p)] } lambda } # force a symmetric matrix to be positive definite # simple textbook version (see Matrix::nearPD for a more sophisticated version) # lav_matrix_symmetric_force_pd <- function(S, tol = 1e-06) { if (ncol(S) == 1L) { return(matrix(max(S[1, 1], tol), 1L, 1L)) } # eigen decomposition S.eigen <- eigen(S, symmetric = TRUE) # eigen values ev <- S.eigen$values # replace small/negative eigen values ev[ev / abs(ev[1]) < tol] <- tol * abs(ev[1]) # reconstruct out <- S.eigen$vectors %*% diag(ev) %*% t(S.eigen$vectors) out } # compute sample covariance matrix, divided by 'N' (not N-1, as in cov) # # Mu is not supposed to be ybar, but close # if provided, we compute S as 1/N*crossprod(Y - Mu) instead of # 1/N*crossprod(Y - ybar) lav_matrix_cov <- function(Y, Mu = NULL) { N <- NROW(Y) S1 <- stats::cov(Y) # uses a corrected two-pass algorithm S <- S1 * (N - 1) / N # Mu? 
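  # note (illustrative derivation, not from the original source):
  # 1/N * sum_i (y_i - Mu)(y_i - Mu)' =
  #   1/N * sum_i (y_i - ybar)(y_i - ybar)' + (ybar - Mu)(ybar - Mu)'
  # (the cross terms vanish), which is why a single rank-1 correction
  # suffices below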
if (!is.null(Mu)) { P <- NCOL(Y) ybar <- base::.colMeans(Y, m = N, n = P) S <- S + tcrossprod(ybar - Mu) } S } # transform a matrix to match a given target mean/covariance lav_matrix_transform_mean_cov <- function(Y, target.mean = numeric(NCOL(Y)), target.cov = diag(NCOL(Y))) { # coerce to matrix Y <- as.matrix.default(Y) # convert to vector target.mean <- as.vector(target.mean) S <- lav_matrix_cov(Y) S.inv <- solve.default(S) S.inv.sqrt <- lav_matrix_symmetric_sqrt(S.inv) target.cov.sqrt <- lav_matrix_symmetric_sqrt(target.cov) # transform cov X <- Y %*% S.inv.sqrt %*% target.cov.sqrt # shift mean xbar <- colMeans(X) X <- t(t(X) - xbar + target.mean) X } # weighted column means # # for each column in Y: mean = sum(wt * Y)/sum(wt) # # if we have missing values, we use only the observations and weights # that are NOT missing # lav_matrix_mean_wt <- function(Y, wt = NULL) { Y <- unname(as.matrix.default(Y)) DIM <- dim(Y) if (is.null(wt)) { return(colMeans(Y, na.rm = TRUE)) } if (anyNA(Y)) { WT <- wt * !is.na(Y) wN <- .colSums(WT, m = DIM[1], n = DIM[2]) out <- .colSums(wt * Y, m = DIM[1], n = DIM[2], na.rm = TRUE) / wN } else { out <- .colSums(wt * Y, m = DIM[1], n = DIM[2]) / sum(wt) } out } # weighted column variances # # for each column in Y: var = sum(wt * (Y - w.mean(Y))^2) / N # # where N = sum(wt) - 1 (method = "unbiased") assuming wt are frequency weights # or N = sum(wt) (method = "ML") # # Note: another approach (when the weights are 'reliability weights') is to # use N = sum(wt) - sum(wt^2)/sum(wt) (not implemented here) # # if we have missing values, we use only the observations and weights # that are NOT missing # lav_matrix_var_wt <- function(Y, wt = NULL, method = c("unbiased", "ML")) { Y <- unname(as.matrix.default(Y)) DIM <- dim(Y) if (is.null(wt)) { wt <- rep(1, nrow(Y)) } if (anyNA(Y)) { WT <- wt * !is.na(Y) wN <- .colSums(WT, m = DIM[1], n = DIM[2]) w.mean <- .colSums(wt * Y, m = DIM[1], n = DIM[2], na.rm = TRUE) / wN Ytc <- t(t(Y) - w.mean) tmp <- .colSums(wt * Ytc * Ytc, m = DIM[1], n = DIM[2], na.rm = TRUE) out <- switch(match.arg(method), unbiased = tmp / (wN - 1), ML = tmp / wN ) } else { w.mean <- .colSums(wt * Y, m = DIM[1], n = DIM[2]) / sum(wt) Ytc <- t(t(Y) - w.mean) tmp <- .colSums(wt * Ytc * Ytc, m = DIM[1], n = DIM[2]) out <- switch(match.arg(method), unbiased = tmp / (sum(wt) - 1), ML = tmp / sum(wt) ) } out } # weighted variance-covariance matrix # # always dividing by sum(wt) (for now) (=ML version) # # if we have missing values, we use only the observations and weights # that are NOT missing # # same as cov.wt(Y, wt, method = "ML") # lav_matrix_cov_wt <- function(Y, wt = NULL) { Y <- unname(as.matrix.default(Y)) DIM <- dim(Y) if (is.null(wt)) { wt <- rep(1, nrow(Y)) } if (anyNA(Y)) { tmp <- na.omit(cbind(Y, wt)) Y <- tmp[, seq_len(DIM[2]), drop = FALSE] wt <- tmp[, DIM[2] + 1L] DIM[1] <- nrow(Y) w.mean <- .colSums(wt * Y, m = DIM[1], n = DIM[2]) / sum(wt) Ytc <- t(t(Y) - w.mean) tmp <- crossprod(sqrt(wt) * Ytc) out <- tmp / sum(wt) } else { w.mean <- .colSums(wt * Y, m = DIM[1], n = DIM[2]) / sum(wt) Ytc <- t(t(Y) - w.mean) tmp <- crossprod(sqrt(wt) * Ytc) out <- tmp / sum(wt) } out } # compute (I-A)^{-1} where A is square # using a (truncated) Neumann series: (I-A)^{-1} = \sum_{k=0}^{\infty} A^k # = I + A + A^2 + A^3 + ...
# # note: the series only converges if the spectral radius of A is < 1; when A # represents recursive (acyclic) regressions, A is nilpotent, all its # eigenvalues are zero, and the series terminates after finitely many terms # # as A is typically sparse, we can stop if all elements in A^k are zero for, # say, k<=6 lav_matrix_inverse_iminus <- function(A = NULL) { nr <- nrow(A) nc <- ncol(A) stopifnot(nr == nc) # create I + A IA <- A diag.idx <- lav_matrix_diag_idx(nr) IA[diag.idx] <- IA[diag.idx] + 1 # initial approximation IA.inv <- IA # first order A2 <- A %*% A if (all(A2 == 0)) { # we are done return(IA.inv) } else { IA.inv <- IA.inv + A2 } # second order A3 <- A2 %*% A if (all(A3 == 0)) { # we are done return(IA.inv) } else { IA.inv <- IA.inv + A3 } # third order A4 <- A3 %*% A if (all(A4 == 0)) { # we are done return(IA.inv) } else { IA.inv <- IA.inv + A4 } # fourth order A5 <- A4 %*% A if (all(A5 == 0)) { # we are done return(IA.inv) } else { IA.inv <- IA.inv + A5 } # fifth order A6 <- A5 %*% A if (all(A6 == 0)) { # we are done return(IA.inv) } else { # naive version (for now) tmp <- -A tmp[diag.idx] <- tmp[diag.idx] + 1 IA.inv <- solve(tmp) return(IA.inv) } } lavaan/R/lav_object_methods.R0000644000176200001440000012462514627656441015714 0ustar liggesusers# `methods' for fitted lavaan objects # # standard (S4) methods: # - show() # - summary() # - coef() # - fitted.values() + fitted() # - vcov() # - logLik() # - nobs() # - update() # - anova() # lavaan-specific methods: # # - parameterEstimates() # - standardizedSolution() # - parameterTable() # - varTable() setMethod( "show", "lavaan", function(object) { # efa? efa.flag <- object@Options$model.type == "efa" # show only basic information res <- lav_object_summary(object, fit.measures = FALSE, estimates = FALSE, modindices = FALSE, efa = efa.flag ) if (efa.flag) { # print (standardized) loadings only class(res) <- c("lavaan.efa", "list") print(res) } else { # print lavaan header print(res) } invisible(res) } ) setMethod( "summary", "lavaan", function(object, header = TRUE, fit.measures = FALSE, estimates = TRUE, ci = FALSE, fmi = FALSE, standardized = FALSE, std = standardized, std.nox = FALSE, # TODO: remove deprecated argument in early 2025 remove.step1 = TRUE, remove.unused = TRUE, cov.std = TRUE, rsquare = FALSE, fm.args = list( standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.h0.closefit = 0.05, rmsea.h0.notclosefit = 0.08, robust = TRUE, cat.check.pd = TRUE ), modindices = FALSE, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1) { # efa? efa.flag <- object@Options$model.type == "efa" res <- lav_object_summary( object = object, header = header, fit.measures = fit.measures, estimates = estimates, ci = ci, fmi = fmi, std = std, standardized = standardized, remove.step1 = remove.step1, remove.unused = remove.unused, cov.std = cov.std, rsquare = rsquare, efa = efa.flag, fm.args = fm.args, modindices = modindices ) # res has class c("lavaan.summary", "list") # what about nd?
only used if we actually print; save as attribute attr(res, "nd") <- nd # if efa, add cutoff and dot.cutoff, and change class if (efa.flag) { # class(res) <- c("lavaan.summary.efa", "list") attr(res, "cutoff") <- cutoff attr(res, "dot.cutoff") <- dot.cutoff } res } ) setMethod( "coef", "lavaan", function(object, type = "free", labels = TRUE) { lav_object_inspect_coef( object = object, type = type, add.labels = labels, add.class = TRUE ) } ) standardizedSolution <- # nolint standardizedsolution <- function(object, # nolint type = "std.all", se = TRUE, zstat = TRUE, pvalue = TRUE, ci = TRUE, level = 0.95, cov.std = TRUE, remove.eq = TRUE, remove.ineq = TRUE, remove.def = FALSE, partable = NULL, GLIST = NULL, # nolint est = NULL, output = "data.frame") { stopifnot(type %in% c("std.all", "std.lv", "std.nox")) # check output= argument output <- tolower(output) if (output %in% c("data.frame", "table")) { output <- "data.frame" } else if (output %in% c("text", "pretty")) { output <- "text" } else { lav_msg_stop(gettextf( "output must be %s or %s", sQuote("data.frame"), sQuote("text")) ) } # no zstat + pvalue if estimator is Bayes if (object@Options$estimator == "Bayes") { zstat <- pvalue <- FALSE } # no se if class is not lavaan # using class() -- can't use inherits(), as this includes blavaan if (class(object)[1L] != "lavaan") { if (missing(se) || !se) { se <- FALSE zstat <- FALSE pvalue <- FALSE } } if (is.null(partable)) { tmp.partable <- inspect(object, "list") } else { tmp.partable <- partable } tmp.list <- tmp.partable[, c("lhs", "op", "rhs", "exo")] if (!is.null(tmp.partable$group)) { tmp.list$group <- tmp.partable$group } if (!is.null(tmp.partable$block)) { tmp.list$block <- tmp.partable$block } if (sum(nchar(tmp.partable$label)) != 0L) { tmp.list$label <- tmp.partable$label } # add std and std.all columns if (type == "std.lv") { tmp.list$est.std <- lav_standardize_lv(object, est = est, GLIST = GLIST, partable = partable, cov.std = cov.std ) } else if (type == "std.all") { tmp.list$est.std <- lav_standardize_all(object, est = est, GLIST = GLIST, partable = partable, cov.std = cov.std ) } else if (type == "std.nox") { tmp.list$est.std <- lav_standardize_all_nox(object, est = est, GLIST = GLIST, partable = partable, cov.std = cov.std ) } if (object@Options$se != "none" && se) { # add 'se' for standardized parameters tmp.vcov <- try(lav_object_inspect_vcov(object, standardized = TRUE, type = type, free.only = FALSE, add.labels = FALSE, add.class = FALSE )) if (inherits(tmp.vcov, "try-error") || is.null(tmp.vcov)) { tmp.list$se <- rep(NA, length(tmp.list$lhs)) if (zstat) { tmp.list$z <- rep(NA, length(tmp.list$lhs)) } if (pvalue) { tmp.list$pvalue <- rep(NA, length(tmp.list$lhs)) } } else { tmp <- diag(tmp.vcov) # catch negative values min.idx <- which(tmp < 0) if (length(min.idx) > 0L) { tmp[min.idx] <- as.numeric(NA) } # now, we can safely take the square root tmp <- sqrt(tmp) # catch near-zero SEs zero.idx <- which(tmp < .Machine$double.eps^(1 / 4)) # was 1/2 < 0.6 # was 1/3 < 0.6-9 if (length(zero.idx) > 0L) { tmp[zero.idx] <- 0.0 } tmp.list$se <- tmp # add 'z' column if (zstat) { tmp.se <- ifelse(tmp.list$se == 0.0, NA, tmp.list$se) tmp.list$z <- tmp.list$est.std / tmp.se } if (zstat && pvalue) { tmp.list$pvalue <- 2 * (1 - pnorm(abs(tmp.list$z))) } } } # simple symmetric confidence interval if (se && object@Options$se != "none" && ci) { # next three lines based on confint.lm a <- (1 - level) / 2 a <- c(a, 1 - a) fac <- qnorm(a) # if(object@Options$se != "bootstrap") { ci <- 
tmp.list$est.std + tmp.list$se %o% fac # } else { # ci <- rep(as.numeric(NA), length(tmp.list$est.std)) + # tmp.list$se %o% fac # } tmp.list$ci.lower <- ci[, 1] tmp.list$ci.upper <- ci[, 2] } # if single group, remove group column if (object@Data@ngroups == 1L) tmp.list$group <- NULL # remove == rows? if (remove.eq) { eq.idx <- which(tmp.list$op == "==") if (length(eq.idx) > 0L) { tmp.list <- tmp.list[-eq.idx, ] } } # remove <> rows? if (remove.ineq) { ineq.idx <- which(tmp.list$op %in% c("<", ">")) if (length(ineq.idx) > 0L) { tmp.list <- tmp.list[-ineq.idx, ] } } # remove := rows? if (remove.def) { def.idx <- which(tmp.list$op == ":=") if (length(def.idx) > 0L) { tmp.list <- tmp.list[-def.idx, ] } } # remove attribute for data order attr(tmp.list, "ovda") <- NULL if (output == "text") { class(tmp.list) <- c( "lavaan.parameterEstimates", "lavaan.data.frame", "data.frame" ) # tmp.list$exo is needed for printing, don't remove it attr(tmp.list, "group.label") <- object@Data@group.label attr(tmp.list, "level.label") <- object@Data@level.label # attr(tmp.list, "header") <- FALSE } else { tmp.list$exo <- NULL tmp.list$block <- NULL class(tmp.list) <- c("lavaan.data.frame", "data.frame") } tmp.list } parameterEstimates <- # nolint parameterestimates <- function(object, # select columns se = TRUE, zstat = TRUE, pvalue = TRUE, ci = TRUE, standardized = FALSE, fmi = FALSE, # control level = 0.95, boot.ci.type = "perc", cov.std = TRUE, fmi.options = list(), # add rows rsquare = FALSE, # remove rows remove.system.eq = TRUE, remove.eq = TRUE, remove.ineq = TRUE, remove.def = FALSE, remove.nonfree = FALSE, remove.step1 = TRUE, remove.unused = FALSE, # output add.attributes = FALSE, output = "data.frame", header = FALSE) { if (inherits(object, "lavaan.fsr")) { return(object$PE) } # deprecated add.attributes (for psycho/blavaan) if (add.attributes) { output <- "text" } # no se if class is not lavaan # can't use inherits(), as this would return TRUE if object is from blavaan if (class(object)[1L] != "lavaan") { if (missing(se) || !se) { se <- FALSE zstat <- FALSE pvalue <- FALSE } } # check output= argument output <- tolower(output) if (output %in% c("data.frame", "table")) { output <- "data.frame" header <- FALSE } else if (output %in% c("text", "pretty")) { output <- "text" } else { lav_msg_stop(gettextf( "output must be %s or %s", sQuote("data.frame"), sQuote("text")) ) } # check fmi if (fmi) { if (inherits(object, "lavaanList")) { lav_msg_warn(gettext( "fmi not available for object of class \"lavaanList\"")) fmi <- FALSE } if (object@Options$se != "standard") { lav_msg_warn(gettext( "fmi only available if se = \"standard\"")) fmi <- FALSE } if (object@Options$estimator != "ML") { lav_msg_warn(gettext( "fmi only available if estimator = \"ML\"")) fmi <- FALSE } if (!object@SampleStats@missing.flag) { lav_msg_warn(gettext( "fmi only available if missing = \"(fi)ml\"")) fmi <- FALSE } if (!object@optim$converged) { lav_msg_warn(gettext( "fmi not available; model did not converge")) fmi <- FALSE } } # no zstat + pvalue if estimator is Bayes if (object@Options$estimator == "Bayes") { zstat <- pvalue <- FALSE } tmp.partable <- as.data.frame(object@ParTable, stringsAsFactors = FALSE) tmp.list <- tmp.partable[, c("lhs", "op", "rhs", "free")] if (!is.null(tmp.partable$user)) { tmp.list$user <- tmp.partable$user } if (!is.null(tmp.partable$block)) { tmp.list$block <- tmp.partable$block } else { tmp.list$block <- rep(1L, length(tmp.list$lhs)) } if (!is.null(tmp.partable$level)) { tmp.list$level <- 
tmp.partable$level } else { tmp.list$level <- rep(1L, length(tmp.list$lhs)) } if (!is.null(tmp.partable$group)) { tmp.list$group <- tmp.partable$group } else { tmp.list$group <- rep(1L, length(tmp.list$lhs)) } if (!is.null(tmp.partable$step)) { tmp.list$step <- tmp.partable$step } if (!is.null(tmp.partable$efa)) { tmp.list$efa <- tmp.partable$efa } if (!is.null(tmp.partable$label)) { tmp.list$label <- tmp.partable$label } else { tmp.list$label <- rep("", length(tmp.list$lhs)) } if (!is.null(tmp.partable$exo)) { tmp.list$exo <- tmp.partable$exo } else { tmp.list$exo <- rep(0L, length(tmp.list$lhs)) } if (inherits(object, "lavaanList")) { # per default: nothing! # if("partable" %in% object@meta$store.slots) { # COF <- sapply(object@ParTableList, "[[", "est") # tmp.list$est <- rowMeans(COF) # } tmp.list$est <- NULL } else if (!is.null(tmp.partable$est)) { tmp.list$est <- tmp.partable$est } else { tmp.list$est <- lav_model_get_parameters(object@Model, type = "user", extra = TRUE ) } if (!is.null(tmp.partable$lower)) { tmp.list$lower <- tmp.partable$lower } if (!is.null(tmp.partable$upper)) { tmp.list$upper <- tmp.partable$upper } # add se, zstat, pvalue if (se && object@Options$se != "none") { tmp.list$se <- lav_object_inspect_se(object) # handle tiny SEs tmp.list$se <- ifelse(tmp.list$se < sqrt(.Machine$double.eps), 0, tmp.list$se ) tmp.se <- ifelse(tmp.list$se < sqrt(.Machine$double.eps), NA, tmp.list$se) if (zstat) { tmp.list$z <- tmp.list$est / tmp.se if (pvalue) { tmp.list$pvalue <- 2 * (1 - pnorm(abs(tmp.list$z))) # remove p-value if bounds have been used if (!is.null(tmp.partable$lower)) { b.idx <- which(abs(tmp.partable$lower - tmp.partable$est) < sqrt(.Machine$double.eps) & tmp.partable$free > 0L) if (length(b.idx) > 0L) { tmp.list$pvalue[b.idx] <- as.numeric(NA) } } if (!is.null(tmp.partable$upper)) { b.idx <- which(abs(tmp.partable$upper - tmp.partable$est) < sqrt(.Machine$double.eps) & tmp.partable$free > 0L) if (length(b.idx) > 0L) { tmp.list$pvalue[b.idx] <- as.numeric(NA) } } } } } # extract bootstrap data (if any) if (object@Options$se == "bootstrap" || "bootstrap" %in% object@Options$test || "bollen.stine" %in% object@Options$test) { tmp.boot <- lav_object_inspect_boot(object) bootstrap.seed <- attr(tmp.boot, "seed") # for bca error.idx <- attr(tmp.boot, "error.idx") if (length(error.idx) > 0L) { tmp.boot <- tmp.boot[-error.idx, , drop = FALSE] # drops attributes } } else { tmp.boot <- NULL } bootstrap.successful <- NROW(tmp.boot) # should be zero if NULL # confidence interval if (se && object@Options$se != "none" && ci) { # next three lines based on confint.lm a <- (1 - level) / 2 a <- c(a, 1 - a) if (object@Options$se != "bootstrap") { fac <- qnorm(a) ci <- tmp.list$est + tmp.list$se %o% fac } else if (object@Options$se == "bootstrap") { # local copy of 'norm.inter' from boot package (not exported!) 
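      # note (illustrative, not from the original source): for each alpha,
      # norm.inter() returns the bootstrap order statistic of (fractional)
      # rank rk = (R + 1) * alpha; when rk is not an integer, with
      # k = trunc(rk) it interpolates between t_(k) and t_(k+1) on the
      # normal-quantile scale:
      #   t_(k) + (qnorm(alpha) - qnorm(k/(R+1))) /
      #     (qnorm((k+1)/(R+1)) - qnorm(k/(R+1))) * (t_(k+1) - t_(k))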
norm.inter <- function(t, alpha) { t <- t[is.finite(t)] tmp.r <- length(t) rk <- (tmp.r + 1) * alpha if (!all(rk > 1 & rk < tmp.r)) { lav_msg_warn(gettext("extreme order statistics used as endpoints")) } k <- trunc(rk) inds <- seq_along(k) out <- inds kvs <- k[k > 0 & k < tmp.r] tstar <- sort(t, partial = sort(union(c(1, tmp.r), c(kvs, kvs + 1)))) ints <- (k == rk) if (any(ints)) out[inds[ints]] <- tstar[k[inds[ints]]] out[k == 0] <- tstar[1L] out[k == tmp.r] <- tstar[tmp.r] not <- function(v) xor(rep(TRUE, length(v)), v) temp <- inds[not(ints) & k != 0 & k != tmp.r] temp1 <- qnorm(alpha[temp]) temp2 <- qnorm(k[temp] / (tmp.r + 1)) temp3 <- qnorm((k[temp] + 1) / (tmp.r + 1)) tk <- tstar[k[temp]] tk1 <- tstar[k[temp] + 1L] out[temp] <- tk + (temp1 - temp2) / (temp3 - temp2) * (tk1 - tk) cbind(round(rk, 2), out) } stopifnot(!is.null(tmp.boot)) stopifnot(boot.ci.type %in% c( "norm", "basic", "perc", "bca.simple", "bca" )) if (boot.ci.type == "norm") { fac <- qnorm(a) boot.x <- colMeans(tmp.boot, na.rm = TRUE) boot.est <- lav_model_get_parameters(object@Model, GLIST = lav_model_x2GLIST(object@Model, boot.x), type = "user", extra = TRUE ) bias.est <- (boot.est - tmp.list$est) ci <- (tmp.list$est - bias.est) + tmp.list$se %o% fac } else if (boot.ci.type == "basic") { ci <- cbind(tmp.list$est, tmp.list$est) alpha <- (1 + c(level, -level)) / 2 # free.idx only qq <- apply(tmp.boot, 2, norm.inter, alpha) free.idx <- which(object@ParTable$free & !duplicated(object@ParTable$free)) ci[free.idx, ] <- 2 * ci[free.idx, ] - t(qq[c(3, 4), ]) # def.idx def.idx <- which(object@ParTable$op == ":=") if (length(def.idx) > 0L) { boot.def <- apply(tmp.boot, 1, object@Model@def.function) if (length(def.idx) == 1L) { boot.def <- as.matrix(boot.def) } else { boot.def <- t(boot.def) } qq <- apply(boot.def, 2, norm.inter, alpha) ci[def.idx, ] <- 2 * ci[def.idx, ] - t(qq[c(3, 4), ]) } # TODO: add cin/ceq? } else if (boot.ci.type == "perc") { ci <- cbind(tmp.list$est, tmp.list$est) alpha <- (1 + c(-level, level)) / 2 # free.idx only qq <- apply(tmp.boot, 2, norm.inter, alpha) free.idx <- which(object@ParTable$free & !duplicated(object@ParTable$free)) ci[free.idx, ] <- t(qq[c(3, 4), ]) # def.idx def.idx <- which(object@ParTable$op == ":=") if (length(def.idx) > 0L) { boot.def <- apply(tmp.boot, 1, object@Model@def.function) if (length(def.idx) == 1L) { boot.def <- as.matrix(boot.def) } else { boot.def <- t(boot.def) } qq <- apply(boot.def, 2, norm.inter, alpha) def.idx <- which(object@ParTable$op == ":=") ci[def.idx, ] <- t(qq[c(3, 4), ]) } # TODO: add cin/ceq? } else if (boot.ci.type == "bca.simple") { # no adjustment for scale!! only bias!! alpha <- (1 + c(-level, level)) / 2 zalpha <- qnorm(alpha) ci <- cbind(tmp.list$est, tmp.list$est) # free.idx only free.idx <- which(object@ParTable$free & !duplicated(object@ParTable$free)) x <- tmp.list$est[free.idx] for (i in seq_along(free.idx)) { t <- tmp.boot[, i] t <- t[is.finite(t)] t0 <- x[i] # check if we have variance (perhaps constrained to 0?) # new in 0.6-3 if (var(t) == 0) { next } w <- qnorm(sum(t < t0) / length(t)) a <- 0.0 #### !!! 
#### adj.alpha <- pnorm(w + (w + zalpha) / (1 - a * (w + zalpha))) qq <- norm.inter(t, adj.alpha) ci[free.idx[i], ] <- qq[, 2] } # def.idx def.idx <- which(object@ParTable$op == ":=") if (length(def.idx) > 0L) { x.def <- object@Model@def.function(x) boot.def <- apply(tmp.boot, 1, object@Model@def.function) if (length(def.idx) == 1L) { boot.def <- as.matrix(boot.def) } else { boot.def <- t(boot.def) } for (i in seq_along(def.idx)) { t <- boot.def[, i] t <- t[is.finite(t)] t0 <- x.def[i] w <- qnorm(sum(t < t0) / length(t)) a <- 0.0 #### !!! #### adj.alpha <- pnorm(w + (w + zalpha) / (1 - a * (w + zalpha))) qq <- norm.inter(t, adj.alpha) ci[def.idx[i], ] <- qq[, 2] } } # TODO: # - add cin/ceq } else if (boot.ci.type == "bca") { # new in 0.6-12 # we assume that the 'ordinary' (nonparametric) was used lavoptions <- object@Options ngroups <- object@Data@ngroups nobs <- object@SampleStats@nobs ntotal <- object@SampleStats@ntotal # we need enough bootstrap runs if (nrow(tmp.boot) < ntotal) { lav_msg_stop(gettextf( "BCa confidence intervals require more (successful) bootstrap runs (%1$s) than the number of observations (%2$s).", nrow(tmp.boot), ntotal)) } # does not work with sampling weights (yet) if (!is.null(object@Data@weights[[1]])) { lav_msg_stop( gettext("BCa confidence intervals not available in the presence of sampling weights.")) } # check if we have a seed if (is.null(bootstrap.seed)) { lav_msg_stop(gettext("seed not available in tmp.boot object.")) } # compute 'X' matrix with frequency indices (to compute # the empirical influence values using regression) tmp.freq <- lav_utils_bootstrap_indices( R = lavoptions$bootstrap, nobs = nobs, parallel = lavoptions$parallel[1], ncpus = lavoptions$ncpus, cl = lavoptions[["cl"]], iseed = bootstrap.seed, return.freq = TRUE, merge.groups = TRUE ) if (length(error.idx) > 0L) { tmp.freq <- tmp.freq[-error.idx, , drop = FALSE] } stopifnot(nrow(tmp.freq) == nrow(tmp.boot)) # compute empirical influence values (using regression) # remove first column per group first.idx <- sapply(object@Data@case.idx, "[[", 1L) tmp.lm <- lm.fit(x = cbind(1, tmp.freq[, -first.idx]), y = tmp.boot) tmp.beta <- unname(tmp.lm$coefficients)[-1, , drop = FALSE] tmp.ll <- rbind(0, tmp.beta) # compute 'a' for all parameters at once tmp.aa <- apply(tmp.ll, 2L, function(x) { tmp.l <- x - mean(x) sum(tmp.l^3) / (6 * sum(tmp.l^2)^1.5) }) # adjustment for both bias AND scale alpha <- (1 + c(-level, level)) / 2 zalpha <- qnorm(alpha) ci <- cbind(tmp.list$est, tmp.list$est) # free.idx only free.idx <- which(object@ParTable$free & !duplicated(object@ParTable$free)) stopifnot(length(free.idx) == ncol(tmp.boot)) x <- tmp.list$est[free.idx] for (i in seq_along(free.idx)) { t <- tmp.boot[, i] t <- t[is.finite(t)] t0 <- x[i] # check if we have variance (perhaps constrained to 0?) 
# new in 0.6-3 if (var(t) == 0) { next } w <- qnorm(sum(t < t0) / length(t)) a <- tmp.aa[i] adj.alpha <- pnorm(w + (w + zalpha) / (1 - a * (w + zalpha))) qq <- norm.inter(t, adj.alpha) ci[free.idx[i], ] <- qq[, 2] } # def.idx def.idx <- which(object@ParTable$op == ":=") if (length(def.idx) > 0L) { x.def <- object@Model@def.function(x) boot.def <- apply(tmp.boot, 1, object@Model@def.function) if (length(def.idx) == 1L) { boot.def <- as.matrix(boot.def) } else { boot.def <- t(boot.def) } # recompute empirical influence values tmp.lm <- lm.fit(x = cbind(1, tmp.freq[, -1]), y = boot.def) tmp.beta <- unname(tmp.lm$coefficients)[-1, , drop = FALSE] tmp.ll <- rbind(0, tmp.beta) # compute 'a' values for all def.idx parameters tmp.aa <- apply(tmp.ll, 2L, function(x) { tmp.l <- x - mean(x) sum(tmp.l^3) / (6 * sum(tmp.l^2)^1.5) }) # compute bca ci for (i in seq_along(def.idx)) { t <- boot.def[, i] t <- t[is.finite(t)] t0 <- x.def[i] w <- qnorm(sum(t < t0) / length(t)) a <- tmp.aa[i] adj.alpha <- pnorm(w + (w + zalpha) / (1 - a * (w + zalpha))) qq <- norm.inter(t, adj.alpha) ci[def.idx[i], ] <- qq[, 2] } } # TODO: # - add cin/ceq } } tmp.list$ci.lower <- ci[, 1] tmp.list$ci.upper <- ci[, 2] } # standardized estimates? # 28 March 2024: TDJ adds option to select specific types if (is.logical(standardized)) { if (standardized) { standardized <- c("std.lv", "std.all") if (length(lavNames(object, "ov.x")) && object@Options$fixed.x) { standardized <- c(standardized, "std.nox") } } else { standardized <- character(0) } # corresponds to standardized=FALSE } else { # !is.logical(standardized) standardized <- tolower(as.character(standardized)) if ("std.nox" %in% standardized) { # sanity checks if (length(lavNames(object, "ov.x")) == 0) { lav_msg_note(gettext( "`std.nox' unavailable without fixed exogenous predictors")) standardized <- setdiff(standardized, "std.nox") } else if (!object@Options$fixed.x) { lav_msg_note(gettext("`std.nox' unavailable when fixed.x = FALSE")) standardized <- setdiff(standardized, "std.nox") } } } # Then add each requested type # (original source code, but now independently conditional) if ("std.lv" %in% standardized) { tmp.list$std.lv <- lav_standardize_lv(object, cov.std = cov.std) } if ("std.all" %in% standardized) { tmp.list$std.all <- lav_standardize_all(object, est.std = tmp.list$est.std, cov.std = cov.std ) } if ("std.nox" %in% standardized) { tmp.list$std.nox <- lav_standardize_all_nox(object, est.std = tmp.list$est.std, cov.std = cov.std ) } # rsquare? 
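  # (when requested, one row per dependent variable is appended below with
  #  op == "r2", lhs == rhs == the variable name, and est holding the
  #  R-square value; these rows are then merged into the main table via
  #  lav_partable_merge())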
if (rsquare) { r2 <- lavTech(object, "rsquare", add.labels = TRUE) tmp.names <- unlist(lapply(r2, names)) nel <- length(tmp.names) if (nel == 0L) { lav_msg_warn( gettext("rsquare = TRUE, but there are no dependent variables")) } else { if (lav_partable_nlevels(tmp.list) == 1L) { block <- rep(seq_along(r2), sapply(r2, length)) first.block.idx <- which(!duplicated(tmp.list$block) & tmp.list$block > 0L) gval <- tmp.list$group[first.block.idx] if (length(gval) > 0L) { group <- rep(gval, sapply(r2, length)) } else { # single block, single group group <- rep(1L, length(block)) } r2 <- data.frame( lhs = tmp.names, op = rep("r2", nel), rhs = tmp.names, block = block, group = group, est = unlist(r2), stringsAsFactors = FALSE ) } else { # add level column block <- rep(seq_along(r2), sapply(r2, length)) first.block.idx <- which(!duplicated(tmp.list$block) & tmp.list$block > 0L) # always at least two blocks gval <- tmp.list$group[first.block.idx] group <- rep(gval, sapply(r2, length)) lval <- tmp.list$level[first.block.idx] level <- rep(lval, sapply(r2, length)) r2 <- data.frame( lhs = tmp.names, op = rep("r2", nel), rhs = tmp.names, block = block, group = group, level = level, est = unlist(r2), stringsAsFactors = FALSE ) } # add step column if needed if (!is.null(tmp.list$step)) { r2$step <- 2L # per default # simplification: we assume that only the # observed indicators of latent variables are step 1 ov.ind <- unlist(object@pta$vnames$ov.ind) step1.idx <- which(r2$lhs %in% ov.ind) r2$step[step1.idx] <- 1L } tmp.list <- lav_partable_merge(pt1 = tmp.list, pt2 = r2, warn = FALSE) } } # fractional missing information (if estimator="fiml") if (fmi) { se.orig <- tmp.list$se # new in 0.6-6, use 'EM' based (unstructured) sample statistics # otherwise, it would be as if we use expected info, while the # original use observed, producing crazy results if (object@Data@ngroups > 1L) { em.cov <- lapply(lavInspect(object, "sampstat.h1"), "[[", "cov") em.mean <- lapply(lavInspect(object, "sampstat.h1"), "[[", "mean") } else { em.cov <- lavInspect(object, "sampstat.h1")$cov em.mean <- lavInspect(object, "sampstat.h1")$mean } tmp.pt <- parTable(object) tmp.pt$ustart <- tmp.pt$est tmp.pt$start <- tmp.pt$est <- NULL this.options <- object@Options if (!is.null(fmi.options) && is.list(fmi.options)) { # modify original options this.options <- modifyList(this.options, fmi.options) } # override this.options$optim.method <- "none" this.options$sample.cov.rescale <- FALSE this.options$check.gradient <- FALSE this.options$baseline <- FALSE this.options$h1 <- FALSE this.options$test <- FALSE fit.complete <- lavaan( model = tmp.pt, sample.cov = em.cov, sample.mean = em.mean, sample.nobs = lavInspect(object, "nobs"), slotOptions = this.options ) se.comp <- parameterEstimates(fit.complete, ci = FALSE, fmi = FALSE, zstat = FALSE, pvalue = FALSE, remove.system.eq = FALSE, remove.eq = FALSE, remove.ineq = FALSE, remove.def = FALSE, remove.nonfree = FALSE, remove.unused = FALSE, rsquare = rsquare, add.attributes = FALSE )$se se.comp <- ifelse(se.comp == 0.0, as.numeric(NA), se.comp) tmp.list$fmi <- 1 - (se.comp * se.comp) / (se.orig * se.orig) } # if single level, remove level column if (object@Data@nlevels == 1L) tmp.list$level <- NULL # if single group, remove group column if (object@Data@ngroups == 1L) tmp.list$group <- NULL # if single everything, remove block column if (object@Data@nlevels == 1L && object@Data@ngroups == 1L) { tmp.list$block <- NULL } # if no user-defined labels, remove label column if 
(sum(nchar(object@ParTable$label)) == 0L) { tmp.list$label <- NULL } # remove non-free parameters? (but keep ==, >, < and :=) if (remove.nonfree) { nonfree.idx <- which(tmp.list$free == 0L & !tmp.list$op %in% c("==", ">", "<", ":=")) if (length(nonfree.idx) > 0L) { tmp.list <- tmp.list[-nonfree.idx, ] } } # remove 'unused' parameters # these are parameters that are automatically added (user == 0), # but with their final (est) values fixed to their default values # (typically 1 or 0). # currently only intercepts and scaling-factors (for now) # should we also remove fixed-to-1 variances? (parameterization = theta)? if (remove.unused) { # intercepts int.idx <- which(tmp.list$op == "~1" & tmp.list$user == 0L & tmp.list$free == 0L & tmp.list$est == 0) if (length(int.idx) > 0L) { tmp.list <- tmp.list[-int.idx, ] } # scaling factors scaling.idx <- which(tmp.list$op == "~*~" & tmp.list$user == 0L & tmp.list$free == 0L & tmp.list$est == 1) if (length(scaling.idx) > 0L) { tmp.list <- tmp.list[-scaling.idx, ] } } # remove 'free' column tmp.list$free <- NULL # remove == rows? if (remove.eq) { eq.idx <- which(tmp.list$op == "==" & tmp.list$user == 1L) if (length(eq.idx) > 0L) { tmp.list <- tmp.list[-eq.idx, ] } } if (remove.system.eq) { eq.idx <- which(tmp.list$op == "==" & tmp.list$user != 1L) if (length(eq.idx) > 0L) { tmp.list <- tmp.list[-eq.idx, ] } } # remove <> rows? if (remove.ineq) { ineq.idx <- which(tmp.list$op %in% c("<", ">")) if (length(ineq.idx) > 0L) { tmp.list <- tmp.list[-ineq.idx, ] } } # remove := rows? if (remove.def) { def.idx <- which(tmp.list$op == ":=") if (length(def.idx) > 0L) { tmp.list <- tmp.list[-def.idx, ] } } # remove step 1 rows? if (remove.step1 && !is.null(tmp.list$step)) { step1.idx <- which(tmp.list$step == 1L) if (length(step1.idx) > 0L) { tmp.list <- tmp.list[-step1.idx, ] } # remove step column tmp.list$step <- NULL } # remove attribute for data order attr(tmp.list, "ovda") <- NULL # remove tmp.list$user tmp.list$user <- NULL if (output == "text") { class(tmp.list) <- c( "lavaan.parameterEstimates", "lavaan.data.frame", "data.frame" ) if (header) { attr(tmp.list, "categorical") <- object@Model@categorical attr(tmp.list, "parameterization") <- object@Model@parameterization attr(tmp.list, "information") <- object@Options$information[1] attr(tmp.list, "information.meat") <- object@Options$information.meat attr(tmp.list, "se") <- object@Options$se attr(tmp.list, "group.label") <- object@Data@group.label attr(tmp.list, "level.label") <- object@Data@level.label attr(tmp.list, "bootstrap") <- object@Options$bootstrap attr(tmp.list, "bootstrap.successful") <- bootstrap.successful attr(tmp.list, "missing") <- object@Options$missing attr(tmp.list, "observed.information") <- object@Options$observed.information[1] attr(tmp.list, "h1.information") <- object@Options$h1.information[1] attr(tmp.list, "h1.information.meat") <- object@Options$h1.information.meat attr(tmp.list, "header") <- header # FIXME: add more!! } } else { tmp.list$exo <- NULL tmp.list$lower <- tmp.list$upper <- NULL class(tmp.list) <- c("lavaan.data.frame", "data.frame") } tmp.list } parameterTable <- parametertable <- parTable <- partable <- # nolint function(object) { # convert to data.frame out <- as.data.frame(object@ParTable, stringsAsFactors = FALSE) class(out) <- c("lavaan.data.frame", "data.frame") out } varTable <- vartable <- function(object, ov.names = names(object), # nolint ov.names.x = NULL, ordered = NULL, factor = NULL, as.data.frame. 
= TRUE) { # nolint
  if (inherits(object, "lavaan")) {
    tmp.var <- object@Data@ov
  } else if (inherits(object, "lavData")) {
    tmp.var <- object@ov
  } else if (inherits(object, "data.frame")) {
    tmp.var <- lav_dataframe_vartable(
      frame = object, ov.names = ov.names,
      ov.names.x = ov.names.x, ordered = ordered, factor = factor,
      as.data.frame. = FALSE
    )
  } else {
    lav_msg_stop(gettext("object must be of class lavaan or a data.frame"))
  }
  if (as.data.frame.) {
    tmp.var <- as.data.frame(tmp.var,
      stringsAsFactors = FALSE,
      row.names = seq_along(tmp.var$name)
    )
    class(tmp.var) <- c("lavaan.data.frame", "data.frame")
  }
  tmp.var
}

setMethod(
  "fitted.values", "lavaan",
  function(object, type = "moments", labels = TRUE) {
    # lowercase type
    type <- tolower(type)
    # catch type="casewise"
    if (type %in% c("casewise", "case", "obs", "observations", "ov")) {
      return(lavPredict(object, type = "ov", label = labels))
    }
    lav_object_inspect_implied(object,
      add.labels = labels, add.class = TRUE,
      drop.list.single.group = TRUE
    )
  }
)

setMethod(
  "fitted", "lavaan",
  function(object, type = "moments", labels = TRUE) {
    fitted.values(object, type = type, labels = labels)
  }
)

setMethod(
  "vcov", "lavaan",
  function(object, type = "free", labels = TRUE, remove.duplicated = FALSE) {
    # check for convergence first!
    if (object@optim$npar > 0L && !object@optim$converged) {
      lav_msg_stop(gettext("model did not converge"))
    }
    if (object@Options$se == "none") {
      lav_msg_stop(gettext("vcov not available if se=\"none\""))
    }
    if (type == "user" || type == "joint" || type == "all" ||
        type == "full" || type == "complete") {
      if (remove.duplicated) {
        lav_msg_stop(gettext(
          "argument \"remove.duplicated\" not supported if type = \"user\""
        ))
      }
      tmp.varcov <- lav_object_inspect_vcov_def(object,
        joint = TRUE,
        add.labels = labels, add.class = TRUE
      )
    } else if (type == "free") {
      tmp.varcov <- lav_object_inspect_vcov(object,
        add.labels = labels, add.class = TRUE,
        remove.duplicated = remove.duplicated
      )
    } else {
      lav_msg_stop(gettext("type argument should be \"user\" or \"free\""))
    }
    tmp.varcov
  }
)

# logLik (so that we can use the default AIC/BIC functions from stats4)
setMethod(
  "logLik", "lavaan",
  function(object, ...) {
    if (object@Options$estimator != "ML") {
      lav_msg_warn(gettext("logLik only available if estimator is ML"))
    }
    if (object@optim$npar > 0L && !object@optim$converged) {
      lav_msg_warn(gettext("model did not converge"))
    }
    # new in 0.6-1: we use the @loglik slot (instead of fitMeasures)
    if (.hasSlot(object, "loglik")) {
      tmp.logl <- object@loglik
    } else {
      tmp.logl <- lav_model_loglik(
        lavdata = object@Data,
        lavsamplestats = object@SampleStats,
        lavimplied = object@implied,
        lavmodel = object@Model,
        lavoptions = object@Options
      )
    }
    logl <- tmp.logl$loglik
    attr(logl, "df") <- tmp.logl$npar ### note: must be npar, not df!!
    attr(logl, "nobs") <- tmp.logl$ntotal
    class(logl) <- "logLik"
    logl
  }
)

# nobs
if (!exists("nobs", envir = asNamespace("stats4"))) {
  setGeneric("nobs", function(object, ...) standardGeneric("nobs"))
}
setMethod(
  "nobs", signature(object = "lavaan"),
  function(object, ...) {
    object@SampleStats@ntotal
  }
)

# see: src/library/stats/R/update.R
setMethod(
  "update", signature(object = "lavaan"),
  function(object, model, add, ..., evaluate = TRUE) {
    call <- object@call
    if (is.null(call)) {
      lav_msg_stop(gettext("need an object with call slot"))
    }
    extras <- match.call(expand.dots = FALSE)$...
    if (!missing(model)) {
      # call$formula <- update.formula(formula(object), formula.)
      call$model <- model
    } else if (exists(as.character(call$model))) {
      call$model <- eval(call$model, parent.frame())
    } else if (is.character(call$model)) {
      ## do nothing
      ## call$model <- call$model
    } else {
      call$model <- parTable(object)
      call$model$est <- NULL
      call$model$se <- NULL
    }
    if (!is.null(call$slotParTable) && is.list(call$model)) {
      call$slotParTable <- call$model
    }
    if (length(extras) > 0) {
      ## check for call$slotOptions conflicts
      if (!is.null(call$slotOptions)) {
        same.names <- intersect(names(lavOptions()), names(extras))
        for (i in same.names) {
          call$slotOptions[[i]] <- extras[[i]]
          extras[i] <- NULL # not needed if they are in slotOptions
        }
      }
      existing <- !is.na(match(names(extras), names(call)))
      for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
      if (any(!existing)) {
        call <- c(as.list(call), extras[!existing])
        call <- as.call(call)
      }
    }
    if (missing(add) && !evaluate) {
      return(call)
    }
    ## for any of the other 3 scenarios, we need the updated fit
    ## Check if "add" and "model" are both strings; combine them
    if (missing(add)) {
      add.already.in.partable <- TRUE # because nothing to add
    } else {
      if (is.character(add) && is.character(call$model)) {
        call$model <- c(call$model, add)
        add.already.in.partable <- TRUE
      } else {
        add.already.in.partable <- FALSE
      }
    }
    newfit <- eval(call, parent.frame())
    if (add.already.in.partable && evaluate) {
      return(newfit)
    }
    ## only remaining situations: "add" exists, but either "add" or "model"
    ## is a parameter table, so update the parameter table in the call
    if (!(mode(add) %in% c("list", "character"))) {
      lav_msg_stop(
        gettext("'add' argument must be model syntax or parameter table. See ?lavaanify help page.")
      )
    }
    tmp.pt <- lav_object_extended(newfit, add = add)@ParTable
    tmp.pt$user <- NULL # get rid of "10" category used in lavTestScore()
    ## group == 0L in new rows
    tmp.pt$group[tmp.pt$group == 0L] <- tmp.pt$block[tmp.pt$group == 0L]
    # tmp.pt$plabel == "" in new rows. Consequences?
    tmp.pt$est <- NULL
    tmp.pt$se <- NULL
    call$model <- tmp.pt
    if (evaluate) {
      eval(call, parent.frame())
    } else {
      call
    }
  }
)

setMethod(
  "anova", signature(object = "lavaan"),
  function(object, ...) {
    # NOTE: if we add additional arguments, it is not the same generic
    # anova() function anymore, and match.call will be screwed up
    # NOTE: we need to extract the names of the models from match.call here,
    # otherwise, we lose them in the call stack
    mcall <- match.call(expand.dots = TRUE)
    dots <- list(...)
    # catch SB.classic and SB.H0
    # SB.classic <- TRUE; SB.H0 <- FALSE
    # arg.names <- names(dots)
    # arg.idx <- which(nchar(arg.names) > 0L)
    # if(length(arg.idx) > 0L) {
    #     if(!is.null(dots$SB.classic))
    #         SB.classic <- dots$SB.classic
    #     if(!is.null(dots$SB.H0))
    #         SB.H0 <- dots$SB.H0
    #     dots <- dots[-arg.idx]
    # }
    modp <- if (length(dots)) {
      sapply(dots, inherits, "lavaan")
    } else {
      logical(0)
    }
    tmp.names <- sapply(as.list(mcall)[c(FALSE, TRUE, modp)], deparse)
    # use do.call to handle changed dots
    # ans <- do.call("lavTestLRT", c(list(object = object,
    #     SB.classic = SB.classic, SB.H0 = SB.H0,
    #     model.names = tmp.names), dots))
    # ans
    lavTestLRT(object = object, ..., model.names = tmp.names)
  }
)
lavaan/R/lav_syntax_independence.R0000644000176200001440000000453314627656441016755 0ustar liggesusers
# generate syntax for an independence model
lav_syntax_independence <- function(ov.names = character(0),
                                    ov.names.x = character(0),
                                    sample.cov = NULL) {
  ov.names.nox <- ov.names[!ov.names %in% ov.names.x]
  nvar <- length(ov.names.nox)
  lv.names <- paste("f", 1:nvar, sep = "")
  # check sample.cov
  if (!is.null(sample.cov)) {
    if (is.list(sample.cov)) {
      ngroups <- length(sample.cov)
    } else {
      ngroups <- 1L
      sample.cov <- list(sample.cov)
    }
    stopifnot(is.matrix(sample.cov[[1]]))
    # stopifnot(length(ov.names) == nrow(sample.cov[[1]]))
    # FIXME: check rownames and reorder...
  }
  # construct lavaan syntax for an independence model
  txt <- "# independence model\n"
  # =~ lines (each observed variable has its own latent variable)
  # except for ov's that are in ov.names.x
  txt <- paste(txt, paste(lv.names, " =~ 1*", ov.names.nox, "\n",
    sep = "", collapse = ""
  ), sep = "")
  # residual ov variances fixed to zero
  txt <- paste(txt, paste(ov.names.nox, " ~~ 0*", ov.names.nox, "\n",
    sep = "", collapse = ""
  ), sep = "")
  # latent variances
  if (is.null(sample.cov)) {
    txt <- paste(txt, paste(lv.names, " ~~ ", lv.names, "\n",
      sep = "", collapse = ""
    ), sep = "")
  } else {
    # fill in sample values
    ov.idx <- match(ov.names.nox, ov.names)
    start.txt <- paste("start(c(",
      apply(matrix(
        unlist(lapply(sample.cov, function(x) {
          diag(x)[ov.idx]
        })),
        ncol = ngroups
      ), 1, paste, collapse = ","),
      "))",
      sep = ""
    )
    txt <- paste(txt, paste(lv.names, " ~~ ", start.txt, " * ", lv.names,
      "\n",
      sep = "", collapse = ""
    ), sep = "")
  }
  # latent *covariances* fixed to zero (= independence!)
  if (length(lv.names) > 1L) {
    tmp <- utils::combn(lv.names, 2)
    txt <- paste(txt, paste(tmp[1, ], " ~~ 0*", tmp[2, ], "\n",
      sep = "", collapse = ""
    ), sep = "")
  }
  # if 'independent x' variables, add an 'empty' regression
  if ((nx <- length(ov.names.x)) > 0) {
    # dummy regression line
    txt <- paste(txt, paste("f1 ~ 0*", ov.names.x, "\n",
      sep = "", collapse = ""
    ), sep = "")
  }
  # Note: no need to pass starting values here, lavaanStart will
  # use the sample statistics anyway....
  txt
}
lavaan/R/lav_uvreg.R0000644000176200001440000001767014627656441014054 0ustar liggesusers
# the univariate (weighted) linear model
# - scores/gradient/hessian
# - including the residual variance!
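# An illustrative call (hypothetical data, not run; lav_uvreg_fit() is an
# internal, unexported helper, so access it via lavaan:::lav_uvreg_fit):
#   y <- rnorm(100)
#   X <- matrix(rnorm(200), ncol = 2)
#   fit <- lavaan:::lav_uvreg_fit(y = y, X = X)
#   fit$theta # intercept, two slopes, residual variance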
# YR - 30 Dec 2019 (replacing the old lav_ols.R routines)

lav_uvreg_fit <- function(y = NULL, X = NULL, wt = NULL,
                          optim.method = "nlminb", control = list(),
                          output = "list") {
  # check weights
  if (is.null(wt)) {
    wt <- rep(1, length(y))
  } else {
    if (length(y) != length(wt)) {
      lav_msg_stop(gettext("length y is not the same as length wt"))
    }
    if (any(wt < 0)) {
      lav_msg_stop(gettext("all weights should be non-negative"))
    }
  }
  # optim.method
  minObjective <- lav_uvreg_min_objective
  minGradient <- lav_uvreg_min_gradient
  minHessian <- lav_uvreg_min_hessian
  if (optim.method == "nlminb" || optim.method == "nlminb2") {
    # nothing to do
  } else if (optim.method == "nlminb0") {
    minGradient <- minHessian <- NULL
  } else if (optim.method == "nlminb1") {
    minHessian <- NULL
  }
  # create cache environment
  cache <- lav_uvreg_init_cache(y = y, X = X, wt = wt)
  # optimize -- only changes from defaults
  control.nlminb <- list(
    eval.max = 20000L, iter.max = 10000L, trace = 0L,
    abs.tol = (.Machine$double.eps * 10)
  )
  control.nlminb <- modifyList(control.nlminb, control)
  optim <- nlminb(
    start = cache$theta, objective = minObjective,
    gradient = minGradient, hessian = minHessian,
    control = control.nlminb, cache = cache
  )
  if (output == "cache") {
    return(cache)
  }
  # return results as a list (to be compatible with lav_polychor.R)
  out <- list(
    theta = optim$par, nexo = cache$nexo, int.idx = cache$int.idx,
    slope.idx = cache$slope.idx, beta.idx = cache$beta.idx,
    var.idx = cache$var.idx, y = cache$y, wt = cache$wt,
    X = cache$X1[, -1L, drop = FALSE], yhat = cache$yhat
  )
  out
}

# prepare cache environment
lav_uvreg_init_cache <- function(y = NULL, X = NULL,
                                 wt = rep(1, length(y)),
                                 parent = parent.frame()) {
  # y
  y <- as.vector(y)
  # X
  if (is.null(X)) {
    nexo <- 0L
    X1 <- matrix(1, length(y), 1)
  } else {
    X <- unname(X)
    nexo <- ncol(X)
    X1 <- cbind(1, X, deparse.level = 0)
    # new in 0.6-17: check if X is full rank
    if (!anyNA(X)) {
      if (qr(X)$rank < ncol(X)) {
        lav_msg_stop(gettext("matrix of exogenous covariates is rank deficient!
(i.e., some x variables contain redundant information)")) } } } # nobs if (is.null(wt)) { N <- length(y) } else { N <- sum(wt) } # indices of free parameters int.idx <- 1L slope.idx <- seq_len(nexo) + 1L beta.idx <- c(int.idx, slope.idx) var.idx <- 1L + nexo + 1L # starting values + crossprod if (any(is.na(y)) || any(is.na(X1))) { missing.idx <- which(apply(cbind(y, X1), 1, function(x) any(is.na(x)))) y.tmp <- y[-missing.idx] X1.tmp <- X1[-missing.idx, , drop = FALSE] wt.tmp <- wt[-missing.idx] fit.lm <- stats::lm.wfit(y = y.tmp, x = X1.tmp, w = wt.tmp) theta.evar <- sum(fit.lm$residuals * wt.tmp * fit.lm$residuals) / sum(wt.tmp) lav_crossprod <- lav_matrix_crossprod } else { fit.lm <- stats::lm.wfit(y = y, x = X1, w = wt) theta.evar <- sum(fit.lm$residuals * wt * fit.lm$residuals) / sum(wt) lav_crossprod <- base::crossprod } theta.beta <- unname(fit.lm$coefficients) theta <- c(theta.beta, theta.evar) out <- list2env( list( y = y, X1 = X1, wt = wt, N = N, int.idx = int.idx, beta.idx = beta.idx, var.idx = var.idx, slope.idx = slope.idx, nexo = nexo, lav_crossprod = lav_crossprod, theta = theta ), parent = parent ) out } # compute total (log)likelihood lav_uvreg_loglik <- function(y = NULL, X = NULL, wt = rep(1, length(y)), cache = NULL) { if (is.null(cache)) { cache <- lav_uvreg_fit(y = y, X = X, wt = wt, output = "cache") } lav_uvreg_loglik_cache(cache = cache) } lav_uvreg_loglik_cache <- function(cache = NULL) { with(cache, { # free parameters beta <- theta[beta.idx] evar <- theta[var.idx] yhat <- drop(X1 %*% beta) logliki <- dnorm(y, mean = yhat, sd = sqrt(evar), log = TRUE) # total weighted log-likelihood loglik <- sum(wt * logliki, na.rm = TRUE) return(loglik) }) } # casewise scores lav_uvreg_scores <- function(y = NULL, X = NULL, wt = rep(1, length(y)), cache = NULL) { if (is.null(cache)) { cache <- lav_uvreg_fit(y = y, X = X, wt = wt, output = "cache") } lav_uvreg_scores_cache(cache = cache) } lav_uvreg_scores_cache <- function(cache = NULL) { with(cache, { res <- y - yhat resw <- res * wt evar2 <- evar * evar scores.beta <- 1 / evar * X1 * resw scores.evar <- -wt / (2 * evar) + 1 / (2 * evar2) * res * resw return(cbind(scores.beta, scores.evar, deparse.level = 0)) }) } # gradient lav_uvreg_gradient <- function(y = NULL, X = NULL, wt = rep(1, length(y)), cache = NULL) { if (is.null(cache)) { cache <- lav_uvreg_fit(y = y, X = X, wt = wt, output = "cache") } lav_uvreg_gradient_cache(cache = cache) } lav_uvreg_gradient_cache <- function(cache = NULL) { with(cache, { res <- y - yhat resw <- res * wt evar2 <- evar * evar dx.beta <- colSums(1 / evar * X1 * resw, na.rm = TRUE) dx.var <- sum(-wt / (2 * evar) + 1 / (2 * evar2) * res * resw, na.rm = TRUE) return(c(dx.beta, dx.var)) }) } # compute total Hessian lav_uvreg_hessian <- function(y = NULL, X = NULL, wt = rep(1, length(y)), cache = NULL) { if (is.null(cache)) { cache <- lav_uvreg_fit(y = y, X = X, wt = wt, output = "cache") } lav_uvreg_hessian_cache(cache = cache) } lav_uvreg_hessian_cache <- function(cache = NULL) { with(cache, { dx2.beta <- -1 / evar * lav_crossprod(X1 * wt, X1) dx.beta.var <- -1 / (evar2) * lav_crossprod(X1, resw) sq.evar <- sqrt(evar) sq.evar6 <- sq.evar * sq.evar * sq.evar * sq.evar * sq.evar * sq.evar dx2.var <- (sum(wt, na.rm = TRUE) / (2 * evar2) - 1 / sq.evar6 * sum(resw * res, na.rm = TRUE)) Hessian <- rbind(cbind(dx2.beta, dx.beta.var, deparse.level = 0), cbind(t(dx.beta.var), dx2.var, deparse.level = 0), deparse.level = 0 ) return(Hessian) }) } # compute total (log)likelihood, for specific 'x' (nlminb) 
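# (note: the three wrappers below return minus the quantity of interest,
#  scaled by 1/N, because nlminb() minimizes; dividing by the total weight
#  N puts the objective on a per-observation scale)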
lav_uvreg_min_objective <- function(x, cache = NULL) { cache$theta <- x -1 * lav_uvreg_loglik_cache(cache = cache) / cache$N } # compute gradient, for specific 'x' (nlminb) lav_uvreg_min_gradient <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_uvreg_loglik_cache(cache = cache) } -1 * lav_uvreg_gradient_cache(cache = cache) / cache$N } # compute hessian, for specific 'x' (nlminb) lav_uvreg_min_hessian <- function(x, cache = NULL) { # check if x has changed if (!all(x == cache$theta)) { cache$theta <- x tmp <- lav_uvreg_loglik_cache(cache = cache) tmp <- lav_uvreg_gradient_cache(cache = cache) } -1 * lav_uvreg_hessian_cache(cache = cache) / cache$N } # update fit object with new parameters lav_uvreg_update_fit <- function(fit.y = NULL, evar.new = NULL, beta.new = NULL) { if (is.null(evar.new) && is.null(beta.new)) { return(fit.y) } if (!is.null(evar.new)) { fit.y$theta[fit.y$var.idx] <- evar.new } if (!is.null(beta.new)) { fit.y$theta[fit.y$beta.idx] <- beta.new } beta <- fit.y$theta[fit.y$beta.idx] X <- fit.y$X X1 <- cbind(1, X, deparse.level = 0) fit.y$yhat <- drop(X1 %*% beta) fit.y } lavaan/R/lav_lavaan_step07_bounds.R0000644000176200001440000000200414627656441016733 0ustar liggesuserslav_lavaan_step07_bounds <- function(lavoptions = NULL, lavh1 = NULL, lavdata = NULL, lavsamplestats = NULL, lavpartable = NULL) { # # # # # # # # # # # # # # # # # 7. parameter bounds # # # # # # # # # # # # # # # # # # if lavoptions$optim.bounds not NULL and its members lower and upper # have length > 0L # modify lavpartable via lav_partable_add_bounds # automatic bounds (new in 0.6-6) if (!is.null(lavoptions$optim.bounds) || length(lavoptions$optim.bounds$lower) > 0L || length(lavoptions$optim.bounds$upper) > 0L) { if (lav_verbose()) { cat("lavpartable bounds ...") } lavpartable <- lav_partable_add_bounds( partable = lavpartable, lavh1 = lavh1, lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) if (lav_verbose()) { cat(" done.\n") } } lavpartable } lavaan/R/lav_bootstrap_lrt.R0000644000176200001440000002770114627656441015626 0ustar liggesusers## YR this files needs updating! should be merged with ## lav_bootstrap.R bootstrapLRT <- function(h0 = NULL, h1 = NULL, R = 1000L, type = "bollen.stine", verbose = FALSE, return.LRT = FALSE, double.bootstrap = "no", double.bootstrap.R = 500L, double.bootstrap.alpha = 0.05, parallel = c("no", "multicore", "snow"), ncpus = 1L, cl = NULL, iseed = NULL) { # checks type <- tolower(type) stopifnot( inherits(h0, "lavaan"), inherits(h1, "lavaan"), type %in% c( "bollen.stine", "parametric", "yuan", "nonparametric", "ordinary" ), double.bootstrap %in% c("no", "FDB", "standard") ) if (type == "nonparametric") type <- "ordinary" if (!missing(verbose)) { current.verbose <- lav_verbose() if (lav_verbose(verbose)) on.exit(lav_verbose(current.verbose), TRUE) } # check for conditional.x = TRUE if (h0@Model@conditional.x) { lav_msg_stop(gettext( "this function is not (yet) available if conditional.x = TRUE")) } # prepare LRT <- rep(as.numeric(NA), R) if ((h1@optim$fx - h0@optim$fx) > sqrt(.Machine$double.eps)) { # restricted fit should not be better! cat( " ... h0@optim$fx = ", h0@optim$fx, "h1@optim$fx = ", h1@optim$fx, "h0 should not be better!\n" ) return(NULL) } LRT.original <- abs(anova(h1, h0)$`Chisq diff`[2L]) # abs only needed because df may be the same for both models! 
if (double.bootstrap == "FDB") { LRT.2 <- numeric(R) } else if (double.bootstrap == "standard") { plugin.pvalues <- numeric(R) } # prepare for parallel processing if (missing(parallel)) parallel <- "no" parallel <- match.arg(parallel) have_mc <- have_snow <- FALSE if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") { have_mc <- .Platform$OS.type != "windows" } else if (parallel == "snow") { have_snow <- TRUE } if (!have_mc && !have_snow) { ncpus <- 1L } loadNamespace("parallel") } # data data <- h0@Data # Compute covariance matrix and additional mean vector if (type == "bollen.stine" || type == "parametric" || type == "yuan") { Sigma.hat <- computeSigmaHat(lavmodel = h0@Model) Mu.hat <- computeMuHat(lavmodel = h0@Model) } # can we use the original data, or do we need to transform it first? if (type == "bollen.stine" || type == "yuan") { # check if data is complete if (h0@Options$missing != "listwise") { lav_msg_stop(gettext( "bollen.stine/yuan bootstrap not available for missing data")) } dataX <- vector("list", length = data@ngroups) } else { dataX <- data@X } lavdata <- h0@Data lavoptions <- h0@Options # Bollen-Stine data transformation if (type == "bollen.stine") { for (g in 1:h0@Data@ngroups) { sigma.sqrt <- lav_matrix_symmetric_sqrt(Sigma.hat[[g]]) S.inv.sqrt <- lav_matrix_symmetric_sqrt(h0@SampleStats@icov[[g]]) # center X <- scale(data@X[[g]], center = TRUE, scale = FALSE) # transform X <- X %*% S.inv.sqrt %*% sigma.sqrt # add model based mean if (h0@Model@meanstructure) { X <- scale(X, center = (-1 * Mu.hat[[g]]), scale = FALSE) } # transformed data dataX[[g]] <- X } # Yuan et al data transformation } else if ((type == "yuan")) { # page numbers refer to Yuan et al, 2007 # Define a function to find appropriate value of a # (p. 272) g.a <- function(a, Sigmahat, Sigmahat.inv, S, tau.hat, p) { S.a <- a * S + (1 - a) * Sigmahat tmp.term <- S.a %*% Sigmahat.inv res1 <- (sum(diag(tmp.term)) - log(det(tmp.term)) - p) - tau.hat res <- res1 * res1 # From p 272 attr(res, "gradient") <- sum(diag((S - Sigmahat) %*% (Sigmahat.inv - chol2inv(chol(S.a))))) res } # Now use g.a within each group for (g in 1:h0@Data@ngroups) { S <- h0@SampleStats@cov[[g]] # test is in Fit slot ghat <- h0@test[[1]]$stat.group[[g]] df <- h0@test[[1]]$df Sigmahat <- Sigma.hat[[g]] Sigmahat.inv <- inv.chol(Sigmahat) nmv <- nrow(Sigmahat) n <- data@nobs[[g]] # Calculate tauhat_1, middle p. 267. # Yuan et al note that tauhat_1 could be negative; # if so, we need to let S.a = Sigmahat. (see middle p 275) tau.hat <- (ghat - df) / (n - 1) if (tau.hat >= 0) { # Find a to minimize g.a a <- optimize( g.a, c(0, 1), Sigmahat, Sigmahat.inv, S, tau.hat, nmv )$minimum # Calculate S_a (p. 267) S.a <- a * S + (1 - a) * Sigmahat } else { S.a <- Sigmahat } # Transform the data (p. 
263) S.a.sqrt <- lav_matrix_symmetric_sqrt(S.a) S.inv.sqrt <- lav_matrix_symmetric_sqrt(h0@SampleStats@icov[[g]]) X <- data@X[[g]] X <- X %*% S.inv.sqrt %*% S.a.sqrt # transformed data dataX[[g]] <- X } } # run bootstraps fn <- function(b) { if (type == "bollen.stine" || type == "ordinary" || type == "yuan") { # take a bootstrap sample for each group BOOT.idx <- vector("list", length = lavdata@ngroups) for (g in 1:lavdata@ngroups) { stopifnot(lavdata@nobs[[g]] > 1L) boot.idx <- sample( x = lavdata@nobs[[g]], size = lavdata@nobs[[g]], replace = TRUE ) BOOT.idx[[g]] <- boot.idx dataX[[g]] <- dataX[[g]][boot.idx, , drop = FALSE] } newData <- lav_data_update( lavdata = lavdata, newX = dataX, BOOT.idx = BOOT.idx, lavoptions = lavoptions ) } else { # parametric! for (g in 1:lavdata@ngroups) { dataX[[g]] <- MASS::mvrnorm( n = lavdata@nobs[[g]], Sigma = Sigma.hat[[g]], mu = Mu.hat[[g]] ) } newData <- lav_data_update( lavdata = lavdata, newX = dataX, lavoptions = lavoptions ) } # verbose if (lav_verbose()) cat(" ... bootstrap draw number: ", b, "\n") # Get sample statistics bootSampleStats <- try(lav_samplestats_from_data( lavdata = newData, lavoptions = lavoptions ), silent = TRUE) if (inherits(bootSampleStats, "try-error")) { if (lav_verbose()) cat(" FAILED: creating h0@SampleStats statistics\n") return(NULL) } if (lav_verbose()) cat(" ... ... model h0: ") current.verbose2 <- lav_verbose() if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose2), TRUE, FALSE) h0@Options$se <- "none" h0@Options$test <- "standard" h0@Options$baseline <- FALSE h0@Options$h1 <- FALSE # Fit h0 model fit.h0 <- suppressWarnings(lavaan( slotOptions = h0@Options, slotParTable = h0@ParTable, slotSampleStats = bootSampleStats, slotData = data )) lav_verbose(current.verbose2) if (!fit.h0@optim$converged) { if (lav_verbose()) cat(" FAILED: no convergence\n") return(NULL) } if (lav_verbose()) { cat( " ok -- niter = ", fit.h0@optim$iterations, " fx = ", fit.h0@optim$fx, "\n" ) } if (lav_verbose()) cat(" ... ... model h1: ") lav_verbose(FALSE) h1@Options$se <- "none" h1@Options$test <- "standard" h1@Options$baseline <- FALSE h1@Options$h1 <- FALSE # Fit h1 model fit.h1 <- suppressWarnings(lavaan( slotOptions = h1@Options, slotParTable = h1@ParTable, slotSampleStats = bootSampleStats, slotData = data )) lav_verbose(current.verbose2) if (!fit.h1@optim$converged) { if (lav_verbose()) { cat( " FAILED: no convergence -- niter = ", fit.h1@optim$iterations, " fx = ", fit.h1@optim$fx, "\n" ) } return(NULL) } if (lav_verbose()) { cat( " ok -- niter = ", fit.h1@optim$iterations, " fx = ", fit.h1@optim$fx, "\n" ) } # store LRT if ((fit.h1@optim$fx - fit.h0@optim$fx) > sqrt(.Machine$double.eps)) { # if((fit.h1@optim$fx - fit.h0@optim$fx) > 0.0) { if (lav_verbose()) { cat(" ... ... LRT = h0 > h1, delta = ", fit.h1@optim$fx - fit.h0@optim$fx, "\n") } return(NULL) } else { lrt.boot <- abs(anova(fit.h1, fit.h0)$`Chisq diff`[2L]) if (lav_verbose()) { cat(" ... ... LRT = ", lrt.boot, "\n") } } # double bootstrap if (double.bootstrap == "standard") { if (lav_verbose()) cat(" ... ... 
calibrating p.value - ") plugin.pvalue <- bootstrapLRT( h0 = fit.h0, h1 = fit.h1, R = double.bootstrap.R, type = type, verbose = FALSE, return.LRT = FALSE, # FALSE parallel = parallel, ncpus = ncpus, cl = cl, double.bootstrap = "no" ) if (lav_verbose()) cat(sprintf("%5.3f", plugin.pvalue), "\n") attr(lrt.boot, "plugin.pvalue") <- plugin.pvalue } else if (double.bootstrap == "FDB") { # Fast double bootstrap plugin.pvalue <- bootstrapLRT( h0 = fit.h0, h1 = fit.h1, R = 1L, type = type, verbose = FALSE, return.LRT = TRUE, # TRUE parallel = parallel, ncpus = ncpus, cl = cl, double.bootstrap = "no" ) LRT.2 <- attr(plugin.pvalue, "LRT") if (lav_verbose()) cat(" ... ... LRT2 = ", LRT.2, "\n") attr(lrt.boot, "LRT.2") <- LRT.2 } lrt.boot } # Parallel processing RR <- sum(R) res <- if (ncpus > 1L && (have_mc || have_snow)) { if (have_mc) { parallel::mclapply(seq_len(RR), fn, mc.cores = ncpus) } else if (have_snow) { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) if (RNGkind()[1L] == "L'Ecuyer-CMRG") { parallel::clusterSetRNGStream(cl, iseed = iseed) } res <- parallel::parLapply(cl, seq_len(RR), fn) parallel::stopCluster(cl) res } else { parallel::parLapply(cl, seq_len(RR), fn) } } } else { lapply(seq_len(RR), fn) } error.idx <- integer(0) for (b in seq_len(RR)) { if (!is.null(res[[b]])) { LRT[b] <- res[[b]] if (double.bootstrap == "standard") { plugin.pvalues[b] <- attr(res[[b]], "plugin.pvalue") } else if (double.bootstrap == "FDB") { LRT.2[b] <- attr(res[[b]], "LRT.2") } } else { error.idx <- c(error.idx, b) } } # Error handling if (length(error.idx) > 0L) { # warning("lavaan WARNING: only ", (R - length(error.idx)), # " bootstrap draws were successful") LRT <- LRT[-error.idx] if (length(LRT) == 0) LRT <- as.numeric(NA) if (double.bootstrap == "standard") { plugin.pvalues <- plugin.pvalues[-error.idx] attr(LRT, "error.idx") <- error.idx } if (double.bootstrap == "FDB") { LRT.2 <- LRT.2[-error.idx] attr(LRT.2, "error.idx") <- error.idx } } else { if (lav_verbose()) { cat("Number of successful bootstrap draws:", (R - length(error.idx)), "\n") } } pvalue <- sum(LRT > LRT.original) / length(LRT) if (return.LRT) { attr(pvalue, "LRT.original") <- LRT.original attr(pvalue, "LRT") <- LRT } if (double.bootstrap == "FDB") { Q <- (1 - pvalue) lrt.q <- quantile(LRT.2, Q, na.rm = TRUE) adj.pvalue <- sum(LRT > lrt.q) / length(LRT) attr(pvalue, "lrt.q") <- lrt.q attr(pvalue, "adj.pvalue") <- adj.pvalue if (return.LRT) { attr(pvalue, "LRT.original") <- LRT.original attr(pvalue, "LRT") <- LRT attr(pvalue, "LRT2") <- LRT.2 } } else if (double.bootstrap == "standard") { adj.alpha <- quantile(plugin.pvalues, double.bootstrap.alpha, na.rm = TRUE ) attr(pvalue, "adj.alpha") <- adj.alpha adj.pvalue <- sum(plugin.pvalues < pvalue) / length(plugin.pvalues) attr(pvalue, "plugin.pvalues") <- plugin.pvalues attr(pvalue, "adj.pvalue") <- adj.pvalue } pvalue } lavaan/R/lav_model_utils.R0000644000176200001440000002104414627656441015242 0ustar liggesusers# lav_model utility functions # initial version: YR 25/03/2009: `methods' for the Model class # - YR 14 Jan 2014: rename object -> lavmodel, all functions as lav_model_* # - YR 20 Nov 2021: add lav_model_dmmdpar lav_model_get_parameters <- function(lavmodel = NULL, GLIST = NULL, type = "free", extra = TRUE) { # type == "free": only non-redundant free parameters (x) # type == "user": all parameters listed in User model # state or final? 
if (is.null(GLIST)) GLIST <- lavmodel@GLIST # if this changes, tag @TDJorgensen in commit message if (type == "free") { N <- lavmodel@nx.free # } else if(type == "unco") { # N <- lavmodel@nx.unco } else if (type == "user") { N <- lavmodel@nx.user } x <- numeric(N) for (mm in 1:length(lavmodel@GLIST)) { if (type == "free") { m.idx <- lavmodel@m.free.idx[[mm]] x.idx <- lavmodel@x.free.idx[[mm]] # } else if(type == "unco") { # m.idx <- lavmodel@m.unco.idx[[mm]] # x.idx <- lavmodel@x.unco.idx[[mm]] } else if (type == "user") { m.idx <- lavmodel@m.user.idx[[mm]] x.idx <- lavmodel@x.user.idx[[mm]] } x[x.idx] <- GLIST[[mm]][m.idx] } if (type == "user" && extra && sum( lavmodel@x.def.idx, lavmodel@x.ceq.idx, lavmodel@x.cin.idx ) > 0L) { # we need 'free' x x.free <- lav_model_get_parameters( lavmodel = lavmodel, GLIST = GLIST, type = "free" ) if (length(lavmodel@x.def.idx) > 0L) { x[lavmodel@x.def.idx] <- lavmodel@def.function(x.free) } if (length(lavmodel@x.ceq.idx) > 0L) { x[lavmodel@x.ceq.idx] <- lavmodel@ceq.function(x.free) } if (length(lavmodel@x.cin.idx) > 0L) { x[lavmodel@x.cin.idx] <- lavmodel@cin.function(x.free) } } x } # warning: this will make a copy of lavmodel lav_model_set_parameters <- function(lavmodel = NULL, x = NULL) { tmp <- lavmodel@GLIST for (mm in 1:length(lavmodel@GLIST)) { m.free.idx <- lavmodel@m.free.idx[[mm]] x.free.idx <- lavmodel@x.free.idx[[mm]] tmp[[mm]][m.free.idx] <- x[x.free.idx] } if (.hasSlot(lavmodel, "correlation")) { correlation <- lavmodel@correlation } else { correlation <- FALSE } # categorical? set categorical theta elements (if any) if (lavmodel@categorical || correlation) { nmat <- lavmodel@nmat if (lavmodel@representation == "LISREL") { for (g in 1:lavmodel@nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0L, nmat))[g] if (lavmodel@estimator %in% c( "ML", "WLS", "GLS", "DWLS", "ULS", "PML", "catML" )) { if (lavmodel@parameterization == "delta") { tmp[mm.in.group] <- setResidualElements.LISREL( MLIST = tmp[mm.in.group], num.idx = lavmodel@num.idx[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]] ) } else if (lavmodel@parameterization == "theta") { tmp[mm.in.group] <- setDeltaElements.LISREL( MLIST = tmp[mm.in.group], num.idx = lavmodel@num.idx[[g]] ) } } else if (lavmodel@estimator %in% c("MML", "FML")) { # ttt <- diag(tmp[mm.in.group]$theta) # diag(tmp[mm.in.group]$theta) <- as.numeric(NA) # if(length(lavmodel@num.idx[[g]]) > 0L) { # diag(tmp[mm.in.group]$theta)[ lavmodel@num.idx[[g]] ] <- # ttt[ lavmodel@num.idx[[g]] ] # } } } } else { cat("FIXME: deal with theta elements in the categorical case (RAM)") } } lavmodel@GLIST <- tmp lavmodel } # create a standalone GLIST, filled with (new) x values # (avoiding a copy of lavmodel) lav_model_x2GLIST <- function(lavmodel = NULL, x = NULL, type = "free", setDelta = TRUE, m.el.idx = NULL, x.el.idx = NULL) { if (.hasSlot(lavmodel, "correlation")) { correlation <- lavmodel@correlation } else { correlation <- FALSE } GLIST <- lavmodel@GLIST for (mm in 1:length(GLIST)) { # skip empty matrix if (nrow(GLIST[[mm]]) == 0L) { next } if (type == "free") { M.EL.IDX <- lavmodel@m.free.idx[[mm]] X.EL.IDX <- lavmodel@x.free.idx[[mm]] } else if (type == "unco") { M.EL.IDX <- lavmodel@m.free.idx[[mm]] X.EL.IDX <- lavmodel@x.unco.idx[[mm]] } else if (type == "full") { if (lavmodel@isSymmetric[mm]) { N <- ncol(GLIST[[mm]]) M.EL.IDX <- lav_matrix_vech_idx(N) } else { M.EL.IDX <- seq_len(length(GLIST[[mm]])) } X.EL.IDX <- 
seq_len(length(m.el.idx)) if (mm > 1) X.EL.IDX <- X.EL.IDX + sum(lavmodel@mmSize[1:(mm - 1)]) } else if (type == "custom") { # nothing to do, m.el.idx and x.el.idx should be given M.EL.IDX <- m.el.idx[[mm]] X.EL.IDX <- x.el.idx[[mm]] } # assign GLIST[[mm]][M.EL.IDX] <- x[X.EL.IDX] # make symmetric (if full) if (type == "full" && lavmodel@isSymmetric[mm]) { T <- t(GLIST[[mm]]) GLIST[[mm]][upper.tri(GLIST[[mm]])] <- T[upper.tri(T)] } } # # theta parameterization: delta must be reset! # if((lavmodel@categorical || correlation) && setDelta && # lavmodel@parameterization == "theta") { # nmat <- lavmodel@nmat # for(g in 1:lavmodel@nblocks) { # # which mm belong to group g? # mm.in.group <- 1:nmat[g] + cumsum(c(0L,nmat))[g] # GLIST[mm.in.group] <- # setDeltaElements.LISREL(MLIST = GLIST[mm.in.group], # num.idx = lavmodel@num.idx[[g]]) # } # } # in 0.6-13: we always set theta/delta if ((lavmodel@categorical || correlation) && setDelta) { nmat <- lavmodel@nmat if (lavmodel@representation == "LISREL") { for (g in 1:lavmodel@nblocks) { # which mm belong to group g? mm.in.group <- 1:nmat[g] + cumsum(c(0L, nmat))[g] if (lavmodel@parameterization == "delta") { GLIST[mm.in.group] <- setResidualElements.LISREL( MLIST = GLIST[mm.in.group], num.idx = lavmodel@num.idx[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]] ) } else if (lavmodel@parameterization == "theta") { GLIST[mm.in.group] <- setDeltaElements.LISREL( MLIST = GLIST[mm.in.group], num.idx = lavmodel@num.idx[[g]] ) } } # blocks } else { cat("FIXME: deal with theta elements in the categorical case (RAM)") } } GLIST } # derivative of model matrix (say, Psi, Theta) wrt the free elements # in that model matrix # returns a matrix with 0/1 entries # - rows are the nrow*ncol elements of the full matrix # - cols are the free parameters # # TOdo: use sparse matrices # lav_model_dmmdpar <- function(lavmodel, target = "theta", group = 1L) { stopifnot(group <= lavmodel@ngroups) # MLIST for this group nmat <- lavmodel@nmat # which mm belong to group g? mm.in.group <- 1:nmat[group] + cumsum(c(0L, nmat))[group] MLIST <- lavmodel@GLIST[mm.in.group] # find target model matrix mlist.idx <- which(names(MLIST) == target) if (length(mlist.idx) == 0L) { lav_msg_stop(gettextf( "model matrix \"%s\" not found. 
Available model matrices are:", target), paste(names(MLIST), collapse = " ")) } # target idx in GLIST target.idx <- cumsum(c(0L, nmat))[group] + mlist.idx # symmetric matrices (eg Psi, Theta) if (lavmodel@isSymmetric[[target.idx]]) { TARGET <- lavmodel@GLIST[[target.idx]] P <- nrow(TARGET) unique.idx <- unique(lavmodel@x.free.idx[[target.idx]]) row.idx <- match(lavmodel@x.free.idx[[target.idx]], unique.idx) out <- matrix(0L, nrow = P * P, ncol = length(unique.idx)) IDX <- cbind(lavmodel@m.free.idx[[target.idx]], row.idx) out[IDX] <- 1L # non-symmetric matrices (eg Lambda, Beta) } else { TARGET <- lavmodel@GLIST[[target.idx]] P <- nrow(TARGET) M <- ncol(TARGET) row.idx <- seq_len(length(lavmodel@x.free.idx[[target.idx]])) out <- matrix(0L, nrow = P * M, ncol = length(row.idx)) IDX <- cbind(lavmodel@m.free.idx[[target.idx]], row.idx) out[IDX] <- 1L } out } # backwards compatibility # getModelParameters <- lav_model_get_parameters # setModelParameters <- lav_model_set_parameters # x2GLIST <- lav_model_x2GLIST lavaan/R/lav_model.R0000644000176200001440000004170614627656441014031 0ustar liggesusers# constructor of the matrix lavoptions$representation # # initial version: YR 22/11/2010 # - YR 14 Jan 2014: moved to lav_model.R # - YR 18 Nov 2014: more efficient handling of linear equality constraints # - YR 02 Dec 2014: allow for bare-minimum parameter tables # - YR 25 Jan 2017: collect options in lavoptions # - YR 12 Mar 2021: add lavpta as argument; create model attributes (ma) # construct MATRIX lavoptions$representation of the model lav_model <- function(lavpartable = NULL, # nolint lavoptions = NULL, th.idx = list()) { # handle bare-minimum partables lavpartable <- lav_partable_complete(lavpartable) lavpta = lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) # global info from user model nblocks <- lav_partable_nblocks(lavpartable) ngroups <- lav_partable_ngroups(lavpartable) meanstructure <- any(lavpartable$op == "~1") correlation <- lavoptions$correlation if (is.null(correlation)) { correlation <- FALSE } categorical <- any(lavpartable$op == "|") if (categorical) { meanstructure <- TRUE # handle th.idx if length(th.idx) != nblocks if (nblocks != length(th.idx)) { th.idx <- rep(th.idx, each = nblocks) } } group.w.free <- any(lavpartable$lhs == "group" & lavpartable$op == "%") multilevel <- FALSE if (!is.null(lavpartable$level)) { nlevels <- lav_partable_nlevels(lavpartable) if (nlevels > 1L) { multilevel <- TRUE } } else { nlevels <- 1L } nefa <- lav_partable_nefa(lavpartable) if (nefa > 0L) { efa.values <- lav_partable_efa_values(lavpartable) } # check for simple equality constraints eq.simple <- any(lavpartable$free > 0L & duplicated(lavpartable$free)) if (eq.simple) { # just like in <0.5-18, add (temporary) 'unco' column # so we can fill in x.unco.idx lavpartable$unco <- integer(length(lavpartable$id)) idx.free <- which(lavpartable$free > 0L) lavpartable$unco[idx.free] <- seq_along(idx.free) } # handle variable definitions and (in)equality constraints tmp.con <- lav_constraints_parse( partable = lavpartable, constraints = NULL ) # handle *linear* equality constraints special if (tmp.con$ceq.linear.only.flag) { con.jac <- tmp.con$ceq.JAC con.lambda <- numeric(nrow(tmp.con$ceq.JAC)) attr(con.jac, "inactive.idx") <- integer(0L) attr(con.jac, "ceq.idx") <- seq_len(nrow(tmp.con$ceq.JAC)) } else { con.jac <- matrix(0, 0, 0) con.lambda <- numeric(0) } # select model matrices if (lavoptions$representation == "LISREL") { tmp.rep <- 
lav_lisrel(lavpartable, target = NULL, extra = TRUE) } else if (lavoptions$representation == "RAM") { tmp.rep <- lav_ram(lavpartable, target = NULL, extra = TRUE) } else { lav_msg_stop(gettextf( "%1$s argument must be either %2$s or %3$s", "representation", "LISREL", "RAM")) } if (lav_debug()) print(tmp.rep) # FIXME: check for non-existing parameters bad.idx <- which((tmp.rep$mat == "" | is.na(tmp.rep$row) | is.na(tmp.rep$col)) & !lavpartable$op %in% c("==", "<", ">", ":=")) if (length(bad.idx) > 0L) { this.formula <- paste(lavpartable$lhs[bad.idx[1]], lavpartable$op[bad.idx[1]], lavpartable$rhs[bad.idx[1]], sep = " " ) if (lavoptions$representation == "LISREL") { lav_msg_stop(gettextf( "a model parameter is not defined in the LISREL representation %s. Upgrade to latent variables or consider using representation = 'RAM'.", this.formula) ) } else { lav_msg_stop( gettextf("parameter is not defined: %s", this.formula) ) } } # prepare nG-sized slots tmp.ng <- sum(unlist(attr(tmp.rep, "mmNumber"))) tmp.glist <- vector(mode = "list", tmp.ng) names(tmp.glist) <- unlist(attr(tmp.rep, "mmNames")) dim.names <- vector(mode = "list", length = tmp.ng) is.symmetric <- logical(tmp.ng) mm.size <- integer(tmp.ng) m.free.idx <- m.user.idx <- vector(mode = "list", length = tmp.ng) x.free.idx <- x.unco.idx <- x.user.idx <- vector( mode = "list", length = tmp.ng ) # prepare nblocks-sized slots nvar <- integer(nblocks) nmat <- unlist(attr(tmp.rep, "mmNumber")) num.idx <- vector("list", length = nblocks) nexo <- integer(nblocks) ov.x.dummy.ov.idx <- vector(mode = "list", length = nblocks) ov.x.dummy.lv.idx <- vector(mode = "list", length = nblocks) ov.y.dummy.ov.idx <- vector(mode = "list", length = nblocks) ov.y.dummy.lv.idx <- vector(mode = "list", length = nblocks) ov.efa.idx <- vector(mode = "list", length = nblocks) lv.efa.idx <- vector(mode = "list", length = nblocks) offset <- 0L # keep track of ov.names across blocks for (g in 1:nblocks) { # observed and latent variables for this block ov.names <- lav_partable_vnames(lavpartable, "ov", block = g) ov.names.nox <- lav_partable_vnames(lavpartable, "ov.nox", block = g) ov.names.x <- lav_partable_vnames(lavpartable, "ov.x", block = g) ov.num <- lav_partable_vnames(lavpartable, "ov.num", block = g) if (lavoptions$conditional.x) { if (nlevels > 1L) { if (ngroups == 1L) { other.block.names <- lav_partable_vnames(lavpartable, "ov", block = seq_len(nblocks)[-g] ) } else { # TEST ME! # which group is this? 
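          # (example: with nlevels = 2, blocks are ordered level-within-group,
          #  so block g = 3 belongs to group ceiling(3 / 2) = 2)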
this.group <- ceiling(g / nlevels) blocks.within.group <- (this.group - 1L) * nlevels + seq_len(nlevels) other.block.names <- lav_partable_vnames(lavpartable, "ov", block = blocks.within.group[-g] ) } if (length(ov.names.x) > 0L) { idx <- which(ov.names.x %in% other.block.names) if (length(idx) > 0L) { ov.names.nox <- unique(c(ov.names.nox, ov.names.x[idx])) ov.names.x <- ov.names.x[-idx] ov.names <- ov.names.nox } } } nvar[g] <- length(ov.names.nox) if (correlation) { num.idx[[g]] <- integer(0L) } else { num.idx[[g]] <- which(ov.names.nox %in% ov.num) } } else { nvar[g] <- length(ov.names) if (correlation) { num.idx[[g]] <- integer(0L) } else { num.idx[[g]] <- which(ov.names %in% ov.num) } } nexo[g] <- length(ov.names.x) if (nefa > 0L) { lv.names <- lav_partable_vnames(lavpartable, "lv", block = g) } # model matrices for this block mm.number <- attr(tmp.rep, "mmNumber")[[g]] mm.names <- attr(tmp.rep, "mmNames")[[g]] mm.symmetric <- attr(tmp.rep, "mmSymmetric")[[g]] mm.dim.names <- attr(tmp.rep, "mmDimNames")[[g]] mm.rows <- attr(tmp.rep, "mmRows")[[g]] mm.cols <- attr(tmp.rep, "mmCols")[[g]] for (mm in 1:mm.number) { # offset in tmp.glist offset <- offset + 1L # matrix size, symmetric, dim.names if (mm.symmetric[mm]) { tmp.n <- mm.rows[mm] mm.size <- as.integer(tmp.n * (tmp.n + 1) / 2) } else { mm.size <- as.integer(mm.rows[mm] * mm.cols[mm]) } mm.size[offset] <- mm.size is.symmetric[offset] <- mm.symmetric[mm] dim.names[[offset]] <- mm.dim.names[[mm]] # select elements for this matrix idx <- which(lavpartable$block == g & tmp.rep$mat == mm.names[mm]) # create empty `pattern' matrix # FIXME: one day, we may want to use sparse matrices... # but they should not slow things down! tmp <- matrix(0L, nrow = mm.rows[mm], ncol = mm.cols[mm] ) # 1. first assign free values only, to get vector index # -> to be used in lav_model_objective tmp[cbind(tmp.rep$row[idx], tmp.rep$col[idx])] <- lavpartable$free[idx] if (mm.symmetric[mm]) { # NOTE: we assume everything is in the UPPER tri! tmp.tt <- t(tmp) tmp[lower.tri(tmp)] <- tmp.tt[lower.tri(tmp.tt)] } m.free.idx[[offset]] <- which(tmp > 0) x.free.idx[[offset]] <- tmp[which(tmp > 0)] # 2. if simple equality constraints, unconstrained free parameters # -> to be used in lav_model_gradient if (eq.simple) { tmp[cbind( tmp.rep$row[idx], tmp.rep$col[idx] )] <- lavpartable$unco[idx] if (mm.symmetric[mm]) { # NOTE: we assume everything is in the UPPER tri! tmp.tt <- t(tmp) tmp[lower.tri(tmp)] <- tmp.tt[lower.tri(tmp.tt)] } # m.unco.idx[[offset]] <- which(tmp > 0) x.unco.idx[[offset]] <- tmp[which(tmp > 0)] } else { # m.unco.idx[[offset]] <- m.free.idx[[offset]] x.unco.idx[[offset]] <- x.free.idx[[offset]] } # 3. general mapping between user and tmp.glist tmp[cbind(tmp.rep$row[idx], tmp.rep$col[idx])] <- lavpartable$id[idx] if (mm.symmetric[mm]) { tmp.tt <- t(tmp) tmp[lower.tri(tmp)] <- tmp.tt[lower.tri(tmp.tt)] } m.user.idx[[offset]] <- which(tmp > 0) x.user.idx[[offset]] <- tmp[which(tmp > 0)] # 4. now assign starting/fixed values # create empty matrix # FIXME: again, we may want to use sparse matrices here... tmp <- matrix(0.0, nrow = mm.rows[mm], ncol = mm.cols[mm] ) tmp[cbind(tmp.rep$row[idx], tmp.rep$col[idx])] <- lavpartable$start[idx] if (mm.symmetric[mm]) { tmp.tt <- t(tmp) tmp[lower.tri(tmp)] <- tmp.tt[lower.tri(tmp.tt)] } # 4b. override with cov.x (if conditional.x = TRUE) # new in 0.6-1 # shouldn't be needed, if lavpartable$start contains cov.x values # if(mm.names[mm] == "cov.x") { # tmp <- cov.x[[g]] # } # 4c. 
override with mean.x (if conditional.x = TRUE) # new in 0.6-1 # shouldn't be needed, if lavpartable$start contains mean.x values # if(mm.names[mm] == "mean.x") { # tmp <- as.matrix(mean.x[[g]]) # } # representation specific stuff if (lavoptions$representation == "LISREL" && mm.names[mm] == "lambda") { ov.dummy.names.nox <- attr(tmp.rep, "ov.dummy.names.nox")[[g]] ov.dummy.names.x <- attr(tmp.rep, "ov.dummy.names.x")[[g]] ov.dummy.names <- c(ov.dummy.names.nox, ov.dummy.names.x) # define dummy latent variables if (length(ov.dummy.names)) { # in this case, lv.names will be extended with the dummys tmp.lv.names <- mm.dim.names$psi[[1]] row.tmp.idx <- match(ov.dummy.names, ov.names) col.tmp.idx <- match(ov.dummy.names, tmp.lv.names) # Fix lambda values to 1.0 tmp[cbind(row.tmp.idx, col.tmp.idx)] <- 1.0 ov.x.dummy.ov.idx[[g]] <- match(ov.dummy.names.x, ov.names) ov.x.dummy.lv.idx[[g]] <- match(ov.dummy.names.x, tmp.lv.names) ov.y.dummy.ov.idx[[g]] <- match(ov.dummy.names.nox, ov.names) ov.y.dummy.lv.idx[[g]] <- match(ov.dummy.names.nox, tmp.lv.names) } } # representation specific if (lavoptions$representation == "LISREL" && mm.names[mm] == "delta") { # only categorical values are listed in the lavpartable # but all remaining values should be 1.0 idx <- which(tmp[, 1L] == 0.0) tmp[idx, 1L] <- 1.0 } # representation specific if (lavoptions$representation == "RAM" && mm.names[mm] == "ov.idx") { tmp[1, ] <- attr(tmp.rep, "ov.idx")[[g]] } # assign matrix to tmp.glist tmp.glist[[offset]] <- tmp } # mm # efa related info if (nefa > 0L) { ov.efa.idx[[g]] <- vector("list", length = nefa) lv.efa.idx[[g]] <- vector("list", length = nefa) for (set in seq_len(nefa)) { # determine ov idx for this set ov.efa <- unique(lavpartable$rhs[lavpartable$op == "=~" & lavpartable$block == g & lavpartable$efa == efa.values[set]]) ov.efa.idx[[g]][[set]] <- match(ov.efa, ov.names) lv.efa <- unique(lavpartable$lhs[lavpartable$op == "=~" & lavpartable$block == g & lavpartable$efa == efa.values[set]]) lv.efa.idx[[g]][[set]] <- match(lv.efa, lv.names) } names(ov.efa.idx[[g]]) <- efa.values names(lv.efa.idx[[g]]) <- efa.values } # efa } # g # fixed.x parameters? # fixed.x <- any(lavpartable$exo > 0L & lavpartable$free == 0L) # if(categorical) { # fixed.x <- TRUE # } # dirty hack to mimic MUML if (!is.null(lavoptions$tech.muml.scale)) { lav_msg_warn(gettext("using muml scale in group 2")) # find matrix lambda.idx <- which(names(tmp.glist) == "lambda")[2L] # find rows/cols b.names <- paste0("b", ov.names) ## ad-hoc assumption!!! tmp.cols <- match(b.names, tmp.lv.names) tmp.rows <- seq_len(nvar[2]) stopifnot(length(tmp.cols) == length(tmp.rows)) tmp.glist[[lambda.idx]][cbind(tmp.rows, tmp.cols)] <- lavoptions$tech.muml.scale } # which free parameters are observed variances? 
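  # (these are the free-parameter indices of "x ~~ x" rows where x is an
  #  observed variable; they are stored in the x.free.var.idx slot below)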
  ov.names <- vnames(lavpartable, "ov")
  x.free.var.idx <- lavpartable$free[lavpartable$free &
    # !duplicated(lavpartable$free) &
    lavpartable$lhs %in% ov.names &
    lavpartable$op == "~~" &
    lavpartable$lhs == lavpartable$rhs]

  rv.lv <- rv.ov <- list()
  if (multilevel) {
    # store information about random slopes (if any)
    lv.names <- lav_partable_vnames(lavpartable, "lv")
    # we should also add split-y names (x) to lv.names
    # FIXME: make this work for multiple-group multilevel
    level.values <- lav_partable_level_values(lavpartable)
    ovx1 <- lavNames(lavpartable, "ov.x", level = level.values[1])
    ovx2 <- lavNames(lavpartable, "ov.x", level = level.values[2])
    ovx12 <- ovx2[ovx2 %in% ovx1]
    lv.names <- c(lv.names, ovx12)

    # RV LV
    rv.idx <- which(nchar(lavpartable$rv) > 0L &
      lavpartable$level == level.values[1] &
      lavpartable$rhs %in% lv.names)
    if (length(rv.idx)) {
      rv.lv <- lapply(rv.idx, function(x) {
        c(lavpartable$lhs[x], lavpartable$rhs[x])
      })
      names(rv.lv) <- lavpartable$rv[rv.idx]
    }

    # RV OV
    rv.idx <- which(nchar(lavpartable$rv) > 0L &
      lavpartable$level == level.values[1] &
      !lavpartable$rhs %in% lv.names)
    if (length(rv.idx)) {
      rv.ov <- lapply(rv.idx, function(x) {
        c(lavpartable$lhs[x], lavpartable$rhs[x])
      })
      names(rv.ov) <- lavpartable$rv[rv.idx]
    }
  } # multilevel

  # new in 0.6-9: model properties
  modprop <- lav_model_properties(
    GLIST = tmp.glist,
    lavpartable = lavpartable,
    nmat = nmat,
    m.free.idx = m.free.idx
  )

  tmp.model <- new("lavModel",
    GLIST = tmp.glist,
    dimNames = dim.names,
    isSymmetric = is.symmetric,
    mmSize = mm.size,
    representation = lavoptions$representation,
    modprop = modprop,
    meanstructure = meanstructure,
    correlation = correlation,
    categorical = categorical,
    multilevel = multilevel,
    link = lavoptions$link,
    nblocks = nblocks,
    ngroups = ngroups, # breaks rsem????
nefa = nefa, group.w.free = group.w.free, nmat = nmat, nvar = nvar, num.idx = num.idx, th.idx = th.idx, nx.free = max(lavpartable$free), nx.unco = if (is.null(lavpartable$unco)) { max(lavpartable$free) } else { max(lavpartable$unco) }, nx.user = max(lavpartable$id), m.free.idx = m.free.idx, x.free.idx = x.free.idx, x.free.var.idx = x.free.var.idx, # m.unco.idx=m.unco.idx, x.unco.idx = x.unco.idx, m.user.idx = m.user.idx, x.user.idx = x.user.idx, x.def.idx = which(lavpartable$op == ":="), x.ceq.idx = which(lavpartable$op == "=="), x.cin.idx = which(lavpartable$op == ">" | lavpartable$op == "<"), ceq.simple.only = tmp.con$ceq.simple.only, ceq.simple.K = tmp.con$ceq.simple.K, eq.constraints = tmp.con$ceq.linear.only.flag, eq.constraints.K = tmp.con$ceq.JAC.NULL, eq.constraints.k0 = tmp.con$ceq.rhs.NULL, def.function = tmp.con$def.function, ceq.function = tmp.con$ceq.function, ceq.JAC = tmp.con$ceq.JAC, ceq.rhs = tmp.con$ceq.rhs, ceq.jacobian = tmp.con$ceq.jacobian, ceq.linear.idx = tmp.con$ceq.linear.idx, ceq.nonlinear.idx = tmp.con$ceq.nonlinear.idx, cin.function = tmp.con$cin.function, cin.JAC = tmp.con$cin.JAC, cin.rhs = tmp.con$cin.rhs, cin.jacobian = tmp.con$cin.jacobian, cin.linear.idx = tmp.con$cin.linear.idx, cin.nonlinear.idx = tmp.con$cin.nonlinear.idx, con.jac = con.jac, con.lambda = con.lambda, nexo = nexo, fixed.x = lavoptions$fixed.x, conditional.x = lavoptions$conditional.x, parameterization = lavoptions$parameterization, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.efa.idx = ov.efa.idx, lv.efa.idx = lv.efa.idx, rv.lv = rv.lv, rv.ov = rv.ov, estimator = lavoptions$estimator, estimator.args = lavoptions$estimator.args ) if (lav_debug()) { cat("lavaan debug: lavaanModel\n") print(str(tmp.model)) print(tmp.model@GLIST) } tmp.model } # for backwards compatibility # tmp.model <- lav_model lavaan/R/lav_h1_implied.R0000644000176200001440000001547014627656441014743 0ustar liggesusers# compute sample statistics for the unrestricted (h1) model # and also the logl (if available) lav_h1_implied_logl <- function(lavdata = NULL, lavsamplestats = NULL, lavpartable = NULL, # multilevel + missing lavoptions = NULL) { lavpta <- NULL if (!is.null(lavpartable)) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } if (lavdata@nlevels == 1L) { if (lavsamplestats@missing.flag) { if (lavoptions$conditional.x) { implied <- list() # not available yet } else { implied <- list( cov = lapply( lavsamplestats@missing.h1, "[[", "sigma" ), mean = lapply( lavsamplestats@missing.h1, "[[", "mu" ), th = lavsamplestats@th, group.w = lavsamplestats@group.w ) } } else { if (lavoptions$conditional.x) { implied <- list( res.cov = lavsamplestats@res.cov, res.int = lavsamplestats@res.int, res.slopes = lavsamplestats@res.slopes, cov.x = lavsamplestats@cov.x, mean.x = lavsamplestats@mean.x, res.th = lavsamplestats@res.th, group.w = lavsamplestats@group.w ) } else { implied <- list( cov = lavsamplestats@cov, mean = lavsamplestats@mean, th = lavsamplestats@th, group.w = lavsamplestats@group.w ) } } # complete data logl <- lav_h1_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } else { # estimate Mu.B, Mu.W, Sigma.B and Sigma.W for unrestricted model ngroups <- lavdata@ngroups nlevels <- lavdata@nlevels implied <- list( cov = vector("list", length = ngroups * nlevels), mean = vector("list", length = ngroups * nlevels) ) 
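  # --- illustrative aside (not in the original source) ----------------------
  # For complete single-level data, the unrestricted (h1) log-likelihood
  # computed below has a closed form:
  #   logl = -N/2 * (p * log(2*pi) + log det(S) + p),
  # where S is the ML (divisor N) sample covariance matrix. A minimal sketch
  # in base R, using an arbitrary numeric data set:
  if (FALSE) {
    Y <- as.matrix(iris[, 1:4])
    N <- nrow(Y)
    p <- ncol(Y)
    S <- cov(Y) * (N - 1) / N # ML version of the sample covariance
    logl.h1 <- -N / 2 * (p * log(2 * pi) + log(det(S)) + p)
    # for a lavaan fit on the same data, this should match
    # fitMeasures(fit, "unrestricted.logl")
  }
  # ---------------------------------------------------------------------------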
loglik.group <- numeric(lavdata@ngroups) for (g in 1:lavdata@ngroups) { if (lav_verbose()) { cat("\n\nfitting unrestricted (H1) model in group ", g, "\n") } if (lavsamplestats@missing.flag) { # missing data # 1. first a few EM iteration faking complete data # Y1 <- lavdata@X[[g]] # cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]] # Y2.complete <- unname(as.matrix(aggregate(Y1, # by = list(cluster.idx), # FUN = function(x) { # if( all(is.na(x)) ) { # all elements are NA # as.numeric(0) # in this cluster # } else { # mean(x, na.rm = TRUE) # } # })[,-1])) # YLp = lavsamplestats@YLp[[g]] # YLp[[2]]$Y2 <- Y2.complete # OUT <- lav_mvnorm_cluster_em_sat( # YLp = YLp, # Lp = lavdata@Lp[[g]], # verbose = TRUE, # for now # tol = 1e-04, # min.variance = 1e-05, # max.iter = 5L) ## create tmp lav1, only for this group # implied$cov[[ (g-1)*nlevels + 1L]] <- OUT$Sigma.W # implied$cov[[ (g-1)*nlevels + 2L]] <- OUT$Sigma.B # implied$mean[[(g-1)*nlevels + 1L]] <- OUT$Mu.W # implied$mean[[(g-1)*nlevels + 2L]] <- OUT$Mu.B # loglik.group[g] <- OUT$logl # lavh1 <- list(implied = implied, logl = sum(loglik.group)) # lavpartable <- lav_partable_unrestricted(lavdata = lavdata, # lavsamplestats = lavsamplestats, lavoptions = lavoptions, # lavpta = lavpta, lavh1 = lavh1) # lavpartable$lower <- rep(-Inf, length(lavpartable$lhs)) # var.idx <- which(lavpartable$free > 0L & # lavpartable$op == "~~" & # lavpartable$lhs == lavpartable$rhs) # lavpartable$lower[var.idx] <- 1e-05 lavpartable <- lav_partable_unrestricted_chol( lavdata = lavdata, lavoptions = lavoptions, lavpta = lavpta ) lavoptions2 <- lavoptions lavoptions2$se <- "none" lavoptions2$test <- "none" lavoptions2$do.fit <- TRUE lavoptions2$h1 <- FALSE lavoptions2$baseline <- FALSE lavoptions2$fixed.x <- FALSE # even if model uses fixed.x=TRUE lavoptions2$model.type <- "unrestricted" lavoptions2$optim.attempts <- 4L lavoptions2$check.gradient <- FALSE lavoptions2$optim.force.convergence <- TRUE # for now... lavoptions2$control <- list(rel.tol = 1e-7) # FIT <- lavaan(lavpartable, slotOptions = lavoptions2, # slotSampleStats = lavsamplestats, # slotData = lavdata, sloth1 = lavh1) FIT <- lavaan(lavpartable, slotOptions = lavoptions2, slotSampleStats = lavsamplestats, slotData = lavdata, warn = FALSE ) OUT <- list( Sigma.W = FIT@implied$cov[[1]], Sigma.B = FIT@implied$cov[[2]], Mu.W = FIT@implied$mean[[1]], Mu.B = FIT@implied$mean[[2]], logl = FIT@loglik$loglik ) # if(lavoptions$fixed.x) { # OUT$logl <- OUT$logl - lavsamplestats@YLp[[g]][[2]]$loglik.x # } } else { # complete data OUT <- lav_mvnorm_cluster_em_sat( YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], tol = 1e-04, # option? min.variance = 1e-05, # option? max.iter = 5000L ) # option? } if (lav_verbose()) { cat("\n") } # if any near-zero within variance(s), produce warning here zero.var <- which(diag(OUT$Sigma.W) <= 1e-05) if (length(zero.var)) { gtxt <- if (ngroups > 1L) { gettextf(" in group %s.", g) } else { " " } lav_msg_warn(gettextf( "H1 estimation resulted in a within covariance matrix %1$s with (near) zero variances for some of the level-1 variables: %2$s", gtxt, lav_msg_view(lavdata@ov.names.l[[g]][[1]][zero.var])) ) } # new in 0.6-18: ensure Mu.W[both.idx] is zero (post-estimation!) 
      both.idx <- lavdata@Lp[[g]]$both.idx[[2]]
      tmp <- OUT$Mu.W[both.idx] + OUT$Mu.B[both.idx]
      OUT$Mu.W[both.idx] <- 0
      OUT$Mu.B[both.idx] <- tmp

      # store in implied
      implied$cov[[(g - 1) * nlevels + 1L]] <- OUT$Sigma.W
      implied$cov[[(g - 1) * nlevels + 2L]] <- OUT$Sigma.B
      implied$mean[[(g - 1) * nlevels + 1L]] <- OUT$Mu.W
      implied$mean[[(g - 1) * nlevels + 2L]] <- OUT$Mu.B

      # store logl per group
      loglik.group[g] <- OUT$logl
    }
    logl <- list(loglik = sum(loglik.group), loglik.group = loglik.group)
  }

  list(implied = implied, logl = logl)
}
lavaan/R/ctr_estfun.R0000644000176200001440000002602614627656441014231 0ustar liggesusers
# contributed by Ed Merkle (17 Jan 2013)
# WLS version contributed by Franz Classe (March 2024)
# (adapted for inclusion in lavaan by YR)

# YR 12 Feb 2013: small changes to match the results of lav_model_gradient
#                 in the multiple group case
# YR 30 May 2014: handle 1-variable case (fixing apply in lines 56, 62, 108)
# YR 05 Nov 2015: add remove.duplicated = TRUE, to cope with strucchange in
#                 case of simple equality constraints
# YR 19 Nov 2015: if constraints have been used, compute case-wise Lagrange
#                 multipliers, and define the scores as: SC + (t(R) lambda)
# YR 05 Feb 2016: catch conditional.x = TRUE: no support (for now), until
#                 we can use the generic 0.6 infrastructure for scores,
#                 including the missing-values case
# YR 16 Feb 2016: adapt to changed @Mp slot elements; add remove.empty.cases=
#                 argument
# YR 12 Mar 2024: make lintr (more) happy; include WLS code from Franz Classe
#                 move ML-specific code to lav_scores_ml() function

estfun.lavaan <- lavScores <- function(object, scaling = FALSE, # nolint
                                       ignore.constraints = FALSE,
                                       remove.duplicated = TRUE,
                                       remove.empty.cases = TRUE) {
  stopifnot(inherits(object, "lavaan"))

  # what if estimator is not ML or WLS?
  # avoid hard error (using stop); throw a warning, and return an empty matrix
  if (!object@Options$estimator %in% c("ML", "WLS")) {
    lav_msg_warn(gettext("scores only available if estimator is ML or WLS"))
    return(matrix(0, 0, 0))
  }

  # check if conditional.x = TRUE
  if (object@Model@conditional.x) {
    lav_msg_stop(gettext("scores not available (yet) if conditional.x = TRUE"))
  }

  # shortcuts
  lavdata <- object@Data
  lavmodel <- object@Model
  lavsamplestats <- object@SampleStats
  lavoptions <- object@Options

  ## number variables/sample size
  # ntab <- unlist(lavsamplestats@nobs)
  ## change in 0.5-17: we keep the 'empty cases'
  ## and 'fill' in the scores at their 'case.idx'
  ## later, we remove the 'empty rows'
  # ntot <- max( object@Data@case.idx[[ object@Data@ngroups ]] )
  ntab <- unlist(lavdata@norig)
  ntot <- sum(ntab)
  npar <- lav_object_inspect_npar(object)

  if (object@Options$estimator == "ML") {
    moments <- fitted(object)
    score_matrix <- lav_scores_ml(
      ntab = ntab,
      ntot = ntot,
      npar = npar,
      moments = moments,
      lavdata = lavdata,
      lavsamplestats = lavsamplestats,
      lavmodel = lavmodel,
      lavoptions = lavoptions,
      scaling = scaling
    )
  } else if (object@Options$estimator == "WLS") {
    # check if ALL observed variables are ordered
    ov.names <- unlist(lavdata@ov.names)
    ov.idx <- which(lavdata@ov$name %in% ov.names)
    if (!all(lavdata@ov$type[ov.idx] == "ordered")) {
      lav_msg_stop(gettext(
        "WLS scores only available if all observed variables are ordered."))
    }
    # compute WLS scores
    score_matrix <- lav_scores_wls(
      ntab = ntab,
      ntot = ntot,
      npar = npar,
      lavdata = lavdata,
      lavsamplestats = lavsamplestats,
      lavmodel = lavmodel,
      lavoptions = lavoptions
    )
  } else {
    # should not happen
    lav_msg_fixme("this should not happen")
  }

  # handle empty rows
  if (remove.empty.cases) {
    # empty.idx <- which( apply(score_matrix, 1L,
    #   function(x) sum(is.na(x))) == ncol(score_matrix) )
    empty.idx <- unlist(lapply(lavdata@Mp, "[[", "empty.idx"))
    if (length(empty.idx) > 0L) {
      score_matrix <- score_matrix[-empty.idx, , drop = FALSE]
    }
  }

  # provide column names
  colnames(score_matrix) <- names(lav_object_inspect_coef(object,
    type = "free", add.labels = TRUE
  ))

  # handle general constraints, so that the sum of the columns equals zero
  if (!ignore.constraints &&
      sum(lavmodel@ceq.linear.idx, lavmodel@ceq.nonlinear.idx,
          lavmodel@cin.linear.idx, lavmodel@cin.nonlinear.idx) > 0) {
    r_matrix <- object@Model@con.jac[, ]
    pre <- lav_constraints_lambda_pre(object)
    # LAMBDA <- -1 * t(pre %*% t(score_matrix))
    # RLAMBDA <- t(t(r_matrix) %*% t(LAMBDA))
    score_matrix <- score_matrix - t(t(r_matrix) %*% pre %*% t(score_matrix))
  }

  # handle simple equality constraints
  if (remove.duplicated && lavmodel@eq.constraints) {
    simple.flag <- lav_constraints_check_simple(lavmodel)
    if (simple.flag) {
      k_matrix <- lav_constraints_R2K(lavmodel)
      score_matrix <- score_matrix %*% k_matrix
    } else {
      lav_msg_warn(gettext(
        "remove.duplicated is TRUE, but equality constraints do not appear to be simple; returning full scores"))
    }
  }

  score_matrix
}

lav_scores_ml <- function(ntab = 0L, ntot = 0L, npar = 0L,
                          moments = NULL, lavdata = NULL,
                          lavsamplestats = NULL, lavmodel = NULL,
                          lavoptions = NULL, scaling = FALSE) {
  score_matrix <- matrix(NA, ntot, npar)

  # Delta matrix
  Delta <- computeDelta(lavmodel = lavmodel)

  # rename moments
  moments.groups <- moments
  for (g in 1:lavsamplestats@ngroups) {
    if (lavsamplestats@ngroups > 1) {
      moments <- moments.groups[[g]]
    }
    sigma_hat <- moments$cov
    if (lavoptions$likelihood == "wishart") {
      nobs1 <- lavsamplestats@nobs[[g]] / (lavsamplestats@nobs[[g]] - 1)
    } else {
      nobs1 <- 1
    }
    if
    (!lavsamplestats@missing.flag) { # complete data
      # if(lavmodel@meanstructure) {
      # mean structure
      nvar <- ncol(lavsamplestats@cov[[g]])
      mu_hat <- moments$mean
      X <- lavdata@X[[g]]
      sigma_inv <- inv.chol(sigma_hat, logdet = FALSE)
      group.w <- (unlist(lavsamplestats@nobs) / lavsamplestats@ntotal)
      J <- matrix(1, 1L, ntab[g]) ## FIXME: needed? better maybe rowSums/colSums?
      J2 <- matrix(1, nvar, nvar)
      diag(J2) <- 0.5
      if (lavmodel@meanstructure) {
        ## scores_h1 (H1 = saturated model)
        mean.diff <- t(t(X) - mu_hat %*% J)
        dx_mu <- -1 * mean.diff %*% sigma_inv
        dx_sigma <- t(matrix(apply(
          mean.diff, 1L,
          function(x) {
            lav_matrix_vech(-J2 * (sigma_inv %*%
              (tcrossprod(x) * nobs1 - sigma_hat) %*% sigma_inv))
          }
        ), ncol = nrow(mean.diff)))
        scores_h1 <- cbind(dx_mu, dx_sigma)
      } else {
        mean.diff <- t(t(X) - lavsamplestats@mean[[g]] %*% J)
        dx_sigma <- t(matrix(apply(
          mean.diff, 1L,
          function(x) {
            lav_matrix_vech(-J2 * (sigma_inv %*%
              (tcrossprod(x) * nobs1 - sigma_hat) %*% sigma_inv))
          }
        ), ncol = nrow(mean.diff)))
        scores_h1 <- dx_sigma
      }
      ## FIXME? Seems like we would need group.w even in the
      ## complete-data case:
      ## if(scaling){
      ##   scores_h1 <- group.w[g] * scores_h1
      ## }
      # } else {
      #   ## no mean structure
      #   stop("Score calculation with no mean structure is not implemented.")
      # }
    } else { # incomplete data
      nsub <- ntab[g]
      M <- lavsamplestats@missing[[g]]
      Mp <- lavdata@Mp[[g]]
      # pat.idx <- match(MP1$id, MP1$order)
      group.w <- (unlist(lavsamplestats@nobs) / lavsamplestats@ntotal)
      mu_hat <- moments$mean
      nvar <- ncol(lavsamplestats@cov[[g]])
      score.sigma <- matrix(0, nsub, nvar * (nvar + 1) / 2)
      score.mu <- matrix(0, nsub, nvar)
      for (p in seq_len(length(M))) {
        ## Data
        # X <- M[[p]][["X"]]
        case.idx <- Mp$case.idx[[p]]
        var.idx <- M[[p]][["var.idx"]]
        X <- lavdata@X[[g]][case.idx, var.idx, drop = FALSE]
        nobs <- M[[p]][["freq"]]

        ## Which unique entries of covariance matrix are estimated?
        ## (Used to keep track of scores in score.sigma)
        var.idx.mat <- tcrossprod(var.idx)
        sigma.idx <- which(var.idx.mat[lower.tri(var.idx.mat, diag = TRUE)] == 1)

        J <- matrix(1, 1L, nobs) # [var.idx]
        J2 <- matrix(1, nvar, nvar)[var.idx, var.idx, drop = FALSE]
        diag(J2) <- 0.5
        sigma_inv <- inv.chol(sigma_hat[var.idx, var.idx, drop = FALSE],
          logdet = FALSE
        )
        Mu <- mu_hat[var.idx]
        mean.diff <- t(t(X) - Mu %*% J)

        ## Scores for missing pattern p within group g
        score.mu[case.idx, var.idx] <- -1 * mean.diff %*% sigma_inv
        score.sigma[case.idx, sigma.idx] <- t(matrix(apply(
          mean.diff, 1L,
          function(x) {
            lav_matrix_vech(-J2 * (sigma_inv %*% (tcrossprod(x) -
              sigma_hat[var.idx, var.idx, drop = FALSE]) %*% sigma_inv))
          }
        ), ncol = nrow(mean.diff)))
      }

      scores_h1 <- cbind(score.mu, score.sigma)
      if (scaling) {
        scores_h1 <- group.w[g] * scores_h1
      }
    } # missing

    # if(lavmodel@eq.constraints) {
    #   Delta <- Delta %*% lavmodel@eq.constraints.K
    #   #x <- as.numeric(lavmodel@eq.constraints.K %*% x) +
    #   #     lavmodel@eq.constraints.k0
    # }
    wi <- lavdata@case.idx[[g]]
    score_matrix[wi, ] <- -scores_h1 %*% Delta[[g]]
    if (scaling) {
      score_matrix[wi, ] <- (-1 / ntot) * score_matrix[wi, ]
    }
  } # g

  score_matrix
}

# this function is based on code originally written by Franz Classe (Munich)
lav_scores_wls <- function(ntab = 0L, ntot = 0L, npar = 0L,
                           lavdata = NULL, lavsamplestats = NULL,
                           lavmodel = NULL, lavoptions = NULL) {
  # internal function
  doDummySingleVar <- function(X, lv, ntot, num) {
    Xd <- matrix(NA, nrow = ntot, ncol = lv[num] - 1)
    x <- X[, num]
    minx <- min(x)
    categ <- minx - 1
    v <- 1
    while (categ < lv[num] - 1) {
      categ <- categ + 1
      Xd[, v] <- ifelse(x > categ, 1, 0)
      v <- v + 1
    }
    Xd
  }

  # container for scores
  score_matrix <- matrix(NA, ntot, npar)

  # Delta matrix
  Delta <- computeDelta(lavmodel = lavmodel)

  # shortcuts
  lv <- lavdata@ov[["nlev"]]

  for (g in 1:lavsamplestats@ngroups) {
    nvar <- ncol(lavsamplestats@cov[[g]])
    X <- lavdata@X[[g]]

    # convert categorical data to dummy variables
    # FIXME: skip continuous variables
    Xd <- do.call(
      cbind,
      lapply(1:nvar, function(i) doDummySingleVar(X, lv, ntot, i))
    )

    # e1
    musd <- colMeans(Xd)
    e1 <- t(t(Xd) - musd)

    # e2
    mus <- colMeans(X)
    y_minus_mu <- t(apply(X, 1L, function(x) x - mus))
    s_vech <- t(apply(y_minus_mu, 1L, function(i) {
      lavaan::lav_matrix_vech(tcrossprod(i), diagonal = FALSE)
    }))
    # s = c( (y1-mu1)(y2-mu2) ....
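    # --- illustrative aside (not in the original source) ----------------------
    # doDummySingleVar() above recodes an ordinal variable with K observed
    # levels into K - 1 cumulative dummy columns: column v is 1 when the
    # response exceeds category v. A minimal sketch of the same recoding
    # (assuming the lowest category is 1):
    if (FALSE) {
      x <- c(1, 2, 3, 3, 1) # hypothetical ordinal responses, K = 3
      K <- 3
      Xd <- sapply(seq_len(K - 1), function(v) as.integer(x > v))
      Xd # two columns: I(x > 1) and I(x > 2)
    }
    # ---------------------------------------------------------------------------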
sigma <- colMeans(s_vech) e2 <- t(apply(s_vech, 1L, function(x) x - sigma)) # e e <- cbind(e1, e2) # weight matrix W <- lavsamplestats@WLS.V[[g]] # combine matrices wi <- lavdata@case.idx[[g]] score_matrix[wi, ] <- t(t(Delta[[g]]) %*% W %*% t(e)) } # g score_matrix } lavaan/R/lav_sam_step0.R0000644000176200001440000000430614627656441014617 0ustar liggesusers# STEP 0: process full model, without fitting lav_sam_step0 <- function(cmd = "sem", model = NULL, data = NULL, se = "twostep", sam.method = "local", dotdotdot = NULL) { # create dotdotdot0 for dummy fit dotdotdot0 <- dotdotdot # parse model, so we can inspect a few features flat.model <- lavParseModelString(model) # remove do.fit option if present dotdotdot0$do.fit <- NULL # if (sam.method %in% c("local", "fsr")) { dotdotdot0$sample.icov <- FALSE # if N < nvar } dotdotdot0$se <- "none" dotdotdot0$test <- "none" dotdotdot0$verbose <- FALSE # no output for this 'dummy' FIT dotdotdot0$conditional.x <- FALSE dotdotdot0$fixed.x <- TRUE dotdotdot0$ceq.simple <- TRUE # if not the default yet dotdotdot0$check.lv.interaction <- FALSE # we allow for it # dotdotdot0$cat.wls.w <- FALSE # no weight matrix if categorical # note: this break the computation of twostep standard errors... # any lv interaction terms? if (length(lavNames(flat.model, "lv.interaction")) > 0L) { dotdotdot0$meanstructure <- TRUE dotdotdot0$marker.int.zero <- TRUE } # initial processing of the model, no fitting FIT <- do.call(cmd, args = c(list( model = flat.model, data = data, do.fit = FALSE ), dotdotdot0) ) # restore options # do.fit FIT@Options$do.fit <- TRUE # FIT@Options$cat.wls.w <- TRUE # sample.icov if (sam.method %in% c("local", "fsr")) { FIT@Options$sample.icov <- TRUE } # se FIT@Options$se <- se # test if (!is.null(dotdotdot$test)) { FIT@Options$test <- dotdotdot$test } else { FIT@Options$test <- "standard" } # adjust parameter table: PT <- FIT@ParTable # check parameter table PT$est <- PT$se <- NULL # est equals ustart by default (except exo values) PT$est <- PT$ustart if (any(PT$exo > 0L)) { PT$est[PT$exo > 0L] <- PT$start[PT$exo > 0L] } # clear se values (needed here?) only for global approach to compute SE PT$se <- rep(as.numeric(NA), length(PT$lhs)) PT$se[PT$free == 0L & !is.na(PT$ustart)] <- 0.0 FIT@ParTable <- PT FIT } lavaan/R/lav_objective.R0000644000176200001440000006761414627656441014711 0ustar liggesusers# fitting function for standard ML estimator.ML <- function(Sigma.hat = NULL, Mu.hat = NULL, data.cov = NULL, data.mean = NULL, data.cov.log.det = NULL, meanstructure = FALSE) { # FIXME: WHAT IS THE BEST THING TO DO HERE?? 
# CURRENTLY: return Inf (at least for nlminb, this works well) if (!attr(Sigma.hat, "po")) { return(Inf) } Sigma.hat.inv <- attr(Sigma.hat, "inv") Sigma.hat.log.det <- attr(Sigma.hat, "log.det") nvar <- ncol(Sigma.hat) if (!meanstructure) { fx <- (Sigma.hat.log.det + sum(data.cov * Sigma.hat.inv) - data.cov.log.det - nvar) } else { W.tilde <- data.cov + tcrossprod(data.mean - Mu.hat) fx <- (Sigma.hat.log.det + sum(W.tilde * Sigma.hat.inv) - data.cov.log.det - nvar) } # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # fitting function for standard ML estimator.ML_res <- function(Sigma.hat = NULL, Mu.hat = NULL, PI = NULL, res.cov = NULL, res.int = NULL, res.slopes = NULL, res.cov.log.det = NULL, cov.x = NULL, mean.x = NULL) { if (!attr(Sigma.hat, "po")) { return(Inf) } # augmented mean.x + cov.x matrix C3 <- rbind( c(1, mean.x), cbind(mean.x, cov.x + tcrossprod(mean.x)) ) Sigma.hat.inv <- attr(Sigma.hat, "inv") Sigma.hat.log.det <- attr(Sigma.hat, "log.det") nvar <- ncol(Sigma.hat) # sigma objective.sigma <- (Sigma.hat.log.det + sum(res.cov * Sigma.hat.inv) - res.cov.log.det - nvar) # beta OBS <- t(cbind(res.int, res.slopes)) EST <- t(cbind(Mu.hat, PI)) Diff <- OBS - EST objective.beta <- sum(Sigma.hat.inv * crossprod(Diff, C3) %*% Diff) fx <- objective.sigma + objective.beta # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # fitting function for restricted ML estimator.REML <- function(Sigma.hat = NULL, Mu.hat = NULL, data.cov = NULL, data.mean = NULL, data.cov.log.det = NULL, meanstructure = FALSE, group = 1L, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL) { if (!attr(Sigma.hat, "po")) { return(Inf) } Sigma.hat.inv <- attr(Sigma.hat, "inv") Sigma.hat.log.det <- attr(Sigma.hat, "log.det") nvar <- ncol(Sigma.hat) if (!meanstructure) { fx <- (Sigma.hat.log.det + sum(data.cov * Sigma.hat.inv) - data.cov.log.det - nvar) } else { W.tilde <- data.cov + tcrossprod(data.mean - Mu.hat) fx <- (Sigma.hat.log.det + sum(W.tilde * Sigma.hat.inv) - data.cov.log.det - nvar) } lambda.idx <- which(names(lavmodel@GLIST) == "lambda") LAMBDA <- lavmodel@GLIST[[lambda.idx[group]]] data.cov.inv <- lavsamplestats@icov[[group]] reml.h0 <- log(det(t(LAMBDA) %*% Sigma.hat.inv %*% LAMBDA)) reml.h1 <- log(det(t(LAMBDA) %*% data.cov.inv %*% LAMBDA)) nobs <- lavsamplestats@nobs[[group]] # fx <- (Sigma.hat.log.det + tmp - data.cov.log.det - nvar) + 1/Ng * (reml.h0 - reml.h1) fx <- fx + (1 / nobs * (reml.h0 - reml.h1)) # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # 'classic' fitting function for GLS # used again since 0.6-10 (we used the much slower estimator.WLS before) estimator.GLS <- function(Sigma.hat = NULL, Mu.hat = NULL, data.cov = NULL, data.cov.inv = NULL, data.mean = NULL, meanstructure = FALSE, correlation = FALSE) { tmp <- data.cov.inv %*% (data.cov - Sigma.hat) # tmp is not perfectly symmetric, so we use t(tmp) on the next line # to obtain the same value as estimator.WLS fx <- 0.5 * sum(tmp * t(tmp)) if (correlation) { # Bentler & Savalei (2010) eq 1.31 DD <- as.matrix(diag(tmp)) TT <- diag(nrow(data.cov)) + data.cov * data.cov.inv fx <- fx - drop(t(DD) %*% solve(TT) %*% DD) } if (meanstructure) { tmp2 <- sum(data.cov.inv * tcrossprod(data.mean - Mu.hat)) fx <- fx + tmp2 } # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # general WLS estimator (Muthen, Appendix 4, eq 99 single group) # full weight (WLS.V) matrix estimator.WLS <- function(WLS.est = NULL, WLS.obs = NULL, WLS.V = NULL) { # diff <- as.matrix(WLS.obs - WLS.est) 
# fx <- as.numeric( t(diff) %*% WLS.V %*% diff ) # since 0.5-17, we use crossprod twice diff <- WLS.obs - WLS.est fx <- as.numeric(crossprod(crossprod(WLS.V, diff), diff)) # todo alternative: using chol(WLS.V) # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # diagonally weighted LS (DWLS) estimator.DWLS <- function(WLS.est = NULL, WLS.obs = NULL, WLS.VD = NULL) { diff <- WLS.obs - WLS.est fx <- sum(diff * diff * WLS.VD) # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 fx } # Full Information ML estimator (FIML) handling the missing values estimator.FIML <- function(Sigma.hat = NULL, Mu.hat = NULL, Yp = NULL, h1 = NULL, N = NULL) { if (is.null(N)) { N <- sum(sapply(Yp, "[[", "freq")) } # Note: we ignore x.idx (if any) fx <- lav_mvnorm_missing_loglik_samplestats( Yp = Yp, Mu = Mu.hat, Sigma = Sigma.hat, log2pi = FALSE, minus.two = TRUE ) / N # ajust for h1 if (!is.null(h1)) { fx <- fx - h1 # no negative values if (is.finite(fx) && fx < 0.0) fx <- 0.0 } fx } # pairwise maximum likelihood # this is adapted from code written by Myrsini Katsikatsou # # some changes: # - no distinction between x/y (ksi/eta) # - 29/03/2016: adapt for exogenous covariates # - 21/09/2016: added code for missing = doubly.robust (contributed by # Myrsini Katsikatsou) # - HJ 18/10/2023: For sampling weights the lavcache$bifreq are weighted estimator.PML <- function(Sigma.hat = NULL, # model-based var/cov/cor Mu.hat = NULL, # model-based means TH = NULL, # model-based thresholds + means PI = NULL, # slopes th.idx = NULL, # threshold idx per variable num.idx = NULL, # which variables are numeric X = NULL, # raw data eXo = NULL, # eXo data wt = NULL, # case weights lavcache = NULL, # housekeeping stuff missing = NULL) { # how to deal with missings? # YR 3 okt 2012 # - the idea is to compute for each pair of variables, the model-based # probability (or likelihood in mixed case) (that we observe the data # for this pair under the model) # - if we have exogenous variables + conditional.x, do this for each case # - after taking logs, the sum over the cases gives the # log probablity/likelihood for this pair # - the sum over all pairs gives the final PL based logl # first of all: check if all correlations are within [-1,1] # if not, return Inf; (at least with nlminb, this works well) # diagonal of Sigma.hat is not necessarily 1, even for categorical vars Sigma.hat2 <- Sigma.hat if (length(num.idx) > 0L) { diag(Sigma.hat2)[-num.idx] <- 1 } else { diag(Sigma.hat2) <- 1 } # all positive variances? (for continuous variables) if (any(diag(Sigma.hat2) < 0)) { OUT <- +Inf attr(OUT, "logl") <- as.numeric(NA) return(OUT) } Cor.hat <- cov2cor(Sigma.hat2) # to get correlations (rho!) cors <- lav_matrix_vech(Cor.hat, diagonal = FALSE) if (length(cors) > 0L && (any(abs(cors) > 1) || any(is.na(cors)))) { # question: what is the best approach here?? OUT <- +Inf attr(OUT, "logl") <- as.numeric(NA) return(OUT) } nvar <- nrow(Sigma.hat) if (is.null(eXo)) { nexo <- 0L } else { nexo <- NCOL(eXo) } pstar <- nvar * (nvar - 1) / 2 ov.types <- rep("ordered", nvar) if (length(num.idx) > 0L) { ov.types[num.idx] <- "numeric" } ##### Three cases: ##### 1) all ordered, no exogenous (fast!) 
##### 2) mixed ordered + continuous, no exogenous ##### 3) mixed ordered + continuous, exogenous (conditional.x = TRUE) ##### Case 1: ##### all ordered ##### no exogenous covariates ##### if (all(ov.types == "ordered") && nexo == 0L) { # prepare for Myrsini's vectorization scheme long2 <- LongVecTH.Rho( no.x = nvar, all.thres = TH, index.var.of.thres = th.idx, rho.xixj = cors ) # get expected probability per table, per pair pairwisePI <- pairwiseExpProbVec( ind.vec = lavcache$long, th.rho.vec = long2 ) pairwisePI_orig <- pairwisePI # for doubly.robust # get frequency per table, per pair logl <- sum(lavcache$bifreq * log(pairwisePI)) # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # FYI the bifreq are already weighted so this will work. Alternatively: if (!is.null(wt)) { logl <- sum(lavcache$sum_obs_weights_xixj_ab_vec * log(pairwisePI)) } # more convenient fit function prop <- lavcache$bifreq / lavcache$nobs freq <- lavcache$bifreq if (!is.null(wt)) { prop <- lavcache$sum_obs_weights_xixj_ab_vec / sum(wt) freq <- lavcache$sum_obs_weights_xixj_ab_vec } # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # remove zero props # FIXME!!! or add 0.5??? # zero.idx <- which(prop == 0.0) zero.idx <- which((prop == 0.0) | !is.finite(prop)) if (length(zero.idx) > 0L) { freq <- freq[-zero.idx] prop <- prop[-zero.idx] pairwisePI <- pairwisePI[-zero.idx] } ## Fmin <- sum( prop*log(prop/pairwisePI) ) Fmin <- sum(freq * log(prop / pairwisePI)) # to avoid 'N' if (missing == "available.cases" || missing == "doubly.robust") { uniPI <- univariateExpProbVec(TH = TH, th.idx = th.idx) # shortcuts unifreq <- lavcache$unifreq uninobs <- lavcache$uninobs uniweights <- lavcache$uniweights logl <- logl + sum(uniweights * log(uniPI)) uniprop <- unifreq / uninobs # remove zero props # uni.zero.idx <- which(uniprop == 0.0) uni.zero.idx <- which((uniprop == 0.0) | !is.finite(uniprop)) if (length(uni.zero.idx) > 0L) { uniprop <- uniprop[-uni.zero.idx] uniPI <- uniPI[-uni.zero.idx] uniweights <- uniweights[-uni.zero.idx] } Fmin <- Fmin + sum(uniweights * log(uniprop / uniPI)) } if (missing == "doubly.robust") { # COMPUTE THE SUM OF THE EXPECTED BIVARIATE CONDITIONAL LIKELIHOODS # SUM_{i,j} [ E_{Yi,Yj|y^o}(lnf(Yi,Yj))) ] # First compute the terms of the summand. Since the cells of # pairwiseProbGivObs are zero for the pairs of variables that at least # one of the variables is observed (hence not contributing to the summand) # there is no need to construct an index vector for summing appropriately # within each individual. log_pairwisePI_orig <- log(pairwisePI_orig) pairwiseProbGivObs <- lavcache$pairwiseProbGivObs tmp_prod <- t(t(pairwiseProbGivObs) * log_pairwisePI_orig) SumElnfijCasewise <- apply(tmp_prod, 1, sum) SumElnfij <- sum(SumElnfijCasewise) logl <- logl + SumElnfij Fmin <- Fmin - SumElnfij # COMPUTE THE THE SUM OF THE EXPECTED UNIVARIATE CONDITIONAL LIKELIHOODS # SUM_{i,j} [ E_{Yj|y^o}(lnf(Yj|yi))) ] # First compute the model-implied conditional univariate probabilities # p(y_i=a|y_j=b). Let ModProbY1Gy2 be the vector of these # probabilities. The order the probabilities # are listed in the vector ModProbY1Gy2 is as follows: # y1|y2, y1|y3, ..., y1|yp, y2|y1, y2|y3, ..., y2|yp, # ..., yp|y1, yp|y2, ..., yp|y(p-1). Within each pair of variables the # index "a" which represents the response category of variable yi runs faster than # "b" which represents the response category of the given variable yj. 
# The computation of these probabilities are based on the model-implied # bivariate probabilities p(y_i=a,y_j=b). To do the appropriate summations # and divisions we need some index vectors to keep track of the index i, j, # a, and b, as well as the pair index. These index vectors should be # computed once and stored in lavcache. About where in the lavaan code # we will add the computations and how they will be done please see the # file "new objects in lavcache for DR-PL.r" idx.pairs <- lavcache$idx.pairs idx.cat.y2.split <- lavcache$idx.cat.y2.split idx.cat.y1.split <- lavcache$idx.cat.y1.split idx.Y1 <- lavcache$idx.Y1 idx.Gy2 <- lavcache$idx.Gy2 idx.cat.Y1 <- lavcache$idx.cat.Y1 idx.cat.Gy2 <- lavcache$idx.cat.Gy2 id.uniPrGivObs <- lavcache$id.uniPrGivObs # the latter keeps track which variable each column of the matrix # univariateProbGivObs refers to # For the function compute_uniCondProb_based_on_bivProb see the .r file # with the same name. ModProbY1Gy2 <- compute_uniCondProb_based_on_bivProb( bivProb = pairwisePI_orig, nvar = nvar, idx.pairs = idx.pairs, idx.Y1 = idx.Y1, idx.Gy2 = idx.Gy2, idx.cat.y1.split = idx.cat.y1.split, idx.cat.y2.split = idx.cat.y2.split ) log_ModProbY1Gy2 <- log(ModProbY1Gy2) # Let univariateProbGivObs be the matrix of the conditional univariate # probabilities Pr(y_i=a|y^o) that has been computed in advance and are # fed to the DR-PL function. The rows represent different individuals, # i.e. nrow=nobs, and the columns different probabilities. The columns # are listed as follows: a runs faster than i. # Note that the number of columns of univariateProbGivObs is not the # same with the length(log_ModProbY1Gy2), actually # ncol(univariateProbGivObs) < length(log_ModProbY1Gy2). # For this we use the following commands in order to multiply correctly. # Compute for each case the product Pr(y_i=a|y^o) * log[ p(y_i=a|y_j=b) ] # i.e. 
univariateProbGivObs * log_ModProbY1Gy2 univariateProbGivObs <- lavcache$univariateProbGivObs nobs <- nrow(X) uniweights.casewise <- lavcache$uniweights.casewise id.cases.with.missing <- which(uniweights.casewise > 0) no.cases.with.missing <- length(id.cases.with.missing) no.obs.casewise <- nvar - uniweights.casewise idx.missing.var <- apply(X, 1, function(x) { which(is.na(x)) }) idx.observed.var <- lapply(idx.missing.var, function(x) { c(1:nvar)[-x] }) idx.cat.observed.var <- sapply(1:nobs, function(i) { X[i, idx.observed.var[[i]]] }) ElnyiGivyjbCasewise <- sapply(1:no.cases.with.missing, function(i) { tmp.id.case <- id.cases.with.missing[i] tmp.no.mis <- uniweights.casewise[tmp.id.case] tmp.idx.mis <- idx.missing.var[[tmp.id.case]] tmp.idx.obs <- idx.observed.var[[tmp.id.case]] tmp.no.obs <- no.obs.casewise[tmp.id.case] tmp.idx.cat.obs <- idx.cat.observed.var[[tmp.id.case]] tmp.uniProbGivObs.i <- univariateProbGivObs[tmp.id.case, ] sapply(1:tmp.no.mis, function(k) { tmp.idx.mis.var <- tmp.idx.mis[k] tmp.uniProbGivObs.ik <- tmp.uniProbGivObs.i[id.uniPrGivObs == tmp.idx.mis.var] tmp.log_ModProbY1Gy2 <- sapply(1:tmp.no.obs, function(z) { log_ModProbY1Gy2[idx.Y1 == tmp.idx.mis.var & idx.Gy2 == tmp.idx.obs[z] & idx.cat.Gy2 == tmp.idx.cat.obs[z]] }) sum(tmp.log_ModProbY1Gy2 * tmp.uniProbGivObs.ik) }) }) ElnyiGivyjb <- sum(unlist(ElnyiGivyjbCasewise)) logl <- logl + ElnyiGivyjb # for the Fmin function Fmin <- Fmin - ElnyiGivyjb } # end of if (missing =="doubly.robust") ##### Case 2: ##### mixed ordered + numeric ##### no exogenous covariates ##### } else if (nexo == 0L) { # mixed ordered/numeric variables, but no exogenous covariates # - no need to compute 'casewise' (log)likelihoods PSTAR <- matrix(0, nvar, nvar) # utility matrix, to get indices PSTAR[lav_matrix_vech_idx(nvar, diagonal = FALSE)] <- 1:pstar N <- NROW(X) logLikPair <- numeric(pstar) # logl per pair (summed over cases) for (j in seq_len(nvar - 1L)) { for (i in (j + 1L):nvar) { pstar.idx <- PSTAR[i, j] if (ov.types[i] == "numeric" && ov.types[j] == "numeric") { logLIK <- lav_mvnorm_loglik_data( Y = X[, c(i, j)], wt = wt, Mu = Mu.hat[c(i, j)], Sigma = Sigma.hat[c(i, j), c(i, j)], casewise = TRUE ) logLikPair[pstar.idx] <- sum(logLIK, na.rm = TRUE) } else if (ov.types[i] == "numeric" && ov.types[j] == "ordered") { # polyserial correlation logLIK <- lav_bvmix_lik( Y1 = X[, i], Y2 = X[, j], wt = wt, evar.y1 = Sigma.hat[i, i], beta.y1 = Mu.hat[i], th.y2 = TH[th.idx == j], rho = Cor.hat[i, j], .log = TRUE ) logLikPair[pstar.idx] <- sum(logLIK, na.rm = TRUE) } else if (ov.types[j] == "numeric" && ov.types[i] == "ordered") { # polyserial correlation logLIK <- lav_bvmix_lik( Y1 = X[, j], Y2 = X[, i], wt = wt, evar.y1 = Sigma.hat[j, j], beta.y1 = Mu.hat[j], th.y2 = TH[th.idx == i], rho = Cor.hat[i, j], .log = TRUE ) logLikPair[pstar.idx] <- sum(logLIK, na.rm = TRUE) } else if (ov.types[i] == "ordered" && ov.types[j] == "ordered") { # polychoric correlation pairwisePI <- lav_bvord_noexo_pi( rho = Cor.hat[i, j], th.y1 = TH[th.idx == i], th.y2 = TH[th.idx == j] ) # avoid zeroes pairwisePI[pairwisePI < .Machine$double.eps] <- .Machine$double.eps # note: missing values are just not counted FREQ <- lav_bvord_freq(X[, i], X[, j], wt = wt) logLikPair[pstar.idx] <- sum(FREQ * log(pairwisePI)) } } } # all pairs na.idx <- which(is.na(logLikPair)) if (length(na.idx) > 0L) { lav_msg_warn(gettext("some pairs produces NA values for logl:"), lav_msg_view(round(logLikPair, 3), "none") ) } # sum over pairs logl <- sum(logLikPair) # Fmin Fmin <- (-1) * logl 
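  # --- illustrative aside (not in the original source) ----------------------
  # For an ordered/ordered pair, the contribution above is multinomial:
  # logl = sum(freq * log(PI)) and, in Fmin form, sum(freq * log(prop / PI)),
  # which is N times the KL divergence between observed and model-implied
  # cell proportions, so it is non-negative and zero only at a perfect fit.
  # A small numeric sketch with a hypothetical 2 x 2 table:
  if (FALSE) {
    freq <- c(30, 20, 10, 40) # observed cell counts (hypothetical)
    prop <- freq / sum(freq) # observed cell proportions
    PI <- c(0.28, 0.22, 0.12, 0.38) # model-implied cell probabilities
    logl <- sum(freq * log(PI))
    Fmin <- sum(freq * log(prop / PI)) # >= 0; 0 iff PI == prop
  }
  # ---------------------------------------------------------------------------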
##### Case 3: ##### mixed ordered + numeric ##### exogenous covariates ##### (conditional.x = TRUE) } else { LIK <- matrix(0, nrow(X), pstar) # likelihood per case, per pair PSTAR <- matrix(0, nvar, nvar) # utility matrix, to get indices PSTAR[lav_matrix_vech_idx(nvar, diagonal = FALSE)] <- 1:pstar N <- NROW(X) for (j in seq_len(nvar - 1L)) { for (i in (j + 1L):nvar) { pstar.idx <- PSTAR[i, j] # cat("pstar.idx =", pstar.idx, "i = ", i, " j = ", j, "\n") if (ov.types[i] == "numeric" && ov.types[j] == "numeric") { # ordinary pearson correlation LIK[, pstar.idx] <- lav_bvreg_lik( Y1 = X[, i], Y2 = X[, j], eXo = eXo, wt = wt, evar.y1 = Sigma.hat[i, i], beta.y1 = c(Mu.hat[i], PI[i, ]), evar.y2 = Sigma.hat[j, j], beta.y2 = c(Mu.hat[j], PI[j, ]), rho = Cor.hat[i, j] ) } else if (ov.types[i] == "numeric" && ov.types[j] == "ordered") { # polyserial correlation ### FIXME: th.y2 should go into ps_lik!!! LIK[, pstar.idx] <- lav_bvmix_lik( Y1 = X[, i], Y2 = X[, j], eXo = eXo, wt = wt, evar.y1 = Sigma.hat[i, i], beta.y1 = c(Mu.hat[i], PI[i, ]), th.y2 = TH[th.idx == j], sl.y2 = PI[j, ], rho = Cor.hat[i, j] ) } else if (ov.types[j] == "numeric" && ov.types[i] == "ordered") { # polyserial correlation ### FIXME: th.y1 should go into ps_lik!!! LIK[, pstar.idx] <- lav_bvmix_lik( Y1 = X[, j], Y2 = X[, i], eXo = eXo, wt = wt, evar.y1 = Sigma.hat[j, j], beta.y1 = c(Mu.hat[j], PI[j, ]), th.y2 = TH[th.idx == i], sl.y2 = PI[i, ], rho = Cor.hat[i, j] ) } else if (ov.types[i] == "ordered" && ov.types[j] == "ordered") { LIK[, pstar.idx] <- pc_lik_PL_with_cov( Y1 = X[, i], Y2 = X[, j], Rho = Sigma.hat[i, j], th.y1 = TH[th.idx == i], th.y2 = TH[th.idx == j], eXo = eXo, PI.y1 = PI[i, ], PI.y2 = PI[j, ], missing.ind = missing ) } } } # all pairs # check for zero likelihoods/probabilities # FIXME: or should we replace them with a tiny number? 
if (any(LIK == 0.0, na.rm = TRUE)) { OUT <- +Inf attr(OUT, "logl") <- as.numeric(NA) return(OUT) } # loglikelihood LogLIK.cases <- log(LIK) # sum over cases LogLIK.pairs <- colSums(LogLIK.cases, na.rm = TRUE) # sum over pairs logl <- logl_pairs <- sum(LogLIK.pairs) if (missing == "available.cases" && all(ov.types == "ordered") && nexo != 0L) { uni_LIK <- matrix(0, nrow(X), ncol(X)) for (i in seq_len(nvar)) { uni_LIK[, i] <- uni_lik( Y1 = X[, i], th.y1 = TH[th.idx == i], eXo = eXo, PI.y1 = PI[i, ] ) } if (any(uni_LIK == 0.0, na.rm = TRUE)) { OUT <- +Inf attr(OUT, "logl") <- as.numeric(NA) return(OUT) } uni_logLIK_cases <- log(uni_LIK) * lavcache$uniweights.casewise # sum over cases uni_logLIK_varwise <- colSums(uni_logLIK_cases) # sum over variables uni_logLIK <- sum(uni_logLIK_varwise) # add with the pairwise part of LogLik logl <- logl_pairs + uni_logLIK } # we minimise Fmin <- (-1) * logl } # here, we should have two quantities: logl and Fmin # function value as returned to the minimizer fx <- Fmin # attach 'loglikelihood' attr(fx, "logl") <- logl fx } # full information maximum likelihood # underlying multivariate normal approach (see Joreskog & Moustaki, 2001) # estimator.FML <- function(Sigma.hat = NULL, # model-based var/cov/cor TH = NULL, # model-based thresholds + means th.idx = NULL, # threshold idx per variable num.idx = NULL, # which variables are numeric X = NULL, # raw data lavcache = NULL) { # patterns # YR 27 aug 2013 # just for fun, and to compare with PML for small models # first of all: check if all correlations are within [-1,1] # if not, return Inf; (at least with nlminb, this works well) cors <- Sigma.hat[lower.tri(Sigma.hat)] if (any(abs(cors) > 1)) { return(+Inf) } nvar <- nrow(Sigma.hat) pstar <- nvar * (nvar - 1) / 2 ov.types <- rep("ordered", nvar) if (length(num.idx) > 0L) ov.types[num.idx] <- "numeric" MEAN <- rep(0, nvar) # shortcut for all ordered - per pattern if (all(ov.types == "ordered")) { PAT <- lavcache$pat npatterns <- nrow(PAT) freq <- as.numeric(rownames(PAT)) PI <- numeric(npatterns) TH.VAR <- lapply(1:nvar, function(x) c(-Inf, TH[th.idx == x], +Inf)) # FIXME!!! ok to set diagonal to 1.0? diag(Sigma.hat) <- 1.0 for (r in 1:npatterns) { # compute probability for each pattern lower <- sapply(1:nvar, function(x) TH.VAR[[x]][PAT[r, x]]) upper <- sapply(1:nvar, function(x) TH.VAR[[x]][PAT[r, x] + 1L]) # how accurate must we be here??? PI[r] <- sadmvn(lower, upper, mean = MEAN, varcov = Sigma.hat, maxpts = 10000 * nvar, abseps = 1e-07 ) } # sum (log)likelihood over all patterns # LogLik <- sum(log(PI) * freq) # more convenient fit function prop <- freq / sum(freq) # remove zero props # FIXME!!! or add 0.5??? 
zero.idx <- which(prop == 0.0) if (length(zero.idx) > 0L) { prop <- prop[-zero.idx] PI <- PI[-zero.idx] } Fmin <- sum(prop * log(prop / PI)) } else { # case-wise PI <- numeric(nobs) for (i in 1:nobs) { # compute probability for each case PI[i] <- lav_msg_stop(gettext("not implemented")) } # sum (log)likelihood over all observations LogLik <- sum(log(PI)) lav_msg_stop(gettext("not implemented")) } # function value as returned to the minimizer # fx <- -1 * LogLik fx <- Fmin fx } estimator.MML <- function(lavmodel = NULL, THETA = NULL, TH = NULL, GLIST = NULL, group = 1L, lavdata = NULL, sample.mean = NULL, sample.mean.x = NULL, lavcache = NULL) { # compute case-wise likelihoods lik <- lav_model_lik_mml( lavmodel = lavmodel, THETA = THETA, TH = TH, GLIST = GLIST, group = group, lavdata = lavdata, sample.mean = sample.mean, sample.mean.x = sample.mean.x, lavcache = lavcache ) # log + sum over observations logl <- sum(log(lik)) # function value as returned to the minimizer fx <- -logl fx } estimator.2L <- function(lavmodel = NULL, GLIST = NULL, Y1 = NULL, # only for missing Lp = NULL, Mp = NULL, lavsamplestats = NULL, group = 1L) { # compute model-implied statistics for all blocks implied <- lav_model_implied(lavmodel, GLIST = GLIST) # here, we assume only 2!!! levels, at [[1]] and [[2]] if (lavmodel@conditional.x) { Res.Sigma.W <- implied$res.cov[[(group - 1) * 2 + 1]] Res.Int.W <- implied$res.int[[(group - 1) * 2 + 1]] Res.Pi.W <- implied$res.slopes[[(group - 1) * 2 + 1]] Res.Sigma.B <- implied$res.cov[[(group - 1) * 2 + 2]] Res.Int.B <- implied$res.int[[(group - 1) * 2 + 2]] Res.Pi.B <- implied$res.slopes[[(group - 1) * 2 + 2]] } else { Sigma.W <- implied$cov[[(group - 1) * 2 + 1]] Mu.W <- implied$mean[[(group - 1) * 2 + 1]] Sigma.B <- implied$cov[[(group - 1) * 2 + 2]] Mu.B <- implied$mean[[(group - 1) * 2 + 2]] } if (lavsamplestats@missing.flag) { if (lavmodel@conditional.x) { lav_msg_stop(gettext("multilevel + conditional.x is not ready yet for fiml; rerun with conditional.x = FALSE")) } # SIGMA.B <- Sigma.B[Lp$both.idx[[2]], Lp$both.idx[[2]], drop = FALSE] # if(any(diag(SIGMA.B) < 0)) { # return(+Inf) # } # COR.B <- cov2cor(SIGMA.B) # if(any(abs(lav_matrix_vech(COR.B, diagonal = FALSE)) > 1)) { # return(+Inf) # } Y2 <- lavsamplestats@YLp[[group]][[2]]$Y2 Yp <- lavsamplestats@missing[[group]] loglik <- lav_mvnorm_cluster_missing_loglik_samplestats_2l( Y1 = Y1, Y2 = Y2, Lp = Lp, Mp = Mp, Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, log2pi = FALSE, minus.two = TRUE ) } else { YLp <- lavsamplestats@YLp[[group]] if (lavmodel@conditional.x) { loglik <- lav_mvreg_cluster_loglik_samplestats_2l( YLp = YLp, Lp = Lp, Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W, Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B, Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B, log2pi = FALSE, minus.two = TRUE ) } else { loglik <- lav_mvnorm_cluster_loglik_samplestats_2l( YLp = YLp, Lp = Lp, Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, log2pi = FALSE, minus.two = TRUE ) } } # minimize objective <- 1 * loglik # divide by (N*2) objective <- objective / (lavsamplestats@ntotal * 2) # should be strictly positive # if(objective < 0) { # objective <- +Inf # } objective } lavaan/R/lav_options_estimator.R0000644000176200001440000004076614627656441016520 0ustar liggesusers# lav_options specific per estimator in separate functions LDW 06/04/2024 lav_options_est_ml <- function(opt) { # ML and friends: MLF, MLM, MLMV, MLMVS, MLR #### # se if (opt$se == "bootstrap" && opt$estimator %in% c("mlf", 
"mlm", "mlmv", "mlmvs", "mlr")) { lav_msg_stop(gettext("use ML estimator for bootstrap")) } else if (opt$se == "default") { if (opt$estimator %in% c("ml", "mlf")) { opt$se <- "standard" } else if (opt$estimator %in% c("mlm", "mlmv", "mlmvs")) { opt$se <- "robust.sem" } else if (opt$estimator == "mlr") { opt$se <- "robust.huber.white" } } else if (opt$se == "robust") { if (opt$missing %in% c("ml", "ml.x")) { opt$se <- "robust.huber.white" } else if (opt$missing == "two.stage") { # needed? opt$se <- "two.stage" } else if (opt$missing == "robust.two.stage") { # needed? opt$se <- "robust.two.stage" } else { opt$se <- "robust.sem" } } # information if (opt$estimator == "mlf") { if (opt$information[1] == "default") { opt$information[1] <- "first.order" } if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "first.order" } } # test if (!opt$test[1] == "none") { if (opt$estimator %in% c("ml", "mlf")) { if (opt$test[1] == "default") { opt$test <- "standard" } # else { # opt$test <- union("standard", opt$test) # } } else if (opt$estimator == "mlm") { if (opt$test[1] == "default") { opt$test <- "satorra.bentler" } else { opt$test <- union("satorra.bentler", opt$test) } } else if (opt$estimator == "mlmv") { if (opt$test[1] == "default") { opt$test <- "scaled.shifted" } else { opt$test <- union("scaled.shifted", opt$test) } } else if (opt$estimator == "mlmvs") { if (opt$test[1] == "default") { opt$test <- "mean.var.adjusted" } else { opt$test <- union("mean.var.adjusted", opt$test) } } } opt } lav_options_est_gls <- function(opt) { # GLS #### # FIXME: catch categorical, clustered, ... # se if (opt$se == "default") { opt$se <- "standard" } # test if (opt$test[1] == "default") { opt$test <- "standard" } bad.idx <- which(!opt$test %in% c( "standard", "none", "browne.residual.nt", # == standard "browne.residual.nt.model", "browne.residual.adf", "browne.residual.adf.model" )) if (length(bad.idx) > 0L) { lav_msg_stop(gettextf( "invalid value(s) in test= argument when estimator is GLS: %s.", paste(opt$test[bad.idx], collapse = " "))) } # missing opt$missing <- "listwise" # also pairwise? opt } lav_options_est_ntrls <- function(opt) { # NTRLS (experimental) #### # optim.gradient opt$optim.gradien <- "numerical" # se if (opt$se == "default") { opt$se <- "standard" } # test if (opt$test[1] == "default") { opt$test <- "standard" } bad.idx <- which(!opt$test %in% c( "standard", "none", "browne.residual.nt", "browne.residual.nt.model", "browne.residual.adf", "browne.residual.adf.model" )) if (length(bad.idx) > 0L) { lav_msg_stop(gettextf( "invalid value(s) in test= argument when estimator is NTRLS: %s.", paste(opt$test[bad.idx], collapse = " "))) } # missing opt$missing <- "listwise" opt } lav_options_est_catml <- function(opt) { # catML (experimental) #### # optim.gradient opt$optim.gradient <- "numerical" # for now # force correlation = TRUE, and categorical = FALSE opt$correlation <- TRUE opt$.categorical <- FALSE # we 'pretend' to have continuous data! 
  # se
  if (opt$se == "default") {
    opt$se <- "robust.sem" # for now
  }
  # test
  if (opt$test[1] == "default") {
    opt$test <- "satorra.bentler"
  }
  # missing
  if (opt$missing %in% c("listwise", "pairwise")) {
    # nothing to do
  } else if (opt$missing == "default") {
    opt$missing <- "listwise"
  } else {
    lav_msg_stop(gettext(
      "missing argument should be listwise or pairwise if estimator is catML"))
  }
  opt
}

lav_options_est_wls <- function(opt) { # WLS ####
  # se
  if (opt$se == "default") {
    opt$se <- "standard"
  }
  # test
  if (opt$test[1] == "default") {
    opt$test <- "standard"
  }
  bad.idx <- which(!opt$test %in% c(
    "standard", "none",
    "browne.residual.nt",
    "browne.residual.nt.model",
    "browne.residual.adf", # == standard
    "browne.residual.adf.model"
  ))
  if (length(bad.idx) > 0L) {
    lav_msg_stop(gettextf(
      "invalid value(s) in test= argument when estimator is WLS: %s.",
      paste(opt$test[bad.idx], collapse = " ")))
  }
  # missing
  # opt$missing <- "listwise" (could be pairwise)
  opt
}

lav_options_est_dls <- function(opt) { # DLS ####
  # se
  if (opt$se == "default") {
    opt$se <- "robust.sem"
  }
  # test
  if (opt$test[1] == "default") {
    opt$test <- "satorra.bentler"
  }
  bad.idx <- which(!opt$test %in% c(
    "standard", "none", "satorra.bentler",
    "browne.residual.nt", # == standard
    "browne.residual.nt.model",
    "browne.residual.adf",
    "browne.residual.adf.model"
  ))
  if (length(bad.idx) > 0L) {
    lav_msg_stop(gettextf(
      "invalid value(s) in test= argument when estimator is DLS: %s.",
      paste(opt$test[bad.idx], collapse = " ")))
  }
  # always include "satorra.bentler"
  if (opt$test[1] %in% c(
    "browne.residual.nt", "browne.residual.adf",
    "browne.residual.nt.model", "browne.residual.adf.model"
  )) {
    opt$test <- union("satorra.bentler", opt$test)
  }
  # missing
  opt$missing <- "listwise"
  # estimator.args
  if (is.null(opt$estimator.args)) {
    opt$estimator.args <- list(
      dls.a = 1.0,
      dls.GammaNT = "model",
      dls.FtimesNminus1 = FALSE
    )
  } else {
    if (is.null(opt$estimator.args$dls.a)) {
      opt$estimator.args$dls.a <- 1.0
    } else {
      stopifnot(is.numeric(opt$estimator.args$dls.a))
      if (opt$estimator.args$dls.a < 0.0 || opt$estimator.args$dls.a > 1.0) {
        lav_msg_stop(gettext(
          "dls.a value in estimator.args must be between 0 and 1."))
      }
    }
    if (is.null(opt$estimator.args$dls.GammaNT)) {
      opt$estimator.args$dls.GammaNT <- "model"
    } else {
      stopifnot(is.character(opt$estimator.args$dls.GammaNT))
      opt$estimator.args$dls.GammaNT <- tolower(opt$estimator.args$dls.GammaNT)
      if (!opt$estimator.args$dls.GammaNT %in% c("sample", "model")) {
        lav_msg_stop(gettextf(
          "dls.GammaNT value in estimator.args must be either %s.",
          lav_msg_view(c("sample", "model"), log.sep = "or")))
      }
    }
    if (is.null(opt$estimator.args$dls.FtimesNminus1)) {
      opt$estimator.args$dls.FtimesNminus1 <- FALSE
    } else {
      stopifnot(is.logical(opt$estimator.args$dls.FtimesNminus1))
    }
  }
  if (opt$estimator.args$dls.GammaNT == "sample") {
    if (opt$optim.method %in% c("nlminb", "gn")) {
      # nothing to do
    } else if (opt$optim.method == "default") {
      opt$optim.method <- "gn"
    } else {
      lav_msg_stop(gettext(
        "optim.method must be either nlminb or gn if estimator is DLS."))
    }
  } else {
    if (opt$optim.method %in% c("gn")) {
      # nothing to do
    } else if (opt$optim.method == "default") {
      opt$optim.method <- "gn"
    } else if (opt$optim.method == "nlminb") {
      opt$optim.gradient <- "numerical"
    } else {
      lav_msg_stop(gettext(
        "optim.method must be either nlminb or gn if estimator is DLS."))
    }
  }
  opt
}

lav_options_est_dwls <- function(opt) { # DWLS, WLSM, WLSMV, WLSMVS ####
  # new in 0.6-17: if !categorical, give a warning
  if (!opt$.categorical) {
    lav_msg_warn(gettextf(
"estimator %s is not recommended for continuous data. Did you forget to set the ordered= argument?", dQuote(lav_options_estimatorgroup(opt$estimator)))) } # se if (opt$se == "bootstrap" && opt$estimator %in% c("wlsm", "wlsmv", "wlsmvs")) { lav_msg_stop(gettext("use (D)WLS estimator for bootstrap")) } else if (opt$se == "default") { if (opt$estimator == "dwls") { opt$se <- "standard" } else { opt$se <- "robust.sem" } } else if (opt$se == "robust") { opt$se <- "robust.sem" } # test if (!opt$test[1] == "none") { if (opt$estimator == "dwls") { if (opt$test[1] == "default") { opt$test <- "standard" } # else { # opt$test <- union("standard", opt$test) # } } else if (opt$estimator == "wlsm") { if (opt$test[1] == "default") { opt$test <- "satorra.bentler" } else { opt$test <- union("satorra.bentler", opt$test) } } else if (opt$estimator == "wlsmv") { if (opt$test[1] == "default") { opt$test <- "scaled.shifted" } else { opt$test <- union("scaled.shifted", opt$test) } } else if (opt$estimator == "wlsmvs") { if (opt$test[1] == "default") { opt$test <- "mean.var.adjusted" } else { opt$test <- union("mean.var.adjusted", opt$test) } } } opt } lav_options_est_uls <- function(opt) { # ULS, ULSM, ULSMV, ULSMVS #### # se if (opt$se == "bootstrap" && opt$estimator %in% c("ulsm", "ulsmv", "ulsmvs")) { lav_msg_stop(gettext("use ULS estimator for bootstrap")) } else if (opt$se == "default") { if (opt$estimator == "uls") { opt$se <- "standard" } else { opt$se <- "robust.sem" } } else if (opt$se == "robust") { opt$se <- "robust.sem" } # test if (!opt$test[1] == "none") { if (opt$estimator == "uls") { if (opt$test[1] == "default") { opt$test <- "standard" } # else { # opt$test <- union("standard", opt$test) # } } else if (opt$estimator == "ulsm") { if (opt$test[1] == "default") { opt$test <- "satorra.bentler" } else { opt$test <- union("satorra.bentler", opt$test) } } else if (opt$estimator == "ulsmv") { if (opt$test[1] == "default") { opt$test <- "scaled.shifted" } else { opt$test <- union("scaled.shifted", opt$test) } } else if (opt$estimator == "ulsmvs") { if (opt$test[1] == "default") { opt$test <- "mean.var.adjusted" } else { opt$test <- union("mean.var.adjusted", opt$test) } } } opt } lav_options_est_pml <- function(opt) { # PML #### # se if (opt$se == "default") { opt$se <- "robust.huber.white" } # information opt$information[1] <- "observed" if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "observed" } if (length(opt$observed.information) > 1L && opt$observed.information[2] == "default") { opt$observed.information[2] <- "hessian" } # test if (length(opt$test) > 1L) { lav_msg_stop(gettext( "only a single test statistic is allow when estimator is PML.")) } if (!opt$test[1] == "none") { opt$test <- "mean.var.adjusted" } opt } lav_options_est_fml <- function(opt) { # FML - UMN #### # optim.gradient opt$optim.gradient <- "numerical" # se if (opt$se == "default") { opt$se <- "standard" } # information opt$information[1] <- "observed" if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "observed" } # test if (!opt$test[1] == "none") { opt$test <- "standard" } opt } lav_options_est_reml <- function(opt) { # REML #### # se if (opt$se == "default") { opt$se <- "standard" } # information opt$information[1] <- "observed" if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "observed" } # test if (!opt$test[1] == "none") { opt$test <- "standard" } # missing opt$missing <- "listwise" opt } 
lav_options_est_mml <- function(opt) { # MML #### # se if (opt$se == "default") { opt$se <- "standard" } # information opt$information[1] <- "observed" opt$meanstructure <- TRUE if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "observed" } # test opt$test <- "none" # link if (opt$link == "default") { # opt$link <- "logit" opt$link <- "probit" } else if (opt$link %in% c("logit", "probit")) { # nothing to do } else { lav_msg_stop(gettext("link must be `logit' or `probit'")) } # parameterization if (opt$parameterization == "default") { opt$parameterization <- "mml" } else { lav_msg_stop(gettext( "parameterization argument is ignored if estimator = MML")) } opt } lav_options_est_fabin <- function(opt) { # FABIN, MULTIPLE-GROUP-METHOD (MGM), BENTLER1982, ... #### # experimental, for cfa or sam only # se if (opt$se == "default") { opt$se <- "none" } # bounds if (!is.null(opt$bounds) && opt$bounds == "default" && length(opt$optim.bounds) == 0L) { opt$bounds <- "standard" } # test if (opt$test == "default") { opt$test <- "none" # for now } # missing opt$missing <- "listwise" # for now (until we have two-stage working) # options for fabin if (lav_options_estimatorgroup(opt$estimator) %in% c("FABIN2", "FABIN3")) { if (is.null(opt$estimator.args)) { opt$estimator.args <- list(thetapsi.method = "GLS") } else { if (is.null(opt$estimator.args$thetapsi.method)) { opt$estimator.args$thetapsi.method <- "GLS" } else { opt$estimator.args$thetapsi.method <- toupper(opt$estimator.args$thetapsi.method) if (opt$estimator.args$thetapsi.method %in% c( "ULS", "GLS", "WLS", "ULS.ML", "GLS.ML", "WLS.ML" )) { if (opt$estimator.args$thetapsi.method == "WLS") { opt$estimator.args$thetapsi.method <- "GLS" } if (opt$estimator.args$thetapsi.method == "WLS.ML") { opt$estimator.args$thetapsi.method <- "GLS.ML" } } else { lav_msg_stop(gettextf( "unknown value for estimator.args$thetapsi.method option: %s.", opt$estimator.args$thetapsi.method)) } } } } # options for Bentler if (lav_options_estimatorgroup(opt$estimator) == "BENTLER1982") { if (is.null(opt$estimator.args)) { opt$estimator.args <- list(GLS = FALSE, quadprog = FALSE) } else { if (is.null(opt$estimator.args$GLS)) { opt$estimator.args$GLS <- FALSE } if (is.null(opt$estimator.args$quadprog)) { opt$estimator.args$quadprog <- FALSE } } } # options for guttman1952 multiple group method if (lav_options_estimatorgroup(opt$estimator) == "MGM") { if (is.null(opt$estimator.args)) { opt$estimator.args <- list( psi.mapping = FALSE, quadprog = FALSE ) } else { if (is.null(opt$estimator.args$psi.mapping)) { opt$estimator.args$psi.mapping <- FALSE } if (is.null(opt$estimator.args$quadprog)) { opt$estimator.args$quadprog <- FALSE } } } # brute-force override opt$optim.method <- "noniter" opt$start <- "simple" opt } lav_options_est_miiv <- function(opt) { # MIIV-2SLS and friends #### # se if (opt$se == "default") { opt$se <- "none" # for now } # bounds if (!is.null(opt$bounds) && opt$bounds == "default" && length(opt$optim.bounds) == 0L) { opt$bounds <- "standard" } # test if (opt$test == "default") { opt$test <- "none" # for now } # missing opt$missing <- "listwise" # for now # estimator options if (is.null(opt$estimator.args)) { opt$estimator.args <- list(method = "2SLS") } else { if (is.null(opt$estimator.args$method)) { opt$estimator.args$method <- "2SLS" } } # brute-force override opt$optim.method <- "noniter" opt$start <- "simple" opt } lav_options_est_none <- function(opt) { # NONE #### # se if (opt$se == "default") { opt$se <- 
"none" } # test if (opt$test[1] == "default") { opt$test <- "none" } opt }lavaan/R/lav_sam_utils.R0000644000176200001440000004450714627656441014733 0ustar liggesusers# utility functions for the sam() function # YR 4 April 2023 # construct 'mapping matrix' M using either "ML", "GLS" or "ULS" method # optionally return MTM (for ML) # # by construction, M %*% LAMBDA = I (the identity matrix) lav_sam_mapping_matrix <- function(LAMBDA = NULL, THETA = NULL, S = NULL, S.inv = NULL, method = "ML") { method <- toupper(method) # ULS # M == solve( t(LAMBDA) %*% LAMBDA ) %*% t(LAMBDA) # == MASS:::ginv(LAMBDA) if (method == "ULS") { # M == solve( t(LAMBDA) %*% LAMBDA ) %*% t(LAMBDA) # == MASS:::ginv(LAMBDA) M <- try(tcrossprod(solve(crossprod(LAMBDA)), LAMBDA), silent = TRUE ) if (inherits(M, "try-error")) { lav_msg_warn(gettext( "cannot invert crossprod(LAMBDA); using generalized inverse")) M <- MASS::ginv(LAMBDA) } # GLS # M == solve( t(LAMBDA) %*% S.inv %*% LAMBDA ) %*% t(LAMBDA) %*% S.inv } else if (method == "GLS") { if (is.null(S.inv)) { S.inv <- try(solve(S), silent = TRUE) } if (inherits(S.inv, "try-error")) { lav_msg_warn(gettext("S is not invertible; switching to ULS method")) M <- lav_sam_mapping_matrix(LAMBDA = LAMBDA, method = "ULS") } else { tLSinv <- t(LAMBDA) %*% S.inv tLSinvL <- tLSinv %*% LAMBDA M <- try(solve(tLSinvL, tLSinv), silent = TRUE) if (inherits(M, "try-error")) { lav_msg_warn(gettext("problem contructing mapping matrix; switching to generalized inverse")) M <- MASS::ginv(tLSinvL) %*% tLSinv } } # ML # M == solve(t(LAMBDA) %*% THETA.inv %*% LAMBDA) %*% t(LAMBDA) %*% THETA.inv } else if (method == "ML") { # Problem: if THETA has zero elements on the diagonal, we cannot invert # As we do not have access to Sigma(.inv), we cannot # use the trick as in lavPredict() (where we replace THETA.inv by # Sigma.inv) # old method (<0.6-16): remove rows/cols with zero values # on the diagonal of THETA, invert the submatrix and # set the '0' diagonal elements to one. This resulted in (somewhat) # distorted results. # new in 0.6-16: we use the Wall & Amemiya (2000) method using # the so-called 'T' transformation # new in 0.6-18: if we cannot use the marker method (eg growth models), # use the 'old' method anyway: remove zero rows/cols and invert submatrix zero.theta.idx <- which(abs(diag(THETA)) < 1e-4) # be conservative if (length(zero.theta.idx) == 0L) { # ok, no zero diagonal elements: try to invert THETA if (lav_matrix_is_diagonal(THETA)) { THETA.inv <- diag(1 / diag(THETA), nrow = nrow(THETA)) } else { THETA.inv <- try(solve(THETA), silent = TRUE) if (inherits(THETA, "try-error")) { THETA.inv <- NULL } } } else { # see if we can use marker method marker.idx <- lav_utils_get_marker(LAMBDA = LAMBDA, std.lv = TRUE) if (any(is.na(marker.idx))) { THETA.inv <- try(lav_matrix_symmetric_inverse(THETA), silent = TRUE) if (inherits(THETA.inv, "try-error")) { THETA.inv <- NULL } else { diag(THETA.inv)[zero.theta.idx] <- 1 } } else { # try tmat method later on THETA.inv <- NULL } } # could we invert THETA? 
if (!is.null(THETA.inv)) { # ha, all is good; compute M the usual way tLTi <- t(LAMBDA) %*% THETA.inv tLTiL <- tLTi %*% LAMBDA M <- try(solve(tLTiL, tLTi), silent = TRUE) if (inherits(M, "try-error")) { lav_msg_warn(gettext( "problem contructing ML mapping matrix; switching to ULS")) M <- lav_sam_mapping_matrix(LAMBDA = LAMBDA, method = "ULS") } } else { # use W&A2000's method using the 'T' transformation M <- try(lav_sam_mapping_matrix_tmat( LAMBDA = LAMBDA, THETA = THETA ), silent = TRUE) if (inherits(M, "try-error")) { lav_msg_warn(gettext( "problem contructing ML mapping matrix; switching to ULS")) M <- lav_sam_mapping_matrix(LAMBDA = LAMBDA, method = "ULS") } } } # ML M } # use 'T' transformation to create the 'Bartlett/ML' mapping matrix # see Wall & Amemiya (2000) eq (7) # see also Fuller 1987 page 357 (where T is called H), and page 364 # although Fuller/W&A always assumed that THETA is diagonal, # their method seems to work equally well for non-diagonal THETA # # in our implementation: # - we do NOT reorder the rows of LAMBDA # - if std.lv = TRUE, we first rescale to 'create' marker indicators # and then rescale back at the end # lav_sam_mapping_matrix_tmat <- function(LAMBDA = NULL, THETA = NULL, marker.idx = NULL, std.lv = NULL) { LAMBDA <- as.matrix.default(LAMBDA) nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) # do we have marker.idx? if (is.null(marker.idx)) { # 'marker' indicator has a single non-zero element in a row marker.idx <- lav_utils_get_marker(LAMBDA = LAMBDA, std.lv = TRUE) if (any(is.na(marker.idx))) { lav_msg_stop(gettext("no clear markers in LAMBDA matrix")) } } # std.lv TRUE or FALSE? if (is.null(std.lv)) { std.lv <- FALSE if (any(diag(LAMBDA[marker.idx, , drop = FALSE]) != 1)) { std.lv <- TRUE } } # if std.lv = TRUE, rescale if (std.lv) { MARKER <- LAMBDA[marker.idx, , drop = FALSE] marker.inv <- 1 / diag(MARKER) LAMBDA <- t(t(LAMBDA) * marker.inv) } # compute 'T' matrix TMAT <- lav_sam_tmat( LAMBDA = LAMBDA, THETA = THETA, marker.idx = marker.idx ) # ML mapping matrix M <- TMAT[marker.idx, , drop = FALSE] if (std.lv) { M <- M * marker.inv } M } # create 'T' matrix (tmat) for T-transformation # # Notes: - here we assume that LAMBDA has unity markers (no std.lv = TRUE) # - TMAT is NOT symmetric! # - Yc %*% t(TMAT) transforms the data in such a way that we get: # 1) Bartlett factor scores in the marker columns # 2) 'V' values in the non-marker columns, where: # V = Yc - Yc[,marker.idx] %*% t(LAMBDA) # lav_sam_tmat <- function(LAMBDA = NULL, THETA = NULL, marker.idx = NULL) { LAMBDA <- as.matrix.default(LAMBDA) nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) # do we have marker.idx? 
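# ---------------------------------------------------------------------------
# Illustration (not part of the package): toy check that the ULS and
# ML/Bartlett mapping matrices defined above satisfy M %*% LAMBDA = I, and
# that the ULS variant coincides with MASS::ginv(LAMBDA) as noted in the
# comments. LAMBDA and THETA are made-up population values.
LAMBDA <- cbind(c(1, .8, .7, 0, 0, 0),
                c(0, 0, 0, 1, .9, .6))
THETA  <- diag(c(.5, .4, .6, .5, .3, .7))
# ULS: M = (L'L)^{-1} L'
M.uls <- solve(crossprod(LAMBDA)) %*% t(LAMBDA)
# ML/Bartlett: M = (L' Theta^{-1} L)^{-1} L' Theta^{-1}
Ti   <- solve(THETA)
M.ml <- solve(t(LAMBDA) %*% Ti %*% LAMBDA) %*% t(LAMBDA) %*% Ti
stopifnot(all.equal(unname(M.uls %*% LAMBDA), diag(2)),
          all.equal(unname(M.ml  %*% LAMBDA), diag(2)),
          all.equal(M.uls, MASS::ginv(LAMBDA)))
# ---------------------------------------------------------------------------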
if (is.null(marker.idx)) { # 'marker' indicator has a single 1 element in a row marker.idx <- lav_utils_get_marker(LAMBDA = LAMBDA, std.lv = FALSE) if (any(is.na(marker.idx))) { lav_msg_stop(gettext("no clear markers in LAMBDA matrix")) } } # construct 'C' matrix C2 <- diag(nvar) C2[, marker.idx] <- -1 * LAMBDA C <- C2[-marker.idx, , drop = FALSE] # compute Sigma.ve and Sigma.vv Sigma.ve <- C %*% THETA # Sigma.vv <- C %*% THETA %*% t(C) Sigma.vv <- Sigma.ve %*% t(C) # construct 'Gamma' (and Gamma2) matrix # Gamma <- (t(Sigma.ve) %*% solve(Sigma.vv))[marker.idx,, drop = FALSE] Gamma <- try(t(solve(Sigma.vv, Sigma.ve)[, marker.idx, drop = FALSE]), silent = TRUE ) if (inherits(Gamma, "try-error")) { tmp <- t(Sigma.ve) %*% MASS::ginv(Sigma.vv) Gamma <- tmp[marker.idx, , drop = FALSE] } Gamma2 <- matrix(0, nfac, nvar) Gamma2[, -marker.idx] <- Gamma Gamma2[, marker.idx] <- diag(nfac) # transformation matrix 'T' (we call it here 'Tmat') Tmat <- matrix(0, nvar, nvar) Tmat[-marker.idx, ] <- C Tmat[marker.idx, ] <- -Gamma2 %*% C2 Tmat } # compute VETA # - if alpha.correction == 0 -> same as local SAM (or MOC) # - if alpha.correction == (N-1) -> same as FSR+Bartlett lav_sam_veta <- function(M = NULL, S = NULL, THETA = NULL, alpha.correction = 0L, lambda.correction = TRUE, N = 20L, dummy.lv.idx = integer(0L), extra = FALSE) { # MSM MSM <- M %*% S %*% t(M) # MTM MTM <- M %*% THETA %*% t(M) # new in 0.6-16: make sure MTM is pd # (otherwise lav_matrix_symmetric_diff_smallest_root will fail) if (length(dummy.lv.idx) > 0L) { MTM.nodummy <- MTM[-dummy.lv.idx, -dummy.lv.idx, drop = FALSE] MTM.nodummy <- zapsmall(lav_matrix_symmetric_force_pd( MTM.nodummy, tol = 1e-04 )) MTM[-dummy.lv.idx, -dummy.lv.idx] <- MTM.nodummy } else { MTM <- zapsmall(lav_matrix_symmetric_force_pd(MTM, tol = 1e-04)) } # apply small sample correction (if requested) if (alpha.correction > 0) { alpha.N1 <- alpha.correction / (N - 1) if (alpha.N1 > 1.0) { alpha.N1 <- 1.0 } else if (alpha.N1 < 0.0) { alpha.N1 <- 0.0 } MTM <- (1 - alpha.N1) * MTM alpha <- alpha.correction } else { alpha <- alpha.correction } if (lambda.correction) { # use Fuller (1987) approach to ensure VETA is positive lambda <- try(lav_matrix_symmetric_diff_smallest_root(MSM, MTM), silent = TRUE ) if (inherits(lambda, "try-error")) { lav_msg_warn(gettext("failed to compute lambda")) VETA <- MSM - MTM # and hope for the best } else { cutoff <- 1 + 1 / (N - 1) if (lambda < cutoff) { lambda.star <- lambda - 1 / (N - 1) VETA <- MSM - lambda.star * MTM } else { VETA <- MSM - MTM } } } else { VETA <- MSM - MTM } # extra attributes? 
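# ---------------------------------------------------------------------------
# Illustration (not part of the package): with an exact population covariance
# S = LAMBDA PSI LAMBDA' + THETA and a Bartlett mapping matrix M (M LAMBDA = I),
# the identity VETA = M S M' - M THETA M' = PSI holds; the Fuller-type
# correction above protects this difference in finite samples. The smallest
# root lambda of det(MSM - lambda MTM) = 0 is obtained here from the
# symmetric form L^{-1} MSM L^{-T} with MTM = L L' -- a sketch of one way to
# get it, not necessarily how lav_matrix_symmetric_diff_smallest_root() works.
LAM <- cbind(c(1, .8, .7, 0, 0, 0), c(0, 0, 0, 1, .9, .6))
TH  <- diag(c(.5, .4, .6, .5, .3, .7))
PSI <- matrix(c(1, .3, .3, 1), 2, 2)
S   <- LAM %*% PSI %*% t(LAM) + TH
Ti  <- solve(TH)
M   <- solve(t(LAM) %*% Ti %*% LAM) %*% t(LAM) %*% Ti     # Bartlett mapping
MSM <- M %*% S  %*% t(M)
MTM <- M %*% TH %*% t(M)
L      <- t(chol(MTM))                                    # MTM = L L'
A      <- solve(L, t(solve(L, MSM)))                      # L^{-1} MSM L^{-T}
lambda <- min(eigen(A, symmetric = TRUE, only.values = TRUE)$values)
N <- 100
VETA <- if (lambda < 1 + 1 / (N - 1)) {
  MSM - (lambda - 1 / (N - 1)) * MTM   # shrink the correction, keep VETA psd
} else {
  MSM - MTM
}
stopifnot(all.equal(VETA, PSI, check.attributes = FALSE))
# ---------------------------------------------------------------------------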
if (extra) { attr(VETA, "lambda") <- lambda attr(VETA, "alpha") <- alpha attr(VETA, "MSM") <- MSM attr(VETA, "MTM") <- MTM } VETA } # compute EETA = E(Eta) = M %*% [YBAR - NU] lav_sam_eeta <- function(M = NULL, YBAR = NULL, NU = NULL) { EETA <- M %*% (YBAR - NU) EETA } # compute veta including quadratic/interaction terms lav_sam_veta2 <- function(FS = NULL, M = NULL, VETA = NULL, EETA = NULL, THETA = NULL, lv.names = NULL, lv.int.names = NULL, dummy.lv.names = character(0L), alpha.correction = 0L, lambda.correction = TRUE, extra = FALSE) { # small utility function: var() divided by N varn <- function(x, N) { var(x, use = "pairwise.complete.obs") * (N - 1) / N } if (length(lv.int.names) == 0L) { lav_msg_stop(gettext("lv.int.names is empty: no lv quadratic/interaction terms are provided")) } if (is.null(lv.names)) { lv.names <- paste("eta", seq_len(ncol(FS)), sep = "") } # MTM MTM <- M %*% THETA %*% t(M) # new in 0.6-16: make sure MTM is pd # (otherwise lav_matrix_symmetric_diff_smallest_root will fail) dummy.lv.idx <- which(lv.names %in% dummy.lv.names) if (length(dummy.lv.idx) > 0L) { MTM.nodummy <- MTM[-dummy.lv.idx, -dummy.lv.idx, drop = FALSE] MTM.nodummy <- zapsmall(lav_matrix_symmetric_force_pd( MTM.nodummy, tol = 1e-04 )) MTM[-dummy.lv.idx, -dummy.lv.idx] <- MTM.nodummy } else { MTM <- zapsmall(lav_matrix_symmetric_force_pd(MTM, tol = 1e-04)) } # augment to include intercept FS <- cbind(1, FS) N <- nrow(FS) MTM <- lav_matrix_bdiag(0, MTM) VETA <- lav_matrix_bdiag(0, VETA) EETA <- c(1, EETA) lv.names <- c("..int..", lv.names) nfac <- ncol(FS) idx1 <- rep(seq_len(nfac), each = nfac) idx2 <- rep(seq_len(nfac), times = nfac) NAMES <- paste(lv.names[idx1], lv.names[idx2], sep = ":") NAMES[seq_len(nfac)] <- lv.names FS2 <- FS[, idx1] * FS[, idx2] K.nfac <- lav_matrix_commutation(nfac, nfac) IK <- diag(nfac * nfac) + K.nfac EETA <- as.matrix(drop(EETA)) VETAkMTM <- VETA %x% MTM # normal version (for now): Gamma.ME22 <- IK %*% (MTM %x% MTM) # ingredients (normal ME case) Var.FS2 <- varn(FS2, N) Var.ETAkME <- (tcrossprod(EETA) %x% MTM + VETAkMTM) Var.MEkETA <- lav_matrix_commutation_pre_post(Var.ETAkME) Var.ME2 <- Gamma.ME22 cov.ETAkME.MEkETA <- lav_matrix_commutation_post(Var.ETAkME) cov.MEkETA.ETAkME <- t(cov.ETAkME.MEkETA) Var.ERROR <- (Var.ETAkME + Var.MEkETA + cov.ETAkME.MEkETA + cov.MEkETA.ETAkME + Var.ME2) # select only what we need colnames(Var.FS2) <- rownames(Var.FS2) <- NAMES colnames(Var.ERROR) <- rownames(Var.ERROR) <- NAMES lv.keep <- c(lv.names[-1], lv.int.names) Var.FS2 <- Var.FS2[lv.keep, lv.keep] Var.ERROR <- Var.ERROR[lv.keep, lv.keep] # apply small sample correction (if requested) if (alpha.correction > 0) { alpha.N1 <- alpha.correction / (N - 1) if (alpha.N1 > 1.0) { alpha.N1 <- 1.0 } else if (alpha.N1 < 0.0) { alpha.N1 <- 0.0 } Var.ERROR <- (1 - alpha.N1) * Var.ERROR alpha <- alpha.correction } else { alpha <- alpha.correction } if (lambda.correction) { # use Fuller (1987) approach to ensure VETA2 is positive lambda <- try(lav_matrix_symmetric_diff_smallest_root( Var.FS2, Var.ERROR ), silent = TRUE) if (inherits(lambda, "try-error")) { lav_msg_warn(gettext("failed to compute lambda")) VETA2 <- Var.FS2 - Var.ERROR # and hope for the best } else { #cutoff <- 1 + 1 / (N - 1) cutoff <- 1 + 2/N # be more conservative for VETA2 if (lambda < cutoff) { #lambda.star <- lambda - 1 / (N - 1) lambda.star <- max(c(0, lambda - 2/N)) VETA2 <- Var.FS2 - lambda.star * Var.ERROR } else { VETA2 <- Var.FS2 - Var.ERROR } } } else { VETA2 <- Var.FS2 - Var.ERROR } # extra attributes? 
if (extra) { attr(VETA2, "lambda") <- lambda attr(VETA2, "alpha") <- alpha attr(VETA2, "MSM") <- Var.FS2 attr(VETA2, "MTM") <- Var.ERROR } VETA2 } lav_sam_eeta2 <- function(EETA = NULL, VETA = NULL, lv.names = NULL, lv.int.names = NULL) { if (length(lv.int.names) == 0L) { lav_msg_stop(gettext("lv.int.names is empty: no lv quadratic/interaction terms are provided")) } if (is.null(lv.names)) { lv.names <- paste("eta", seq_len(ncol(VETA)), sep = "") } nfac <- nrow(VETA) idx1 <- rep(seq_len(nfac), each = nfac) idx2 <- rep(seq_len(nfac), times = nfac) NAMES <- c(lv.names, paste(lv.names[idx1], lv.names[idx2], sep = ":")) # E(\eta %x% \eta) EETA2 <- lav_matrix_vec(VETA) + EETA %x% EETA # add 1st order EETA2.aug <- c(EETA, EETA2) # select only what we need names(EETA2.aug) <- NAMES lv.keep <- c(lv.names, lv.int.names) EETA2.aug <- EETA2.aug[lv.keep] EETA2.aug } # compute veta including quadratic/interaction terms lav_sam_fs2 <- function(FS = NULL, lv.names = NULL, lv.int.names = NULL) { varn <- function(x, N) { var(x) * (N - 1) / N } if (length(lv.int.names) == 0L) { lav_msg_stop(gettext("lv.int.names is empty: no lv quadratic/interaction terms are provided")) } if (is.null(lv.names)) { lv.names <- paste("eta", seq_len(ncol(FS)), sep = "") } # augment to include intercept FS <- cbind(1, FS) N <- nrow(FS) lv.names <- c("..int..", lv.names) nfac <- ncol(FS) idx1 <- rep(seq_len(nfac), each = nfac) idx2 <- rep(seq_len(nfac), times = nfac) NAMES <- paste(lv.names[idx1], lv.names[idx2], sep = ":") FS2 <- FS[, idx1] * FS[, idx2] Var.FS2 <- varn(FS2, N) # select only what we need colnames(Var.FS2) <- rownames(Var.FS2) <- NAMES lv.main <- paste(lv.names[-1], "..int..", sep = ":") lv.keep <- c(lv.main, lv.int.names) Var.FS2 <- Var.FS2[lv.keep, lv.keep] Var.FS2 } # create consistent lavaan object, based on (filled in) PT lav_sam_step3_joint <- function(FIT = NULL, PT = NULL, sam.method = "local") { lavoptions <- FIT@Options lavoptions.joint <- lavoptions lavoptions.joint$optim.method <- "none" lavoptions.joint$optim.force.converged <- TRUE lavoptions.joint$check.gradient <- FALSE lavoptions.joint$check.start <- FALSE lavoptions.joint$check.post <- FALSE lavoptions.joint$rotation <- "none" lavoptions.joint$se <- "none" lavoptions.joint$store.vcov <- FALSE # we do this manually if (sam.method %in% c("local", "fsr")) { lavoptions.joint$baseline <- FALSE lavoptions.joint$sample.icov <- FALSE lavoptions.joint$h1 <- FALSE lavoptions.joint$test <- "none" lavoptions.joint$estimator <- "none" } else { lavoptions.joint$test <- lavoptions$test lavoptions.joint$estimator <- lavoptions$estimator } # set ustart values PT$ustart <- PT$est # as this is used if optim.method == "none" JOINT <- lavaan::lavaan(PT, slotOptions = lavoptions.joint, slotSampleStats = FIT@SampleStats, slotData = FIT@Data, verbose = FALSE ) JOINT } lav_sam_table <- function(JOINT = NULL, STEP1 = NULL, FIT.PA = FIT.PA, mm.args = list(), struc.args = list(), sam.method = "local", local.options = list(), global.options = list()) { MM.FIT <- STEP1$MM.FIT sam.mm.table <- data.frame( Block = seq_len(length(STEP1$mm.list)), Latent = sapply(MM.FIT, function(x) { paste(unique(unlist(x@pta$vnames$lv)), collapse = ",") }), Nind = sapply(MM.FIT, function(x) { length(unique(unlist(x@pta$vnames$ov))) }), # Estimator = sapply(MM.FIT, function(x) { x@Model@estimator} ), Chisq = sapply(MM.FIT, function(x) { x@test[[1]]$stat }), Df = sapply(MM.FIT, function(x) { x@test[[1]]$df }) ) # pvalue = sapply(MM.FIT, function(x) {x@test[[1]]$pvalue}) ) class(sam.mm.table) <- 
c("lavaan.data.frame", "data.frame") # extra info for @internal slot if (sam.method %in% c("local", "fsr")) { sam.struc.fit <- try( fitMeasures( FIT.PA, c( "chisq", "df", # "pvalue", "cfi", "rmsea", "srmr" ) ), silent = TRUE ) if (inherits(sam.struc.fit, "try-error")) { sam.struc.fit <- "(unable to obtain fit measures)" names(sam.struc.fit) <- "warning" } sam.mm.rel <- STEP1$REL } else { sam.struc.fit <- "no local fit measures available for structural part if sam.method is global" names(sam.struc.fit) <- "warning" sam.mm.rel <- numeric(0L) } SAM <- list( sam.method = sam.method, sam.local.options = local.options, sam.global.options = global.options, sam.mm.list = STEP1$mm.list, sam.mm.estimator = MM.FIT[[1]]@Model@estimator, sam.mm.args = mm.args, sam.mm.ov.names = lapply(MM.FIT, function(x) { x@pta$vnames$ov }), sam.mm.table = sam.mm.table, sam.mm.rel = sam.mm.rel, sam.struc.estimator = FIT.PA@Model@estimator, sam.struc.args = struc.args, sam.struc.fit = sam.struc.fit ) SAM } lavaan/R/lav_lavaan_step16_rotation.R0000644000176200001440000001762514627656441017317 0ustar liggesuserslav_lavaan_step16_rotation <- function(lavoptions = NULL, lavmodel = NULL, lavpartable = NULL, lavh1 = NULL, lavdata = NULL, x = NULL, lavvcov = NULL, VCOV = NULL, # nolint lavcache = NULL, lavimplied = NULL, lavsamplestats = NULL) { # # # # # # # # # # # # # 16. rotation # # # # # # # # # # # # # # if lavmodel@nefa > 0L and lavoptions$rotation not "none" # store unrotated solution in partable (column est.unrotated) # rotate lavmodel via lav_model_efa_rotate and overwrite column est # in partable # if lavoptions$se not in none, bootstrap, external, twostep # if lavoptions$rotation.se == "delta" # re-compute vcov with delta rule (*) # re-compute SE and store them in lavpartable (*) # else if lavoptions$rotation.se == "bordered" # create 'new' partable where the user = 7/77 parameters are free (*) # # (*) code too complicated to summarize here if ((.hasSlot(lavmodel, "nefa")) && (lavmodel@nefa > 0L) && (lavoptions$rotation != "none")) { # store unrotated solution in partable lavpartable$est.unrotated <- lavpartable$est lavpartable$se.unrotated <- lavpartable$se # rotate, and create new lavmodel if (lav_verbose()) { cat( "rotating EFA factors using rotation method =", toupper(lavoptions$rotation), "..." 
) } x.unrotated <- as.numeric(x) lavmodel.unrot <- lavmodel efa.out <- lav_model_efa_rotate( lavmodel = lavmodel, lavoptions = lavoptions ) # adapt partable: # - change 'free' column to reflect that user = 7/77 parameters are free # - save unrotated free column in free.unrotated lavpartable$free.unrotated <- lavpartable$free user7.idx <- which((lavpartable$user == 7L | lavpartable$user == 77L) & lavpartable$free == 0L) lavpartable$free[user7.idx] <- 1L lavpartable$free[lavpartable$free > 0L] <- seq_len(sum(lavpartable$free > 0L)) # create 'rotated' lavmodel, reflecting the 'new' free parameters lavmodel <- lav_model( lavpartable = lavpartable, lavoptions = lavoptions, th.idx = lavmodel@th.idx ) # add rotated information lavmodel@H <- efa.out$H lavmodel@lv.order <- efa.out$lv.order lavmodel@GLIST <- efa.out$GLIST # add con.jac information (if any) lavmodel@con.lambda <- lavmodel.unrot@con.lambda if (nrow(lavmodel.unrot@con.jac) > 0L) { con.jac <- rbind(lavmodel@ceq.JAC, lavmodel@cin.JAC) attr(con.jac, "inactive.idx") <- attr(lavmodel.unrot@con.jac, "inactive.idx") attr(con.jac, "cin.idx") <- attr(lavmodel.unrot@con.jac, "cin.idx") attr(con.jac, "ceq.idx") <- attr(lavmodel.unrot@con.jac, "ceq.idx") lavmodel@con.jac <- con.jac } # overwrite parameters in @ParTable$est lavpartable$est <- lav_model_get_parameters( lavmodel = lavmodel, type = "user", extra = TRUE ) if (lav_verbose()) { cat(" done.\n") } # VCOV rotated parameters if (!lavoptions$se %in% c("none", "bootstrap", "external", "two.step")) { if (lav_verbose()) { cat( "computing VCOV for se =", lavoptions$se, "and rotation.se =", lavoptions$rotation.se, "..." ) } # use delta rule to recompute vcov if (lavoptions$rotation.se == "delta") { # Jacobian JAC <- numDeriv::jacobian( # nolint func = lav_model_efa_rotate_x, x = x.unrotated, lavmodel = lavmodel.unrot, init.rot = lavmodel@H, lavoptions = lavoptions, type = "user", extra = FALSE, method.args = list(eps = 0.0050), method = "simple" ) # important! 
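# ---------------------------------------------------------------------------
# Illustration (not part of the package): the generic delta rule applied
# above, vcov(g(theta)) ~= J vcov(theta) J', with J the Jacobian of the
# (rotation) transformation g. Sketched with numDeriv on a made-up g():
library(numDeriv)
g <- function(theta) c(theta[1] * theta[2], theta[1] / theta[2])  # toy transform
theta <- c(2, 4)
V     <- matrix(c(.10, .02, .02, .05), 2, 2)   # made-up vcov of theta
J     <- numDeriv::jacobian(g, theta)
V.g   <- J %*% V %*% t(J)                      # delta-rule vcov of g(theta)
se.g  <- sqrt(diag(V.g))                       # delta-rule standard errors
# ---------------------------------------------------------------------------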
# force VCOV to be pd, before we transform (not very elegant) VCOV.in <- lav_matrix_symmetric_force_pd(lavvcov$vcov, # nolint tol = 1e-10 ) # apply Delta rule VCOV.user <- JAC %*% VCOV.in %*% t(JAC) # nolint # re-compute SE and store them in lavpartable tmp <- diag(VCOV.user) min.idx <- which(tmp < 0) if (length(min.idx) > 0L) { tmp[min.idx] <- as.numeric(NA) } tmp <- sqrt(tmp) # catch near-zero SEs (was ^(1/2) < 0.6) zero.idx <- which(tmp < .Machine$double.eps^(1 / 3)) if (length(zero.idx) > 0L) { tmp[zero.idx] <- 0.0 } lavpartable$se <- tmp # store rotated VCOV # lavvcov$vcov.unrotated <- lavvcov$vcov if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { free.idx <- which(lavpartable$free > 0L && !duplicated(lavpartable$free)) } else { free.idx <- which(lavpartable$free > 0L) } lavvcov$vcov <- VCOV.user[free.idx, free.idx, drop = FALSE] # rotation.se = "bordered" is the default } else if (lavoptions$rotation.se == "bordered") { # create 'border' for augmented information matrix x.rot <- lav_model_get_parameters(lavmodel) JAC <- numDeriv::jacobian( # nolint func = lav_model_efa_rotate_border_x, x = x.rot, lavmodel = lavmodel, lavoptions = lavoptions, lavpartable = lavpartable, # method.args = list(eps = 0.0005), # method = "simple") method = "Richardson" ) # store JAC lavmodel@ceq.efa.JAC <- JAC # no other constraints if (length(lavmodel@ceq.linear.idx) == 0L && length(lavmodel@ceq.nonlinear.idx) == 0L && length(lavmodel@cin.linear.idx) == 0L && length(lavmodel@cin.nonlinear.idx) == 0L) { lavmodel@con.jac <- JAC attr(lavmodel@con.jac, "inactive.idx") <- integer(0L) attr(lavmodel@con.jac, "ceq.idx") <- seq_len(nrow(JAC)) attr(lavmodel@con.jac, "cin.idx") <- integer(0L) lavmodel@con.lambda <- rep(0, nrow(JAC)) # other constraints } else { inactive.idx <- attr(lavmodel@con.jac, "inactive.idx") ceq.idx <- attr(lavmodel@con.jac, "ceq.idx") cin.idx <- attr(lavmodel@con.jac, "cin.idx") lambda <- lavmodel@con.lambda nbord <- nrow(JAC) # reconstruct con.jac CON.JAC <- rbind(JAC, lavmodel@ceq.JAC, lavmodel@cin.JAC) # nolint attr(CON.JAC, "cin.idx") <- cin.idx + nbord # nolint attr(CON.JAC, "ceq.idx") <- c(1:nbord, ceq.idx + nbord) # nolint attr(CON.JAC, "inactive.idx") <- inactive.idx + nbord # nolint lavmodel@con.jac <- CON.JAC lavmodel@con.lambda <- c(rep(0, nbord), lambda) } # compute VCOV, taking 'rotation constraints' into account VCOV <- lav_model_vcov( # nolint lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavpartable = lavpartable, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1 ) # compute SE and store them in lavpartable tmp <- lav_model_vcov_se( lavmodel = lavmodel, lavpartable = lavpartable, VCOV = VCOV ) lavpartable$se <- tmp # store rotated VCOV in lavvcov tmp.attr <- attributes(VCOV) VCOV1 <- VCOV # nolint attributes(VCOV1) <- tmp.attr["dim"] # nolint # lavvcov$vcov.unrotated <- lavvcov$vcov lavvcov$vcov <- VCOV1 } # bordered if (lav_verbose()) { cat(" done.\n") } } # vcov } # efa list( lavpartable = lavpartable, lavmodel = lavmodel, lavvcov = lavvcov ) } lavaan/R/lav_lavaan_step09_model.R0000644000176200001440000001043214627656441016547 0ustar liggesuserslav_lavaan_step09_model <- function(slotModel = NULL, # nolint lavoptions = NULL, lavpartable = NULL, lavsamplestats = NULL, lavdata = NULL) { # # # # # # # # # # # # # 9. 
lavmodel # # # # # # # # # # # # # # if slotModel not NULL # copy to lavmodel # else # compute lavmodel via lav_model # if lavdata@data.type == "none" and categorical mode # set parameters in lavmodel via lav_model_set_parameters and # re-adjust start column in lavpartable # if differences between start and ustart column (in lavpartable) # if lavmodel$parameterization == "delta" # if user specified delta values : ** warning ** # if lavmodel$parameterization == "theta" # if user specified theta values : ** warning ** if (!is.null(slotModel)) { lavmodel <- slotModel } else { if (lav_verbose()) { cat("lavmodel ...") } lavmodel <- lav_model( lavpartable = lavpartable, lavoptions = lavoptions, th.idx = lavsamplestats@th.idx ) # no longer needed: x values are in start # cov.x = lavsamplestats@cov.x, # mean.x = lavsamplestats@mean.x) # if no data, call lav_model_set_parameters once (for categorical case) if (lavdata@data.type == "none" && lavmodel@categorical) { lavmodel <- lav_model_set_parameters( lavmodel = lavmodel, x = lav_model_get_parameters(lavmodel) ) # re-adjust parameter table lavpartable$start <- lav_model_get_parameters(lavmodel, type = "user") # check/warn if theta/delta values make sense if (!all(lavpartable$start == lavpartable$ustart)) { if (lavmodel@parameterization == "delta") { # did the user specify theta values? user.var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs & lavpartable$lhs %in% unlist(attr(lavpartable, "vnames")$ov.ord) & lavpartable$user == 1L) if (length(user.var.idx)) { lav_msg_warn( gettextf("variance (theta) values for categorical variables are ignored if parameterization = %s!", "'delta'") ) } } else if (lavmodel@parameterization == "theta") { # did the user specify theta values? user.delta.idx <- which(lavpartable$op == "~*~" & lavpartable$lhs == lavpartable$rhs & lavpartable$lhs %in% unlist(attr(lavpartable, "vnames")$ov.ord) & lavpartable$user == 1L) if (length(user.delta.idx)) { lav_msg_warn( gettextf("scaling (~*~) values for categorical variables are ignored if parameterization = %s!", "'theta'") ) } } } } if (lav_verbose()) { cat(" done.\n") } } list( lavpartable = lavpartable, lavmodel = lavmodel ) } # 9b. 
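# ---------------------------------------------------------------------------
# Illustration (not part of the package code path): the delta/theta choice
# checked above is the user-facing parameterization= argument for ordered
# endogenous variables. As the warnings indicate, user-set residual variances
# (~~) are ignored under "delta", and user-set scaling factors (~*~) are
# ignored under "theta". A hedged sketch with lavaan's public API (model and
# data names are made up):
library(lavaan)
Data <- simulateData('f =~ .7*y1 + .7*y2 + .7*y3 + .7*y4', sample.nobs = 500)
Data[] <- lapply(Data, cut, breaks = 3, labels = FALSE)  # coarsen to 3 levels
fit <- cfa('f =~ y1 + y2 + y3 + y4', data = Data,
           ordered = c("y1", "y2", "y3", "y4"),
           parameterization = "theta")   # default is "delta"
# ---------------------------------------------------------------------------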
bounds for EFA -- to force diag(LAMBDA) to be positive (new in 0.6-7) # if((.hasSlot(lavmodel, "nefa")) && (lavmodel@nefa > 0L) && # (lavoptions$rotation != "none")) { # # # add lower column # if (is.null(lavpartable$lower)) { # lavpartable$lower <- rep(-Inf, length(lavpartable$lhs)) # } # efa.values <- lav_partable_efa_values(lavpartable) # group.values <- lav_partable_group_values(lavpartable) # for (g in seq_len(lavdata@ngroups)) { # for (set in seq_len(lavmodel@nefa)) { # lv.efa <- # unique(lavpartable$lhs[lavpartable$op == "=~" & # lavpartable$block == g & # lavpartable$efa == efa.values[set] ]) # for (f in seq_len(length(lv.efa))) { # lambda.idx <- which(lavpartable$lhs == lv.efa[f] & # lavpartable$op == "=~" & # lavpartable$group == group.values[g]) # # get diagonal element of LAMBDA # midx <- lambda.idx[f] # diagonal element of LAMBDA # lavpartable$lower[midx] <- 0 # } # factors # } # sets # } # groups # } lavaan/R/lav_model_information.R0000644000176200001440000005032014627656441016426 0ustar liggesusers# here, we compute various versions of the `information' matrix # NOTE: # 1) we ALWAYS compute the UNIT information (not the total information) # # 2) by default, we ignore the constraints (we deal with this when we # take the inverse later on) lav_model_information <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, Delta = NULL, lavcache = NULL, lavoptions = NULL, extra = FALSE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { if (.hasSlot(lavmodel, "estimator")) { estimator <- lavmodel@estimator } else { estmator <- lavoptions$estimator } information <- lavoptions$information[1] # ALWAYS used the first one # called can control it # rotation? # if(!is.null(lavoptions$rotation) && lavoptions$rotation != "none") { # use.ginv <- TRUE # } if (is.null(lavh1)) { lavh1 <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) } # compute information matrix if (information == "observed") { if (lavsamplestats@missing.flag || lavdata@nlevels > 1L) { group.weight <- FALSE } else { group.weight <- TRUE } E <- lav_model_information_observed( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, group.weight = group.weight, lavoptions = lavoptions, extra = extra, augmented = augmented, inverted = inverted, use.ginv = use.ginv ) } else if (information == "expected") { E <- lav_model_information_expected( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions, extra = extra, augmented = augmented, inverted = inverted, use.ginv = use.ginv ) } else if (information == "first.order") { E <- lav_model_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache, lavoptions = lavoptions, # extra = extra, check.pd = FALSE, augmented = augmented, inverted = inverted, use.ginv = use.ginv ) } # information, augmented information, or inverted information E } # fisher/expected information # # information = Delta' I1 Delta, where I1 is the unit information of # the saturated model (evaluated either at the structured or unstructured # estimates) lav_model_information_expected <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, Delta = NULL, lavcache = NULL, extra = 
FALSE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { if (inverted) { augmented <- TRUE } # 1. Delta if (is.null(Delta)) { Delta <- computeDelta(lavmodel = lavmodel) } # 2. H1 information (single level) if (lavdata@nlevels == 1L) { A1 <- lav_model_h1_information_expected( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache ) } else { # force conditional.x = FALSE lavimplied <- lav_model_implied_cond2uncond(lavimplied) } # 3. compute Information per group Info.group <- vector("list", length = lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { # note LISREL documentation suggests (Ng - 1) instead of Ng... fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal # multilevel if (lavdata@nlevels > 1L) { # here, we assume only 2 levels, at [[1]] and [[2]] if (lavoptions$h1.information[1] == "structured") { Sigma.W <- lavimplied$cov[[(g - 1) * 2 + 1]] Mu.W <- lavimplied$mean[[(g - 1) * 2 + 1]] Sigma.B <- lavimplied$cov[[(g - 1) * 2 + 2]] Mu.B <- lavimplied$mean[[(g - 1) * 2 + 2]] } else { Sigma.W <- lavh1$implied$cov[[(g - 1) * 2 + 1]] Mu.W <- lavh1$implied$mean[[(g - 1) * 2 + 1]] Sigma.B <- lavh1$implied$cov[[(g - 1) * 2 + 2]] Mu.B <- lavh1$implied$mean[[(g - 1) * 2 + 2]] } Lp <- lavdata@Lp[[g]] Info.g <- lav_mvnorm_cluster_information_expected_delta( Lp = Lp, Delta = Delta[[g]], Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, Sinv.method = "eigen" ) Info.group[[g]] <- fg * Info.g } else { # compute information for this group if (lavmodel@estimator %in% c("DWLS", "ULS")) { # diagonal weight matrix Delta2 <- sqrt(A1[[g]]) * Delta[[g]] Info.group[[g]] <- fg * crossprod(Delta2) } else { # full weight matrix Info.group[[g]] <- fg * (crossprod(Delta[[g]], A1[[g]]) %*% Delta[[g]]) } } } # g # 4. assemble over groups Information <- Info.group[[1]] if (lavsamplestats@ngroups > 1) { for (g in 2:lavsamplestats@ngroups) { Information <- Information + Info.group[[g]] } } # 5. augmented information? if (augmented) { Information <- lav_model_information_augment_invert( lavmodel = lavmodel, information = Information, inverted = inverted, use.ginv = use.ginv ) } if (extra) { attr(Information, "Delta") <- Delta attr(Information, "WLS.V") <- A1 # unweighted } # possibly augmented/inverted Information } # only for Mplus MLM lav_model_information_expected_MLM <- function(lavmodel = NULL, lavsamplestats = NULL, Delta = NULL, extra = FALSE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { if (inverted) { augmented <- TRUE } if (is.null(Delta)) { Delta <- computeDelta(lavmodel = lavmodel) } # compute A1 A1 <- vector("list", length = lavsamplestats@ngroups) if (lavmodel@group.w.free) { GW <- unlist(computeGW(lavmodel = lavmodel)) } for (g in 1:lavsamplestats@ngroups) { A1[[g]] <- lav_mvnorm_h1_information_expected( sample.cov = lavsamplestats@cov[[g]], sample.cov.inv = lavsamplestats@icov[[g]], x.idx = lavsamplestats@x.idx[[g]] ) # the same as GLS... (except for the N/N-1 scaling) if (lavmodel@group.w.free) { # unweight!! 
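# ---------------------------------------------------------------------------
# Illustration (not part of the package): the DWLS/ULS shortcut used above.
# When the weight matrix A1 is diagonal (stored as a vector w of weights),
# t(Delta) %*% diag(w) %*% Delta == crossprod(sqrt(w) * Delta), which avoids
# forming the full weight matrix. Toy check:
set.seed(42)
Delta <- matrix(rnorm(10 * 3), 10, 3)  # toy Jacobian: 10 moments, 3 parameters
w     <- runif(10, .5, 2)              # toy diagonal weights
full  <- t(Delta) %*% diag(w) %*% Delta
fast  <- crossprod(sqrt(w) * Delta)    # row-wise scaling, then crossprod
stopifnot(all.equal(full, fast))
# ---------------------------------------------------------------------------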
a <- exp(GW[g]) / lavsamplestats@nobs[[g]] # a <- exp(GW[g]) * lavsamplestats@ntotal / lavsamplestats@nobs[[g]] A1[[g]] <- lav_matrix_bdiag(matrix(a, 1, 1), A1[[g]]) } } # compute Information per group Info.group <- vector("list", length = lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal # compute information for this group Info.group[[g]] <- fg * (t(Delta[[g]]) %*% A1[[g]] %*% Delta[[g]]) } # assemble over groups Information <- Info.group[[1]] if (lavsamplestats@ngroups > 1) { for (g in 2:lavsamplestats@ngroups) { Information <- Information + Info.group[[g]] } } # augmented information? if (augmented) { Information <- lav_model_information_augment_invert( lavmodel = lavmodel, information = Information, inverted = inverted, use.ginv = use.ginv ) } if (extra) { attr(Information, "Delta") <- Delta attr(Information, "WLS.V") <- A1 # unweighted } Information } lav_model_information_observed <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, lavoptions = NULL, extra = FALSE, group.weight = TRUE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { if (inverted) { augmented <- TRUE } # observed.information: # - "hessian": second derivative of objective function # - "h1": observed information matrix of saturated (h1) model, # pre- and post-multiplied by the jacobian of the model # parameters (Delta), usually evaluated at the structured # sample statistics (but this depends on the h1.information # option) if (!is.null(lavoptions) && !is.null(lavoptions$observed.information[1]) && lavoptions$observed.information[1] == "h1") { observed.information <- "h1" } else { observed.information <- "hessian" } # HESSIAN based if (observed.information == "hessian") { Hessian <- lav_model_hessian( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavcache = lavcache, group.weight = group.weight, ceq.simple = FALSE ) # NOTE! What is the relationship between the Hessian of the objective # function, and the `information' matrix (unit or total) # 1. in lavaan, we ALWAYS minimize, so the Hessian is already pos def # 2. currently, all estimators give unit information, except MML and PML # so, no need to divide by N Information <- Hessian # divide by 'N' for MML and PML if (lavmodel@estimator == "PML" || lavmodel@estimator == "MML") { Information <- Information / lavsamplestats@ntotal # HJ: Does this need to be divided by sum of weights instead? } # if multilevel, we should divide by 'J', the number of clusters if (lavdata@nlevels > 1L) { NC <- 0 for (g in 1:lavsamplestats@ngroups) { NC <- NC + lavdata@Lp[[g]]$nclusters[[2]] } Information <- Information * lavsamplestats@ntotal / NC } } # using 'observed h1 information' # we need DELTA and 'WLS.V' (=A1) if (observed.information == "h1" || extra) { # 1. Delta Delta <- computeDelta(lavmodel = lavmodel) # 2. 
H1 information A1 <- lav_model_h1_information_observed( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache ) } if (observed.information == "h1") { # compute Information per group Info.group <- vector("list", length = lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal # compute information for this group if (lavmodel@estimator %in% c("DWLS", "ULS")) { # diagonal weight matrix Delta2 <- sqrt(A1[[g]]) * Delta[[g]] Info.group[[g]] <- fg * crossprod(Delta2) } else { # full weight matrix Info.group[[g]] <- fg * (crossprod(Delta[[g]], A1[[g]]) %*% Delta[[g]]) } } # assemble over groups Information <- Info.group[[1]] if (lavsamplestats@ngroups > 1) { for (g in 2:lavsamplestats@ngroups) { Information <- Information + Info.group[[g]] } } } # augmented information? if (augmented) { Information <- lav_model_information_augment_invert( lavmodel = lavmodel, information = Information, inverted = inverted, use.ginv = use.ginv ) } if (extra) { attr(Information, "Delta") <- Delta attr(Information, "WLS.V") <- A1 } Information } # outer product of the case-wise scores (gradients) # HJ 18/10/23: Need to divide sum of crossproduct of individual log-likelihoods # by sum of weights rather than sample size. lav_model_information_firstorder <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavimplied = NULL, lavh1 = NULL, lavcache = NULL, lavoptions = NULL, check.pd = FALSE, extra = FALSE, augmented = FALSE, inverted = FALSE, use.ginv = FALSE) { if (!lavmodel@estimator %in% c("ML", "PML")) { lav_msg_stop(gettext( "information = \"first.order\" not available for estimator"), sQuote(lavmodel@estimator)) } if (inverted) { augmented <- TRUE } B0.group <- vector("list", lavsamplestats@ngroups) # 1. Delta Delta <- computeDelta(lavmodel = lavmodel) # 2. H1 information B1 <- lav_model_h1_information_firstorder( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavimplied = lavimplied, lavh1 = lavh1, lavcache = lavcache ) # 3. compute Information per group Info.group <- vector("list", length = lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { # unweighted (needed in lav_test?) B0.group[[g]] <- t(Delta[[g]]) %*% B1[[g]] %*% Delta[[g]] # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # NOTE: UNSURE ABOUT THIS PART. WHAT IS THE ROLE OF fg? if (.hasSlot(lavdata, "weights")) { wt <- lavdata@weights[[g]] } else { # pre-0.6 object wt <- NULL } if (is.null(wt)) { fg <- lavsamplestats@nobs[[g]] / lavsamplestats@ntotal } else { totalwt <- sum(unlist(lavdata@weights)) fg <- sum(wt) / totalwt } # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # compute information for this group Info.group[[g]] <- fg * B0.group[[g]] } # 4. assemble over groups Information <- Info.group[[1]] if (lavsamplestats@ngroups > 1) { for (g in 2:lavsamplestats@ngroups) { Information <- Information + Info.group[[g]] } } # NOTE: for MML and PML, we get 'total' information (instead of unit) divide # by 'N' for MML and PML. 
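# ---------------------------------------------------------------------------
# Illustration (not part of the package): "first.order" information is the
# average outer product of casewise scores, B = crossprod(SC) / N. Toy check
# for a univariate normal (mu, sigma2), whose unit expected information is
# diag(1/sigma2, 1/(2*sigma2^2)):
set.seed(7)
y <- rnorm(5e4, mean = 1, sd = 2)
mu <- 1; sigma2 <- 4
SC <- cbind((y - mu) / sigma2,                       # score wrt mu
            ((y - mu)^2 - sigma2) / (2 * sigma2^2))  # score wrt sigma2
B1 <- crossprod(SC) / length(y)
round(B1, 3)   # ~ diag(1/4, 1/32) = diag(0.25, 0.03125)
# ---------------------------------------------------------------------------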
For weighted sample, use the sum of weights # instead of sample size # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> if (lavmodel@estimator == "PML" || lavmodel@estimator == "MML") { if (!.hasSlot(lavdata, "sampling.weights") || length(lavdata@sampling.weights) == 0) { the_N <- lavsamplestats@ntotal } else { the_N <- sum(unlist(lavdata@weights)) } Information <- Information / the_N for (g in 1:lavsamplestats@ngroups) { B0.group[[g]] <- B0.group[[g]] / the_N } } # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # augmented information? if (augmented) { Information <- lav_model_information_augment_invert( lavmodel = lavmodel, information = Information, check.pd = check.pd, inverted = inverted, use.ginv = use.ginv ) } if (extra) { attr(Information, "B0.group") <- B0.group attr(Information, "Delta") <- Delta attr(Information, "WLS.V") <- B1 } Information } # create augmented information matrix (if needed), and take the inverse # (if inverted = TRUE), returning only the [1:npar, 1:npar] elements # # rm.idx is used by lav_sam_step2_se; it used when the structural model # contains more parameters than the joint model; therefore, information # will be 'too small', and we need to remove some columns in H lav_model_information_augment_invert <- function(lavmodel = NULL, information = NULL, inverted = FALSE, check.pd = FALSE, use.ginv = FALSE, rm.idx = integer(0L)) { npar <- nrow(information) is.augmented <- FALSE # handle constraints if (nrow(lavmodel@con.jac) > 0L) { H <- lavmodel@con.jac if (length(rm.idx) > 0L) { H <- H[, -rm.idx, drop = FALSE] } inactive.idx <- attr(H, "inactive.idx") lambda <- lavmodel@con.lambda # lagrangean coefs if (length(inactive.idx) > 0L) { H <- H[-inactive.idx, , drop = FALSE] lambda <- lambda[-inactive.idx] } if (nrow(H) > 0L) { is.augmented <- TRUE H0 <- matrix(0, nrow(H), nrow(H)) H10 <- matrix(0, ncol(information), nrow(H)) DL <- 2 * diag(lambda, nrow(H), nrow(H)) # FIXME: better include inactive + slacks?? 
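# ---------------------------------------------------------------------------
# Illustration (not part of the package): why bordering works. For an
# equality constraint with Jacobian H, the [1:npar, 1:npar] block of the
# inverted bordered matrix equals the constrained covariance
# I^{-1} - I^{-1} H' (H I^{-1} H')^{-1} H I^{-1}. This sketch drops the
# Lagrange-multiplier block (DL) that the full augmented matrix above
# carries for inequality constraints. Toy check:
I.mat <- matrix(c(4, 1, 0,
                  1, 3, 1,
                  0, 1, 2), 3, 3)    # made-up (unit) information, pos. def.
H     <- matrix(c(1, -1, 0), 1, 3)   # constraint: theta1 - theta2 == 0
A     <- rbind(cbind(I.mat, t(H)),
               cbind(H, 0))          # bordered ("augmented") matrix
V.border <- solve(A)[1:3, 1:3]
Ii     <- solve(I.mat)
V.proj <- Ii - Ii %*% t(H) %*% solve(H %*% Ii %*% t(H)) %*% H %*% Ii
stopifnot(all.equal(V.border, V.proj))
# ---------------------------------------------------------------------------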
# INFO <- information # or INFO <- information + crossprod(H) E3 <- rbind( cbind(INFO, H10, t(H)), cbind(t(H10), DL, H0), cbind(H, H0, H0) ) information <- E3 } } else if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { H <- t(lav_matrix_orthogonal_complement(lavmodel@ceq.simple.K)) if (length(rm.idx) > 0L) { H <- H[, -rm.idx, drop = FALSE] } if (nrow(H) > 0L) { is.augmented <- TRUE H0 <- matrix(0, nrow(H), nrow(H)) H10 <- matrix(0, ncol(information), nrow(H)) INFO <- information + crossprod(H) E2 <- rbind( cbind(INFO, t(H)), cbind(H, H0) ) information <- E2 } } if (check.pd) { eigvals <- eigen(information, symmetric = TRUE, only.values = TRUE )$values if (any(eigvals < -1 * .Machine$double.eps^(3 / 4))) { lav_msg_warn(gettext( "information matrix is not positive definite; the model may not be identified")) } } if (inverted) { if (is.augmented) { # note: default tol in MASS::ginv is sqrt(.Machine$double.eps) # which seems a bit too conservative # from 0.5-20, we changed this to .Machine$double.eps^(3/4) information <- try( MASS::ginv(information, tol = .Machine$double.eps^(3 / 4) )[1:npar, 1:npar, drop = FALSE ], silent = TRUE ) } else { if (use.ginv) { information <- try( MASS::ginv(information, tol = .Machine$double.eps^(3 / 4) ), silent = TRUE ) } else { information <- try(solve(information), silent = TRUE) } } } # augmented/inverted information information } lav_model_information_expected_2l <- function(lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavimplied = NULL, lavh1 = NULL, g = 1L) { # see Yuan & Bentler (2002), p.549 top line # I.j = nj. Delta.mu' sigma.j.inv + # Delta.sigma.j' W.j Delta.sigma.j + # (nj-1) Delta.sigma.w' W.w Delta.sigma.w # # where # - sigma.j = sigma.w + n.j * sigma.b # - W.w = 1/2 * D'(sigma.w.inv %x% sigma.w.inv) D # - W.j = 1/2 * D'(sigma.j.inv %x% sigma.j.inv) D } lavaan/R/lav_mvreg.R0000644000176200001440000004720614627656441014052 0ustar liggesusers# the multivariate linear model using maximum likelihood # 1) loglikelihood (from raw data, or sample statistics) # 2) derivatives with respect to Beta, res.cov, vech(res.cov) # 3) casewise scores with respect to Beta, vech(res.cov), Beta + vech(res.cov) # 4) hessian Beta + vech(res.cov) # 5) information h0 Beta + vech(res.cov) # 5a: (unit) expected information # 5b: (unit) observed information # 5c: (unit) first.order information # YR 24 Mar 2016: first version # YR 20 Jan 2017: removed added 'N' in many equations, to be consistent with # lav_mvnorm_* # YR 18 Okt 2018: add 'information' functions, change arguments # (X -> eXo, Sigma -> res.cov, Beta -> res.int + res.slopes) # 1. loglikelihood # 1a. 
input is raw data lav_mvreg_loglik_data <- function(Y = NULL, eXo = NULL, # no intercept Beta = NULL, res.int = NULL, res.slopes = NULL, res.cov = NULL, casewise = FALSE, Sinv.method = "eigen") { Y <- unname(Y) Q <- NCOL(Y) N <- NROW(Y) X <- cbind(1, unname(eXo)) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } if (casewise) { LOG.2PI <- log(2 * pi) # invert res.cov if (Sinv.method == "chol") { cS <- chol(res.cov) icS <- backsolve(cS, diag(Q)) logdet <- -2 * sum(log(diag(icS))) RES <- Y - X %*% Beta DIST <- rowSums((RES %*% icS)^2) } else { res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(res.cov.inv, "logdet") RES <- Y - X %*% Beta DIST <- rowSums(RES %*% res.cov.inv * RES) } loglik <- -(Q * LOG.2PI + logdet + DIST) / 2 } else { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(res.cov.inv, "logdet") RES <- Y - X %*% Beta # TOTAL <- TR( (Y - X%*%Beta) %*% res.cov.inv %*% t(Y - X%*%Beta) ) TOTAL <- sum(rowSums(RES %*% res.cov.inv * RES)) loglik <- -(N * Q / 2) * log(2 * pi) - (N / 2) * logdet - (1 / 2) * TOTAL } loglik } # 2b. input are sample statistics (res.int, res.slopes, res.cov, N) only lav_mvreg_loglik_samplestats <- function(sample.res.int = NULL, sample.res.slopes = NULL, sample.res.cov = NULL, sample.mean.x = NULL, sample.cov.x = NULL, sample.nobs = NULL, Beta = NULL, # optional res.int = NULL, res.slopes = NULL, res.cov = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Q <- NCOL(sample.res.cov) N <- sample.nobs LOG.2PI <- log(2 * pi) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # construct 'saturated' (sample-based) B sample.B <- rbind(matrix(sample.res.int, nrow = 1), t(sample.res.slopes)) # construct sample.xx = 1/N*crossprod(X1) (including intercept) sample.xx <- rbind( cbind(1, matrix(sample.mean.x, nrow = 1, )), cbind( matrix(sample.mean.x, ncol = 1), sample.cov.x + tcrossprod(sample.mean.x) ) ) # res.cov.inv if (is.null(res.cov.inv)) { res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = TRUE, Sinv.method = Sinv.method ) logdet <- attr(res.cov.inv, "logdet") } else { logdet <- attr(res.cov.inv, "logdet") if (is.null(logdet)) { # compute - ln|res.cov.inv| ev <- eigen(res.cov.inv, symmetric = TRUE, only.values = TRUE) logdet <- -1 * sum(log(ev$values)) } } # tr(res.cov^{-1} %*% S) DIST1 <- sum(res.cov.inv * sample.res.cov) # tr( res.cov^{-1} (B-beta)' X'X (B-beta) Diff <- sample.B - Beta DIST2 <- sum(res.cov.inv * crossprod(Diff, sample.xx) %*% Diff) loglik <- -(N / 2) * (Q * log(2 * pi) + logdet + DIST1 + DIST2) loglik } # 2. Derivatives # 2a. 
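# ---------------------------------------------------------------------------
# Illustration (not part of the package): the two loglik routes above agree.
# Casewise: sum of multivariate-normal log-densities of the residuals.
# Samplestats: -(N/2) * (Q log(2 pi) + log|Sigma| + tr(Sigma^{-1} S)), here
# evaluated at the OLS/ML solution so the (B - Beta) term vanishes. Toy check:
set.seed(11)
N <- 200; X <- cbind(1, rnorm(N)); B <- rbind(c(1, 2), c(.5, -.3))
Y <- X %*% B + matrix(rnorm(N * 2), N, 2) %*% chol(matrix(c(1, .3, .3, 1), 2))
QR <- qr(X); RES <- qr.resid(QR, Y)
Sigma  <- crossprod(RES) / N                # ML residual covariance
Q      <- ncol(Y)
Si     <- solve(Sigma)
logdet <- as.numeric(determinant(Sigma, logarithm = TRUE)$modulus)
# route 1: casewise
ll1 <- sum(-(Q * log(2 * pi) + logdet + rowSums(RES %*% Si * RES)) / 2)
# route 2: sufficient statistics (at the ML solution, tr(Si %*% Sigma) = Q)
ll2 <- -(N / 2) * (Q * log(2 * pi) + logdet + sum(Si * Sigma))
stopifnot(all.equal(ll1, ll2))
# ---------------------------------------------------------------------------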
derivative logl with respect to Beta (=intercepts and slopes) lav_mvreg_dlogl_dbeta <- function(Y = NULL, eXo = NULL, Beta = NULL, res.int = NULL, res.slopes = NULL, res.cov = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Y <- unname(Y) X <- cbind(1, unname(eXo)) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # substract 'X %*% Beta' from Y RES <- Y - X %*% Beta # derivative dbeta <- as.numeric(t(X) %*% RES %*% res.cov.inv) dbeta } # 2b: derivative logl with respect to res.cov (full matrix, ignoring symmetry) lav_mvreg_dlogl_drescov <- function(Y = NULL, eXo = NULL, Beta = NULL, res.cov = NULL, res.int = NULL, res.slopes = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Y <- unname(Y) N <- NROW(Y) X <- cbind(1, unname(eXo)) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # res.cov.in if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # substract 'X %*% Beta' from Y RES <- Y - X %*% Beta # W.tilde W.tilde <- crossprod(RES) / N # derivative dres.cov <- -(N / 2) * (res.cov.inv - (res.cov.inv %*% W.tilde %*% res.cov.inv)) dres.cov } # 2c: derivative logl with respect to vech(res.cov) lav_mvreg_dlogl_dvechrescov <- function(Y = NULL, eXo = NULL, Beta = NULL, res.int = NULL, res.slopes = NULL, res.cov = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Y <- unname(Y) N <- NROW(Y) X <- cbind(1, unname(eXo)) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # substract 'X %*% Beta' from Y RES <- Y - X %*% Beta # W.tilde W.tilde <- crossprod(RES) / N # derivative dres.cov <- -(N / 2) * (res.cov.inv - (res.cov.inv %*% W.tilde %*% res.cov.inv)) dvechres.cov <- as.numeric(lav_matrix_duplication_pre( as.matrix(lav_matrix_vec(dres.cov)) )) dvechres.cov } # 3. Casewise scores # 3a: casewise scores with respect to Beta (=intercepts and slopes) # column order: Y1_int, Y1_x1, Y1_x2, ...| Y2_int, Y2_x1, Y2_x2, ... 
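# ---------------------------------------------------------------------------
# Illustration (not part of the package): numerical check of the analytic
# gradient above, d logl / d Beta = t(X) %*% (Y - X Beta) %*% Sigma^{-1},
# vectorized column-major, using numDeriv on the casewise loglik. Sigma is
# fixed to the identity here, so log|Sigma| = 0 and the loglik is exact.
library(numDeriv)
set.seed(12)
N <- 50; X <- cbind(1, rnorm(N))
Y <- X %*% rbind(c(1, 2), c(.5, -.3)) + matrix(rnorm(N * 2), N, 2)
Si <- diag(2)
ll <- function(beta) {          # loglik as a function of vec(Beta)
  RES <- Y - X %*% matrix(beta, ncol = 2)
  sum(-(2 * log(2 * pi) + rowSums(RES %*% Si * RES)) / 2)
}
beta0 <- c(0.9, 0.4, 1.8, -0.2)  # arbitrary evaluation point
RES0  <- Y - X %*% matrix(beta0, ncol = 2)
g.analytic <- as.numeric(t(X) %*% RES0 %*% Si)
stopifnot(all.equal(g.analytic, numDeriv::grad(ll, beta0), tolerance = 1e-6))
# ---------------------------------------------------------------------------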
| lav_mvreg_scores_beta <- function(Y = NULL, eXo = NULL, Beta = NULL, res.int = NULL, res.slopes = NULL, res.cov = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Y <- unname(Y) Q <- NCOL(Y) X <- cbind(1, unname(eXo)) P <- NCOL(X) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # substract Mu RES <- Y - X %*% Beta # post-multiply with res.cov.inv RES <- RES %*% res.cov.inv SC.Beta <- X[, rep(1:P, times = Q), drop = FALSE] * RES[, rep(1:Q, each = P), drop = FALSE] SC.Beta } # 3b: casewise scores with respect to vech(res.cov) lav_mvreg_scores_vech_sigma <- function(Y = NULL, eXo = NULL, Beta = NULL, res.int = NULL, res.slopes = NULL, res.cov = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Y <- unname(Y) Q <- NCOL(Y) X <- cbind(1, unname(eXo)) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # vech(res.cov.inv) isigma <- lav_matrix_vech(res.cov.inv) # substract X %*% Beta RES <- Y - X %*% Beta # postmultiply with res.cov.inv RES <- RES %*% res.cov.inv # tcrossprod idx1 <- lav_matrix_vech_col_idx(Q) idx2 <- lav_matrix_vech_row_idx(Q) Z <- RES[, idx1] * RES[, idx2] # substract isigma from each row SC <- t(t(Z) - isigma) # adjust for vech (and avoiding the 1/2 factor) SC[, lav_matrix_diagh_idx(Q)] <- SC[, lav_matrix_diagh_idx(Q)] / 2 SC } # 3c: casewise scores with respect to beta + vech(res.cov) lav_mvreg_scores_beta_vech_sigma <- function(Y = NULL, eXo = NULL, Beta = NULL, res.int = NULL, res.slopes = NULL, res.cov = NULL, Sinv.method = "eigen", res.cov.inv = NULL) { Y <- unname(Y) Q <- NCOL(Y) X <- cbind(1, unname(eXo)) P <- NCOL(X) # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # vech(res.cov.inv) isigma <- lav_matrix_vech(res.cov.inv) # substract X %*% Beta RES <- Y - X %*% Beta # postmultiply with res.cov.inv RES <- RES %*% res.cov.inv SC.Beta <- X[, rep(1:P, times = Q), drop = FALSE] * RES[, rep(1:Q, each = P), drop = FALSE] # tcrossprod idx1 <- lav_matrix_vech_col_idx(Q) idx2 <- lav_matrix_vech_row_idx(Q) Z <- RES[, idx1] * RES[, idx2] # substract isigma from each row SC <- t(t(Z) - isigma) # adjust for vech (and avoiding the 1/2 factor) SC[, lav_matrix_diagh_idx(Q)] <- SC[, lav_matrix_diagh_idx(Q)] / 2 cbind(SC.Beta, SC) } # 4. hessian of logl # 4a. hessian logl Beta and vech(res.cov) from raw data lav_mvreg_logl_hessian_data <- function(Y = NULL, eXo = NULL, # no int Beta = NULL, # int+slopes res.int = NULL, res.slopes = NULL, res.cov = NULL, res.cov.inv = NULL, Sinv.method = "eigen") { # sample size N <- NROW(Y) # observed information observed <- lav_mvreg_information_observed_data( Y = Y, eXo = eXo, Beta = Beta, res.int = res.int, res.slopes = res.slopes, res.cov = res.cov, res.cov.inv = res.cov.inv, Sinv.method = Sinv.method ) # hessian -N * observed } # 4b. 
hessian logl Beta and vech(res.cov) from samplestats lav_mvreg_logl_hessian_samplestats <- function(sample.res.int = NULL, sample.res.slopes = NULL, sample.res.cov = NULL, sample.mean.x = NULL, sample.cov.x = NULL, sample.nobs = NULL, Beta = NULL, # int + slopes res.int = NULL, # intercepts only res.slopes = NULL, # slopes only (y x x) res.cov = NULL, # res.cov Sinv.method = "eigen", res.cov.inv = NULL) { # sample size N <- sample.nobs # information observed <- lav_mvreg_information_observed_samplestats( sample.res.int = sample.res.int, sample.res.slopes = sample.res.slopes, sample.res.cov = sample.res.cov, sample.mean.x = sample.mean.x, sample.cov.x = sample.cov.x, Beta = Beta, res.int = res.int, res.slopes = res.slopes, res.cov = res.cov, Sinv.method = Sinv.method, res.cov.inv = res.cov.inv ) # hessian -N * observed } # Information h0 # 5a: unit expected information h0 Beta and vech(res.cov) lav_mvreg_information_expected <- function(Y = NULL, # not used eXo = NULL, # not used sample.mean.x = NULL, sample.cov.x = NULL, sample.nobs = NULL, Beta = NULL, # not used res.int = NULL, # not used res.slopes = NULL, # not used res.cov = NULL, res.cov.inv = NULL, Sinv.method = "eigen") { eXo <- unname(eXo) # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } # N if (is.null(sample.nobs)) { sample.nobs <- nrow(eXo) # hopefully not NULL either } else { N <- sample.nobs } # sample.mean.x + sample.cov.x if (is.null(sample.mean.x)) { sample.mean.x <- base::.colMeans(eXo, m = NROW(eXo), n = NCOL(eXo)) } if (is.null(sample.cov.x)) { sample.cov.x <- lav_matrix_cov(eXo) } # construct sample.xx = 1/N*crossprod(X1) (including intercept) sample.xx <- rbind( cbind(1, matrix(sample.mean.x, nrow = 1, )), cbind( matrix(sample.mean.x, ncol = 1), sample.cov.x + tcrossprod(sample.mean.x) ) ) # expected information I11 <- res.cov.inv %x% sample.xx I22 <- 0.5 * lav_matrix_duplication_pre_post(res.cov.inv %x% res.cov.inv) lav_matrix_bdiag(I11, I22) } # 5b: unit observed information h0 lav_mvreg_information_observed_data <- function(Y = NULL, eXo = NULL, # no int Beta = NULL, # int+slopes res.int = NULL, res.slopes = NULL, res.cov = NULL, res.cov.inv = NULL, Sinv.method = "eigen") { # create sample statistics Y <- unname(Y) X1 <- cbind(1, unname(eXo)) N <- NROW(Y) # find 'B' QR <- qr(X1) sample.B <- qr.coef(QR, Y) sample.res.int <- as.numeric(sample.B[1, ]) sample.res.slopes <- t(sample.B[-1, , drop = FALSE]) # transpose! 
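# ---------------------------------------------------------------------------
# Illustration (not part of the package): the Beta block of the expected
# (unit) information defined below is Sigma^{-1} %x% (X'X / N); because the
# loglik is exactly quadratic in Beta, -Hessian / N reproduces it. This toy
# setup mirrors the gradient check above (Sigma = identity).
library(numDeriv)
set.seed(12)
N <- 50; X <- cbind(1, rnorm(N))
Y <- X %*% rbind(c(1, 2), c(.5, -.3)) + matrix(rnorm(N * 2), N, 2)
Si <- diag(2)
ll <- function(beta) {
  RES <- Y - X %*% matrix(beta, ncol = 2)
  sum(-(2 * log(2 * pi) + rowSums(RES %*% Si * RES)) / 2)
}
H   <- numDeriv::hessian(ll, c(0.9, 0.4, 1.8, -0.2))
I11 <- Si %x% (crossprod(X) / N)          # expected info, Beta block
stopifnot(all.equal(-H / N, I11, tolerance = 1e-5))
# ---------------------------------------------------------------------------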
sample.res.cov <- cov(qr.resid(QR, Y)) * (N - 1) / N sample.mean.x <- base::.colMeans(eXo, m = NROW(eXo), n = NCOL(eXo)) sample.cov.x <- lav_matrix_cov(eXo) lav_mvreg_information_observed_samplestats( sample.res.int = sample.res.int, sample.res.slopes = sample.res.slopes, sample.res.cov = sample.res.cov, sample.mean.x = sample.mean.x, sample.cov.x = sample.cov.x, Beta = Beta, res.int = res.int, res.slopes = res.slopes, res.cov = res.cov, Sinv.method = Sinv.method, res.cov.inv = res.cov.inv ) } # 5b-bis: observed information h0 from sample statistics lav_mvreg_information_observed_samplestats <- function(sample.res.int = NULL, sample.res.slopes = NULL, sample.res.cov = NULL, sample.mean.x = NULL, sample.cov.x = NULL, Beta = NULL, # int + slopes res.int = NULL, # intercepts only res.slopes = NULL, # slopes only (y x x) res.cov = NULL, # res.cov Sinv.method = "eigen", res.cov.inv = NULL) { # construct model-implied Beta if (is.null(Beta)) { Beta <- rbind(matrix(res.int, nrow = 1), t(res.slopes)) } # construct 'saturated' (sample-based) B sample.B <- rbind(matrix(sample.res.int, nrow = 1), t(sample.res.slopes)) # construct sample.xx = 1/N*crossprod(X1) (including intercept) sample.xx <- rbind( cbind(1, matrix(sample.mean.x, nrow = 1, )), cbind( matrix(sample.mean.x, ncol = 1), sample.cov.x + tcrossprod(sample.mean.x) ) ) # W.tilde = S + t(B - Beta) %*% (1/N)*X'X %*% (B - Beta) W.tilde <- (sample.res.cov + t(sample.B - Beta) %*% sample.xx %*% (sample.B - Beta)) # res.cov.inv if (is.null(res.cov.inv)) { # invert res.cov res.cov.inv <- lav_matrix_symmetric_inverse( S = res.cov, logdet = FALSE, Sinv.method = Sinv.method ) } H11 <- res.cov.inv %x% sample.xx H21 <- lav_matrix_duplication_pre(res.cov.inv %x% (res.cov.inv %*% (crossprod(sample.B - Beta, sample.xx)))) H12 <- t(H21) AAA <- res.cov.inv %*% (2 * W.tilde - res.cov) %*% res.cov.inv H22 <- (1 / 2) * lav_matrix_duplication_pre_post(res.cov.inv %x% AAA) out <- rbind( cbind(H11, H12), cbind(H21, H22) ) out } # 5c: unit first-order information h0 lav_mvreg_information_firstorder <- function(Y = NULL, eXo = NULL, # no int Beta = NULL, # int+slopes res.int = NULL, res.slopes = NULL, res.cov = NULL, res.cov.inv = NULL, Sinv.method = "eigen") { N <- NROW(Y) # scores SC <- lav_mvreg_scores_beta_vech_sigma( Y = Y, eXo = eXo, Beta = Beta, res.int = res.int, res.slopes = res.slopes, res.cov = res.cov, Sinv.method = Sinv.method, res.cov.inv = res.cov.inv ) crossprod(SC) / N } # 6: inverted information h0 # 6a: inverted unit expected information h0 Beta and vech(res.cov) # # lav_mvreg_inverted_information_expected <- function(Y = NULL, # unused! # } lavaan/R/lav_lavaan_step13_vcov.R0000644000176200001440000000751014627656441016422 0ustar liggesuserslav_lavaan_step13_vcov_boot <- function(lavoptions = NULL, lavmodel = NULL, lavsamplestats = NULL, lavdata = NULL, lavpartable = NULL, lavcache = NULL, lavimplied = NULL, lavh1 = NULL, x = NULL) { # # # # # # # # # # # # # # # # # # 13. lavvcov + lavboot # # # # # # # # # # # # # # # # # # # set VCOV to NULL # if lavoptions$se not "none", "external", "twostep" and # lavmodel@nx.free > 0L and x converged or optim.method == "none" # compute VCOV via lav_model_vcov # if attribute BOOT.COEFF of VCOV not NULL, store it in lavboot$coef # lavvcov <- list(se = lavoptions$se, information = lavoptions$information, # vcov = VCOV1) # where VCOV1 = VCOV without attributes (except dim) or ... 
# NULL if lavoptions$store.vcov FALSE or # store.vcov=="default" and rotation="none" # if lavoptions$se == "external" # if lavpartable$se NULL # lavpartable$se <- lav_model_vcov_se(..., VCOV=NULL, BOOT=NULL) # + ** warning ** # if lavpartable not "external" or "none" or "twostep" # lavpartable$se <- lav_model_vcov_se(...) VCOV <- NULL # nolint if (lavoptions$se != "none" && lavoptions$se != "external" && lavoptions$se != "twostep" && # (.hasSlot(lavmodel, "nefa") && # (lavmodel@nefa == 0L || # (lavmodel@nefa > 0L && lavoptions$rotation == "none") || # (lavmodel@nefa > 0L && lavoptions$rotation.se == "delta") # ) # ) && lavmodel@nx.free > 0L && (attr(x, "converged") || lavoptions$optim.method == "none")) { if (lav_verbose()) { cat("computing VCOV for se =", lavoptions$se, "...") } VCOV <- lav_model_vcov( # nolint lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavpartable = lavpartable, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1 ) if (lav_verbose()) { cat(" done.\n") } } # VCOV # extract bootstrap results (if any) if (!is.null(attr(VCOV, "BOOT.COEF"))) { lavboot <- list() lavboot$coef <- attr(VCOV, "BOOT.COEF") } else { lavboot <- list() } # store VCOV in vcov # strip all attributes but 'dim' tmp.attr <- attributes(VCOV) VCOV1 <- VCOV # nolint attributes(VCOV1) <- tmp.attr["dim"] # nolint # store vcov? new in 0.6-6 if (!is.null(lavoptions$store.vcov) && !is.null(VCOV1)) { if (is.logical(lavoptions$store.vcov) && !lavoptions$store.vcov) { VCOV1 <- NULL # nolint } if (is.character(lavoptions$store.vcov) && lavoptions$rotation == "none" && lavoptions$store.vcov == "default" && ncol(VCOV1) > 200L) { VCOV1 <- NULL # nolint } } lavvcov <- list( se = lavoptions$se, information = lavoptions$information[1], vcov = VCOV1 ) # store se in partable if (lavoptions$se == "external") { if (is.null(lavpartable$se)) { lavpartable$se <- lav_model_vcov_se( lavmodel = lavmodel, lavpartable = lavpartable, VCOV = NULL, BOOT = NULL ) lav_msg_warn(gettext( "se = \"external\" but parameter table does not contain a `se' column")) } } else if (lavoptions$se %in% c("none", "twostep")) { # do nothing } else { lavpartable$se <- lav_model_vcov_se( lavmodel = lavmodel, lavpartable = lavpartable, VCOV = VCOV, BOOT = lavboot$coef ) } list( lavpartable = lavpartable, lavvcov = lavvcov, VCOV = VCOV, lavmodel = lavmodel, lavboot = lavboot ) } lavaan/R/xxx_lavaanList.R0000644000176200001440000003523114627656441015070 0ustar liggesusers# lavaanList: fit the *same* model, on different datasets # YR - 29 Jun 2016 # YR - 27 Jan 2017: change lavoptions; add dotdotdot to each call # TDJ - 23 Aug 2018: change wrappers to preserve arguments from match.call() lavaanList <- function(model = NULL, # model dataList = NULL, # list of datasets dataFunction = NULL, # generating function dataFunction.args = list(), # optional arguments ndat = length(dataList), # how many datasets? cmd = "lavaan", ..., store.slots = c("partable"), # default is partable FUN = NULL, # arbitrary FUN show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL, iseed = NULL) { # store.slots call mc <- match.call() # check store.slots store.slots <- tolower(store.slots) if (length(store.slots) == 1L && store.slots == "all") { store.slots <- c( "timing", "partable", "data", "samplestats", "cache", "loglik", "h1", "baseline", "external", "vcov", "test", "optim", "implied" ) } # dataList or function? 
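# usage sketch (hypothetical object names, not run): data can be supplied
# up front as a list, or generated on the fly by a function plus ndat:
#   fits <- semList(model = mod, dataList = list(d1, d2, d3))
#   gen  <- function() simulateData(pop.model, sample.nobs = 200L)
#   fits <- lavaanList(model = mod, dataFunction = gen, ndat = 100L,
#                      cmd = "sem")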
if (is.function(dataFunction)) { if (ndat == 0L) { lav_msg_stop(gettext("please specify number of requested datasets (ndat)")) } firstData <- do.call(dataFunction, args = dataFunction.args) # dataList <- vector("list", length = ndat) } else { firstData <- dataList[[1]] } # check data if (is.matrix(firstData)) { # check if we have column names? NAMES <- colnames(firstData) if (is.null(NAMES)) { lav_msg_stop(gettext("data is a matrix without column names")) } } else if (inherits(firstData, "data.frame")) { # check? } else { lav_msg_stop(gettext("(generated) data is not a data.frame (or a matrix)")) } # parallel (see boot package) if (missing(parallel)) { # parallel <- getOption("boot.parallel", "no") parallel <- "no" } parallel <- match.arg(parallel) have_mc <- have_snow <- FALSE if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") { have_mc <- .Platform$OS.type != "windows" } else if (parallel == "snow") { have_snow <- TRUE } if (!have_mc && !have_snow) { ncpus <- 1L } loadNamespace("parallel") } # dot dot dot dotdotdot <- list(...) # if 'model' is a lavaan object (perhaps from lavSimulate), no need to # call `cmd' if (inherits(model, "lavaan")) { FIT <- model } else { # adapt for FIT # dotdotdotFIT <- dotdotdot # dotdotdotFIT$do.fit <- TRUE # to get starting values # dotdotdotFIT$se <- "none" # dotdotdotFIT$test <- "none" # initial model fit, using first dataset FIT <- do.call(cmd, args = c(list( model = model, data = firstData ), dotdotdot) ) } lavoptions <- FIT@Options lavmodel <- FIT@Model lavpartable <- FIT@ParTable lavpta <- FIT@pta # remove any options in lavoptions from dotdotdot if (length(dotdotdot) > 0L) { rm.idx <- which(names(dotdotdot) %in% names(lavoptions)) if (length(rm.idx) > 0L) { dotdotdot <- dotdotdot[-rm.idx] } } # remove start/est/se columns from lavpartable lavpartable$start <- lavpartable$est <- lavpartable$se <- NULL # empty slots timingList <- ParTableList <- DataList <- SampleStatsList <- CacheList <- vcovList <- testList <- optimList <- h1List <- loglikList <- baselineList <- impliedList <- funList <- list() # prepare store.slots slots if ("timing" %in% store.slots) { timingList <- vector("list", length = ndat) } if ("partable" %in% store.slots) { ParTableList <- vector("list", length = ndat) } if ("data" %in% store.slots) { DataList <- vector("list", length = ndat) } if ("samplestats" %in% store.slots) { SampleStatsList <- vector("list", length = ndat) } if ("cache" %in% store.slots) { CacheList <- vector("list", length = ndat) } if ("vcov" %in% store.slots) { vcovList <- vector("list", length = ndat) } if ("test" %in% store.slots) { testList <- vector("list", length = ndat) } if ("optim" %in% store.slots) { optimList <- vector("list", length = ndat) } if ("implied" %in% store.slots) { impliedList <- vector("list", length = ndat) } if ("loglik" %in% store.slots) { loglikList <- vector("list", length = ndat) } if ("h1" %in% store.slots) { h1List <- vector("list", length = ndat) } if ("baseline" %in% store.slots) { baselineList <- vector("list", length = ndat) } if (!is.null(FUN)) { funList <- vector("list", length = ndat) } # single run fn <- function(i) { if (show.progress) { cat(" ...
data set number:", sprintf("%4d", i)) } # get new dataset if (i == 1L) { DATA <- firstData } else if (is.function(dataFunction)) { DATA <- do.call(dataFunction, args = dataFunction.args) } else if (is.list(dataList)) { DATA <- dataList[[i]] } # if categorical, check if we have enough response categories # for each ordered variables in DATA data.ok.flag <- TRUE if (FIT@Model@categorical) { # expected nlev ord.idx <- unique(unlist(FIT@pta$vidx$ov.ord)) NLEV.exp <- FIT@Data@ov$nlev[ord.idx] # observed nlev NLEV.obs <- sapply( DATA[, unique(unlist(FIT@pta$vnames$ov.ord)), drop = FALSE ], function(x) length(unique(na.omit(x))) ) wrong.idx <- which(NLEV.exp - NLEV.obs != 0) if (length(wrong.idx) > 0L) { data.ok.flag <- FALSE } } # adapt lavmodel for this new dataset # - starting values will be different # - ov.x variances/covariances # FIXME: can we not make the changes internally? # if(lavmodel@fixed.x && length(vnames(lavpartable, "ov.x")) > 0L) { # for(g in 1:FIT@Data@ngroups) { # # } lavmodel <- NULL # } # fit model with this (new) dataset if (data.ok.flag) { if (cmd %in% c("lavaan", "sem", "cfa", "growth")) { # lavoptions$start <- FIT # FIXME: needed? lavobject <- try( do.call("lavaan", args = c( list( slotOptions = lavoptions, slotParTable = lavpartable, slotModel = lavmodel, # start = FIT, data = DATA ), dotdotdot ) ), silent = TRUE ) } else if (cmd == "fsr") { # extract fs.method and fsr.method from dotdotdot if (!is.null(dotdotdot$fs.method)) { fs.method <- dotdotdot$fs.method } else { fs.method <- formals(fsr)$fs.method # default } if (!is.null(dotdotdot$fsr.method)) { fsr.method <- dotdotdot$fsr.method } else { fsr.method <- formals(fsr)$fsr.method # default } lavoptions$start <- FIT # FIXME: needed? lavobject <- try( do.call("fsr", args = c( list( slotOptions = lavoptions, slotParTable = lavpartable, slotModel = lavmodel, # start = FIT, data = DATA, cmd = "lavaan", fs.method = fs.method, fsr.method = fsr.method ), dotdotdot ) ), silent = TRUE ) } else { lav_msg_stop(gettext("unknown cmd:"), cmd) } } # data.ok.flag RES <- list( ok = FALSE, timing = NULL, ParTable = NULL, Data = NULL, SampleStats = NULL, vcov = NULL, test = NULL, optim = NULL, implied = NULL, baseline = NULL, baseline.ok = FALSE, fun = NULL ) if (data.ok.flag && inherits(lavobject, "lavaan") && lavInspect(lavobject, "converged")) { RES$ok <- TRUE if (show.progress) { cat( " OK -- niter = ", sprintf("%3d", lavInspect(lavobject, "iterations")), "\n" ) } # extract slots from fit if ("timing" %in% store.slots) { RES$timing <- lavobject@timing } if ("partable" %in% store.slots) { RES$ParTable <- lavobject@ParTable } if ("data" %in% store.slots) { RES$Data <- lavobject@Data } if ("samplestats" %in% store.slots) { RES$SampleStats <- lavobject@SampleStats } if ("cache" %in% store.slots) { RES$Cache <- lavobject@Cache } if ("vcov" %in% store.slots) { RES$vcov <- lavobject@vcov } if ("test" %in% store.slots) { RES$test <- lavobject@test } if ("optim" %in% store.slots) { RES$optim <- lavobject@optim } if ("implied" %in% store.slots) { RES$implied <- lavobject@implied } if ("loglik" %in% store.slots) { RES$loglik <- lavobject@loglik } if ("h1" %in% store.slots) { RES$h1 <- lavobject@h1 } if ("baseline" %in% store.slots) { RES$baseline <- lavobject@baseline if (length(lavobject@baseline) > 0L) { RES$baseline.ok <- TRUE } } # custom FUN if (!is.null(FUN)) { RES$fun <- FUN(lavobject) } } else { # failed! 
if (show.progress) { if (data.ok.flag) { if (inherits(lavobject, "lavaan")) { cat(" FAILED: no convergence\n") } else { cat(" FAILED: could not construct lavobject\n") print(lavobject) } } else { cat(" FAILED: nlev too low for some vars\n") } } if ("partable" %in% store.slots) { RES$ParTable <- lavpartable RES$ParTable$est <- RES$ParTable$start RES$ParTable$est[RES$ParTable$free > 0] <- as.numeric(NA) RES$ParTable$se <- numeric(length(lavpartable$lhs)) RES$ParTable$se[RES$ParTable$free > 0] <- as.numeric(NA) } if (store.failed) { tmpfile <- tempfile(pattern = "lavaanListData") datfile <- paste0(tmpfile, ".csv") write.csv(DATA, file = datfile, row.names = FALSE) if (data.ok.flag) { # or only if lavobject is of class lavaan? objfile <- paste0(tmpfile, ".RData") save(lavobject, file = objfile) } } } RES } # the next 20 lines are based on the boot package RES <- if (ncpus > 1L && (have_mc || have_snow)) { if (have_mc) { parallel::mclapply(seq_len(ndat), fn, mc.cores = ncpus) } else if (have_snow) { list(...) # evaluate any promises if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) if (RNGkind()[1L] == "L'Ecuyer-CMRG") { parallel::clusterSetRNGStream(cl, iseed = iseed) } RES <- parallel::parLapply(cl, seq_len(ndat), fn) parallel::stopCluster(cl) RES } else { parallel::parLapply(cl, seq_len(ndat), fn) } } } else { lapply(seq_len(ndat), fn) } # restructure if ("baseline" %in% store.slots) { meta <- list( ndat = ndat, ok = sapply(RES, "[[", "ok"), baseline.ok = sapply(RES, "[[", "baseline.ok"), store.slots = store.slots ) } else { meta <- list( ndat = ndat, ok = sapply(RES, "[[", "ok"), store.slots = store.slots ) } # extract store.slots slots if ("timing" %in% store.slots) { timingList <- lapply(RES, "[[", "timing") } if ("partable" %in% store.slots) { ParTableList <- lapply(RES, "[[", "ParTable") } if ("data" %in% store.slots) { DataList <- lapply(RES, "[[", "Data") } if ("samplestats" %in% store.slots) { SampleStatsList <- lapply(RES, "[[", "SampleStats") } if ("cache" %in% store.slots) { CacheList <- lapply(RES, "[[", "Cache") } if ("vcov" %in% store.slots) { vcovList <- lapply(RES, "[[", "vcov") } if ("test" %in% store.slots) { testList <- lapply(RES, "[[", "test") } if ("optim" %in% store.slots) { optimList <- lapply(RES, "[[", "optim") } if ("implied" %in% store.slots) { impliedList <- lapply(RES, "[[", "implied") } if ("h1" %in% store.slots) { h1List <- lapply(RES, "[[", "h1") } if ("loglik" %in% store.slots) { loglikList <- lapply(RES, "[[", "loglik") } if ("baseline" %in% store.slots) { baselineList <- lapply(RES, "[[", "baseline") } if (!is.null(FUN)) { funList <- lapply(RES, "[[", "fun") } # create lavaanList object lavaanList <- new("lavaanList", version = packageDescription("lavaan", fields = "Version"), call = mc, Options = lavoptions, ParTable = lavpartable, pta = lavpta, Model = lavmodel, Data = FIT@Data, # meta meta = meta, # per dataset timingList = timingList, ParTableList = ParTableList, DataList = DataList, SampleStatsList = SampleStatsList, CacheList = CacheList, vcovList = vcovList, testList = testList, optimList = optimList, impliedList = impliedList, h1List = h1List, loglikList = loglikList, baselineList = baselineList, funList = funList, external = list() ) lavaanList } semList <- function(model = NULL, dataList = NULL, dataFunction = NULL, dataFunction.args = list(), ndat = length(dataList), ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L,
parallel::detectCores() - 1L), cl = NULL, iseed = NULL) { mc <- match.call(expand.dots = TRUE) mc$cmd <- "sem" mc[[1L]] <- quote(lavaan::lavaanList) eval(mc, parent.frame()) } cfaList <- function(model = NULL, dataList = NULL, dataFunction = NULL, dataFunction.args = list(), ndat = length(dataList), ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL, iseed = NULL) { mc <- match.call(expand.dots = TRUE) mc$cmd <- "cfa" mc[[1L]] <- quote(lavaan::lavaanList) eval(mc, parent.frame()) } lavaan/R/lav_options.R0000644000176200001440000011176714627656440014430 0ustar liggesusers# initial version YR 02/08/2010 # YR 28 Jan 2017: add lavOptions(), lav_options_default() # LDW 16 Apr 2024: move lavOptions and lav_options_default to separate file # help functions for lav_options_set #### # help function to determine estimator 'group' lav_options_estimatorgroup <- function(estimator) { goal <- switch(estimator, ml = , mlf = , mlm = , mlmv = , mlmvs = , mlr = "ML", catml = "catML", dwls = , wlsm = , wlsmv = , wlsmvs = "DWLS", uls = , ulsm = , ulsmv = , ulsmvs = "ULS", none = "none", toupper(estimator) ) goal } lav_options_checkinterval <- function(x, nm, num2int) { if (num2int) x <- as.integer(x) oks <- vapply(x, function(x1) { (x1 > nm$bounds[1] || (x1 == nm$bounds[1] && nm$first.in)) && (x1 < nm$bounds[2] || (x1 == nm$bounds[2] && nm$last.in)) }, TRUE) all(oks) } lav_options_checkvalues <- function(optname, optvalue, chr) { optvalid <- names(chr) if (is.null(optvalid)) optvalid <- chr if (any(optvalid == "empty.string")) { optvalid[optvalid == "empty.string"] <- "" } optvals <- gsub("[_-]", ".", tolower(optvalue)) optvalsok <- match(optvals, optvalid) if (any(is.na(optvalsok))) { lav_msg_stop(sprintf( ngettext( length(optvalue[is.na(optvalsok)]), "invalid value in %1$s option: %2$s.", "invalid values in %1$s option: %2$s." ), optname, lav_msg_view(optvalue[is.na(optvalsok)], log.sep = "none") )) } as.vector(chr[optvalsok]) } lav_options_check <- function(opts, opt.check, subname) { # nolint opt.names <- names(opts) hiddens <- startsWith(opt.names, ".") if (any(hiddens)) { # remove hidden options temporarily opts.hidden <- opts[hiddens] opts <- opts[!hiddens] opt.names <- opt.names[!hiddens] } check.names <- names(opt.check) match.opt <- match(opt.names, check.names) if (any(is.na(match.opt))) { lav_msg_stop(gettextf( "Some option(s) unknown: %s !", lav_msg_view(opt.names[is.na(match.opt)], log.sep = "none"))) } for (j in seq_along(opts)) { opt.name <- opt.names[j] opt.value <- opts[[j]] opt.check1 <- opt.check[[match.opt[j]]] if (!is.null(attr(opt.check1, "SUB"))) { opts[[j]] <- lav_options_check(opt.value, opt.check1, paste0(opt.name, "$")) next } # check length of option value if (length(opt.value) < opt.check1$oklen[1]) { lav_msg_stop(gettextf( "Length of option '%1$s' value must be at least %2$s.", paste0(subname, opt.name), opt.check1$oklen[1])) } if (length(opt.value) > abs(opt.check1$oklen[2])) { if (opt.check1$oklen[2] > 0L) { lav_msg_stop(gettextf( "Length of option '%1$s' value must be maximum %2$s.", paste0(subname, opt.name), opt.check1$oklen[2])) } else { lav_msg_warn(gettextf( "Length of option '%1$s' value should be maximum %2$s. 
Only first %3$s elements used.", paste0(subname, opt.name), -opt.check1$oklen[2], -opt.check1$oklen[2])) } } if (is.null(opt.check1$bl)) opt.check1$bl <- FALSE if (!is.null(opt.check1$chr) || !is.null(opt.check1$nm) || opt.check1$bl) { if (!opt.check1$bl || !is.logical(opt.value)) { if (!is.null(opt.check1$nm) && is.numeric(opt.value)) { num2int <- FALSE if (!is.null(opt.check1$num2int)) num2int <- opt.check1$num2int if (!lav_options_checkinterval(opt.value, opt.check1$nm, num2int)) { lav_msg_stop(gettextf( "Value(s) of option %1$s out of range (%2$s)!", paste0(subname, opt.name), paste0(opt.check1$nm$bounds[1], if (opt.check1$nm$first.in) " <= " else " < ", "x", if (opt.check1$nm$last.in) " <= " else " < ", opt.check1$nm$bounds[2]))) } } if (!is.null(opt.check1$chr) && is.character(opt.value)) { opt.value <- lav_options_checkvalues(opt.name, opt.value, opt.check1$chr) opts[[j]] <- opt.value } } } } if (any(hiddens)) { # add hidden options opts <- modifyList(opts, opts.hidden) } opts } # this function collects and checks the user-provided options/arguments, # and fills in the "default" values, or changes them in an attempt to # produce a consistent set of values... # # returns a list with the named options lav_options_set <- function(opt = NULL) { # nolint # check the presence of necessary hidden options #### if (is.null(opt$.categorical) || is.null(opt$.multilevel) || is.null(opt$.clustered)) lav_msg_fixme( ".categorical, .multilevel and .clustered must be present") # get opt.default and opt.check #### if (!exists("opt.check", lavaan_cache_env)) lav_options_default() opt.check <- get("opt.check", lavaan_cache_env) if (lav_debug()) { cat("lavaan DEBUG: lavaanOptions IN\n") str(opt) opt$optim.partrace <- TRUE } # check options with definitions #### opt <- lav_options_check(opt, opt.check, "") # check option 'start' if (is.character(opt$start) && all(opt$start != c("default", "simple", "est"))) lav_msg_stop(gettext( "start option must be 'default', 'simple', 'est' or a parametertable")) # first of all: set estimator #### if (opt$estimator == "default") { if (opt$.categorical) { opt$estimator <- "wlsmv" } else { opt$estimator <- "ml" } } # defaults for opt$sample.cov.rescale if (opt$sample.cov.rescale == "default") { opt$sample.cov.rescale <- switch( opt$estimator, dls = TRUE, fabin2 = , fabin3 = , mgm = , js = , jsa = , bentler1982 = TRUE, miiv = TRUE, "default" ) } # option defaults specific for mimic=... 
opt <- lav_options_mimic(opt) # store opt$estimator as estimator.orig in upper case opt$estimator.orig <- toupper(opt$estimator) # rename names of test statistics if needed, check for invalid values #### opt$test <- lav_test_rename(opt$test, check = TRUE) # same for scaled.test opt$scaled.test <- lav_test_rename(opt$scaled.test, check = TRUE) # rename names of se values, check illegal combinations se/estimator #### # pass-through function: may change value of information # for backwards compatibility (eg if se = "expected") opt <- lav_options_check_se(opt) # do.fit FALSE implies se="none" and test="none" (unless not default) #### if (!opt$do.fit) { if (opt$se == "default") opt$se <- "none" if (opt$test[1] == "default") opt$test <- "none" } # marker.int.fixed #### if (opt$marker.int.zero) { opt$meanstructure <- TRUE opt$int.ov.free <- TRUE if ((is.logical(opt$effect.coding) && opt$effect.coding) || (is.character(opt$effect.coding) && nchar(opt$effect.coding) > 0L)) { lav_msg_stop(gettext( "effect coding cannot be combined with marker.int.zero = TRUE option")) } if (opt$std.lv) { lav_msg_stop(gettext( "std.lv = TRUE cannot be combined with marker.int.zero = TRUE")) } } # group.equal and group.partial #### if (length(opt$group.equal) > 0L && opt$group.equal[1] == "none") { opt$group.equal <- character(0) } else if (is.null(opt$group.equal) || all(nchar(opt$group.equal) == 0L)) { opt$group.equal <- character(0) } if (is.null(opt$group.partial) || all(nchar(opt$group.partial) == 0L)) { opt$group.partial <- character(0) } else if (length(opt$group.partial) == 0) { # nothing to do } else { # strip white space opt$group.partial <- gsub("[[:space:]]+", "", opt$group.partial) } # if categorical, and group.equal contains "intercepts", also add # thresholds (and vice versa) if (opt$.categorical && any("intercepts" == opt$group.equal)) { opt$group.equal <- unique(c(opt$group.equal, "thresholds")) } if (opt$.categorical && any("thresholds" == opt$group.equal)) { opt$group.equal <- unique(c(opt$group.equal, "intercepts")) } # clustered #### # brute-force override (for now) if (opt$.clustered && !opt$.multilevel) { opt$meanstructure <- TRUE if (opt$estimator == "mlr") { opt$estimator <- "ml" opt$test <- "yuan.bentler.mplus" opt$se <- "robust.cluster" } else if (opt$estimator == "mlm") { opt$estimator <- "ml" opt$test <- "satorra.bentler" opt$se <- "robust.cluster.sem" } else if (opt$.categorical && opt$estimator != "pml") { opt$test <- "satorra.bentler" opt$se <- "robust.cluster.sem" } # test #### if (length(opt$test) == 1L && opt$test == "default") { opt$test <- "yuan.bentler.mplus" } else if (all(opt$test %in% c( "none", "standard", "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus" ))) { # nothing to do } else if (opt$se == "robust") { opt$test <- "yuan.bentler.mplus" } else { lav_msg_stop( gettextf("`test' argument must be one of %s in the clustered case", lav_msg_view(c("none", "yuan.bentler", "yuan.bentler.mplus", "satorra.bentler"), log.sep = "or"))) } # se #### if (opt$se == "default") { opt$se <- "robust.cluster" } else if (any(opt$se == c("none", "robust.cluster", "robust.cluster.sem"))) { # nothing to do } else if (opt$se == "robust") { opt$se <- "robust.cluster" } # information #### if (opt$information[1] == "default") { if (opt$se == "robust.cluster" && opt$estimator == "ml") { opt$information[1] <- "observed" } else { opt$information[1] <- "expected" } } if (length(opt$information) > 1L && opt$information[2] == "default") { if (opt$se == "robust.cluster") { opt$information[2] <- "observed"
} else { opt$information[2] <- "expected" } } } # multilevel #### # brute-force override (for now) if (opt$.multilevel) { opt$meanstructure <- TRUE # test if (length(opt$test) == 1L && opt$test == "default") { # ok, will be set later } else if (all(opt$test %in% c("none", "standard", "yuan.bentler", "yuan.bentler.mplus"))) { # nothing to do } else { lav_msg_stop(gettextf( "`test' argument must be one of %s in the multilevel case", lav_msg_view(c("none", "standard", "yuan.bentler", "yuan.bentler.mplus"), log.sep = "or"))) } # se if (opt$se == "default") { # ok, will be set later } else if (any(opt$se == c( "none", "standard", "robust.huber.white", "sandwich"))) { # nothing to do } else if (opt$se == "robust") { opt$se <- "robust.huber.white" } else { lav_msg_stop(gettextf( "`se' argument must be one of %s in the multilevel case", lav_msg_view(c("none", "standard", "robust.huber.white"), log.sep = "or"))) } # information if (opt$information[1] == "default") { opt$information[1] <- "observed" } if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "observed" } } # missing #### if (opt$missing == "default") { opt$missing <- "listwise" } else if (opt$missing == "ml") { if (opt$.categorical) { lav_msg_stop(gettextf( "missing = %s not available in the categorical setting", dQuote(opt$missing))) } if (any(opt$estimator == c( "mlm", "mlmv", "gls", "wls", "wlsm", "wlsmv", "uls", "ulsm", "ulsmv", "pml", "dls" ))) { lav_msg_stop(gettextf( "missing=%1$s is not allowed for estimator %2$s", dQuote(opt$missing), dQuote(lav_options_estimatorgroup(opt$estimator)))) } } else if (opt$missing == "ml.x") { if (opt$.categorical) { lav_msg_stop(gettextf( "missing = %s not available in the categorical setting", dQuote(opt$missing))) } if (any(opt$estimator == c( "mlm", "mlmv", "gls", "wls", "wlsm", "wlsmv", "uls", "ulsm", "ulsmv", "pml", "dls" ))) { lav_msg_stop(gettextf( "missing=%1$s is not allowed for estimator %2$s", dQuote(opt$missing), dQuote(lav_options_estimatorgroup(opt$estimator)))) } } else if (opt$missing == "two.stage") { if (opt$.categorical) { lav_msg_stop(gettextf( "missing = %s not available in the categorical setting", dQuote(opt$missing))) } if (any(opt$estimator == c( "mlm", "mlmv", "gls", "wls", "wlsm", "wlsmv", "uls", "ulsm", "ulsmv", "pml", "mml", "dls" ))) { lav_msg_stop(gettextf( "missing=%1$s is not allowed for estimator %2$s", dQuote(opt$missing), dQuote(lav_options_estimatorgroup(opt$estimator)))) } } else if (opt$missing == "robust.two.stage") { if (opt$.categorical) { lav_msg_stop(gettextf( "missing = %s not available in the categorical setting", dQuote(opt$missing))) } if (any(opt$estimator == c( "mlm", "mlmv", "gls", "wls", "wlsm", "wlsmv", "uls", "ulsm", "ulsmv", "pml", "mml", "dls" ))) { lav_msg_stop(gettextf( "missing=%1$s is not allowed for estimator %2$s", dQuote(opt$missing), dQuote(lav_options_estimatorgroup(opt$estimator)))) } } else if (opt$missing == "doubly.robust") { if (opt$estimator != "pml") { lav_msg_stop(gettextf( "missing=%s option only available for estimator PML", dQuote(opt$missing))) } } # check missing #### if (any(opt$missing == c("ml", "ml.x")) && opt$se == "robust.sem") { lav_msg_warn(gettextf( "missing will be set to %1$s for se = %2$s.", dQuote("listwise"), dQuote(opt$se))) opt$missing <- "listwise" } if (any(opt$missing == c("ml", "ml.x")) && any(opt$test %in% c( "satorra.bentler", "mean.var.adjusted", "scaled.shifted" ))) { lav_msg_warn(gettextf( "missing will be set to %s for satorra.bentler style test",
dQuote("listwise"))) opt$missing <- "listwise" } # checks if missing = "two.stage" or "robust.two.stage" #### if (any(opt$missing == c("two.stage", "robust.two.stage"))) { opt$meanstructure <- TRUE # se if (opt$se == "default") { if (opt$missing == "two.stage") { opt$se <- "two.stage" } else { opt$se <- "robust.two.stage" } } else if (opt$missing == "two.stage" && opt$se == "two.stage") { # nothing to do } else if (opt$missing == "robust.two.stage" && opt$se == "robust.two.stage") { # nothing to do } else { lav_msg_warn(gettextf( "se will be set to %1$s if missing = %2$s", dQuote(opt$missing), dQuote(opt$missing))) opt$se <- opt$missing } # information if (opt$information[1] == "default") { # for both two.stage and robust.two.stage opt$information[1] <- "observed" } else if (opt$information[1] == "first.order") { lav_msg_warn(gettextf( "information will be set to %1$s if missing = %2$s", dQuote("observed"), dQuote(opt$missing) )) opt$information[1] <- "observed" } # observed.information (ALWAYS "h1" for now) opt$observed.information[1] <- "h1" opt$observed.information[2] <- "h1" # new in 0.6-9: ALWAYS h1.information = "unstructured" opt$h1.information <- c("unstructured", "unstructured") if (length(opt$information) > 1L && opt$information[2] == "default") { # for both two.stage and robust.two.stage opt$information[2] <- "observed" } # test if (length(opt$test) > 1L) { lav_msg_warn(gettextf( "test= argument can only contain a single element if missing = %s (taking the first)", dQuote(opt$missing))) opt$test <- opt$test[1] } if (length(opt$test) == 1L && opt$test == "default") { opt$test <- "satorra.bentler" } else if (length(opt$test) == 1L && any( opt$test == c("satorra", "sb", "satorra.bentler", "satorra-bentler"))) { opt$test <- "satorra.bentler" } else { lav_msg_warn(gettextf( "test will be set to %1$s if missing = %2$s", dQuote("satorra.bentler"), dQuote(opt$missing) )) opt$test <- "satorra.bentler" } } # meanstructure #### if (is.logical(opt$meanstructure)) { if (opt$meanstructure == FALSE) { if (any(opt$missing == c("ml", "ml.x", "two.stage"))) { lav_msg_warn(gettextf( "missing argument %s forces meanstructure = TRUE", opt$missing)) } } } else if (opt$meanstructure == "default") { # by default: no meanstructure! 
if (opt$estimator == "pml") { opt$meanstructure <- TRUE } else { opt$meanstructure <- FALSE } } # bootstrap #### if (opt$se == "bootstrap") { opt$information[1] <- "observed" if (length(opt$information) > 1L && opt$information[2] == "default") { opt$information[2] <- "observed" } opt$bootstrap <- as.integer(opt$bootstrap) } # specific per estimator (group) #### opt <- switch(opt$estimator, ml = , mlf = , mlm = , mlmv = , mlmvs = , mlr = lav_options_est_ml(opt), gls = lav_options_est_gls(opt), ntrls = lav_options_est_ntrls(opt), catml = lav_options_est_catml(opt), wls = lav_options_est_wls(opt), dls = lav_options_est_dls(opt), dwls = , wlsm = , wlsmv = , wlsmvs = lav_options_est_dwls(opt), uls = , ulsm = , ulsmv = , ulsmvs = lav_options_est_uls(opt), pml = lav_options_est_pml(opt), fml = lav_options_est_fml(opt), reml = lav_options_est_reml(opt), mml = lav_options_est_mml(opt), fabin2 = , fabin3 = , mgm = , js = , jsa = , bentler1982 = lav_options_est_fabin(opt), miiv = lav_options_est_miiv(opt), lav_options_est_none(opt) # estimator = none ) # after code specific to estimator types #### # optim.method - if still "default" at this point -> set to "nlminb" if (opt$optim.method == "default") { opt$optim.method <- "nlminb" } # special stuff for categorical if (opt$.categorical) { opt$meanstructure <- TRUE # Mplus style if (lav_options_estimatorgroup(opt$estimator) == "ML") { lav_msg_stop(gettext( "estimator ML for ordered data is not supported yet. Use WLSMV instead." )) } } # link if (opt$link == "logit") { if (opt$estimator != "mml") { lav_msg_warn(gettextf( "link will be set to %1$s for estimator = %2$s", dQuote("probit"), dQuote(opt$estimator) )) } } # likelihood approach (wishart or normal) + sample.cov.rescale if (!any(lav_options_estimatorgroup(opt$estimator) == c("ML", "REML", "PML", "FML", "NTRLS", "catML"))) { # if(opt$likelihood != "default") { # lav_msg_stop(gettext( # "likelihood argument is only relevant if estimator = ML")) # } if (opt$sample.cov.rescale == "default") { opt$sample.cov.rescale <- FALSE } # else { # lav_msg_warn(gettext( # "sample.cov.rescale argument is only relevant if estimator = ML")) # } } else { # ml and friends if (any(lav_options_estimatorgroup(opt$estimator) == c("PML", "FML"))) { opt$likelihood <- "normal" } else if (opt$likelihood == "default") { opt$likelihood <- "normal" } if (opt$sample.cov.rescale == "default") { opt$sample.cov.rescale <- FALSE if (opt$likelihood == "normal") { opt$sample.cov.rescale <- TRUE } } } # se information if (opt$information[1] == "default") { if (any(opt$missing == c("ml", "ml.x")) || any(opt$se == c("robust.huber.white", "first.order"))) { # nchar(opt$constraints) > 0L) { opt$information[1] <- "observed" } else { opt$information[1] <- "expected" } } # first.order information can not be used with robust if (opt$information[1] == "first.order" && any(opt$se == c("robust.huber.white", "robust.sem"))) { lav_msg_stop(gettextf( "information must be either %s if robust standard errors are requested.", lav_msg_view(c("expected", "observed"), log.sep = "or"))) } # test information if (length(opt$information) == 1L) { opt$information <- rep(opt$information, 2L) } if (opt$information[2] == "default") { if (any(opt$missing == c("ml", "ml.x")) || any(opt$se == c("robust.huber.white", "first.order"))) { # nchar(opt$constraints) > 0L) { opt$information[2] <- "observed" } else { opt$information[2] <- "expected" } } # first.order information cannot be used with robust if (opt$information[2] == "first.order" && any(opt$test %in% 
c("satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted"))) { lav_msg_stop(gettextf( "information must be either %s if robust test statistics are requested.", lav_msg_view(c("expected", "observed"), log.sep = "or"))) } if (length(opt$observed.information) == 1L) { opt$observed.information <- rep(opt$observed.information, 2L) } if (all(opt$observed.information[2] != c("hessian", "h1"))) { if (opt$observed.information[2] == "default") { if (any(opt$test %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted" ))) { if (length(opt$test) > 1L) { opt$observed.information[2] <- "h1" # CHANGED in 0.6-6! if (any(opt$test == "yuan.bentler.mplus")) { lav_msg_warn(gettext( "observed.information for ALL test statistics is set to h1.")) } } else { if (opt$estimator == "PML" || opt$test[1] == "yuan.bentler.mplus") { opt$observed.information[2] <- "hessian" } else { opt$observed.information[2] <- "h1" # CHANGED in 0.6-6! } } } else { # default is "hessian" opt$observed.information[2] <- "hessian" } } } if (length(opt$h1.information) == 1L) { opt$h1.information <- rep(opt$h1.information, 2L) } if (opt$h1.information.meat == "default") { opt$h1.information.meat <- opt$h1.information[1] } # check information if estimator is uls/wls and friends if (any(lav_options_estimatorgroup(opt$estimator) == c("ULS", "WLS", "DWLS"))) { if (opt$information[1] != "expected") { lav_msg_warn(gettextf( "information will be set to %1$s for estimator = %2$s", dQuote("expected"), dQuote(opt$estimator)) ) opt$information <- rep.int("expected", 2L) } opt$h1.information <- rep.int("unstructured", 2L) #FIXME: allow option? } # omega information if (opt$omega.information == "default") { opt$omega.information <- opt$information[2] # test version! } if (opt$omega.h1.information == "default") { # opt$omega.h1.information <- opt$h1.information[2] # test version! 
opt$omega.h1.information <- "unstructured" } if (opt$omega.h1.information.meat == "default") { opt$omega.h1.information.meat <- opt$omega.h1.information } # conditional.x if (is.character(opt$conditional.x)) { # = "default" if (opt$.categorical) { opt$conditional.x <- TRUE } else { opt$conditional.x <- FALSE } } # if conditional.x, always use a meanstructure if (opt$conditional.x) { opt$meanstructure <- TRUE } # fixed.x if (is.logical(opt$fixed.x)) { # if(opt$conditional.x && opt$fixed.x == FALSE && !opt$.multilevel) { if (opt$conditional.x && opt$fixed.x == FALSE) { lav_msg_stop(gettext( "fixed.x = FALSE is not supported when conditional.x = TRUE.")) } if (opt$fixed.x && is.character(opt$start) && opt$start == "simple") { lav_msg_warn(gettextf( "start = %s implies fixed.x = FALSE", dQuote(opt$start))) opt$fixed.x <- FALSE } } else if (opt$fixed.x == "default") { if (opt$conditional.x) { opt$fixed.x <- TRUE } else { opt$fixed.x <- FALSE } } # meanstructure again if (any(opt$missing == c("ml", "ml.x")) || opt$model.type == "growth") { opt$meanstructure <- TRUE } if (any(c("intercepts", "means") %in% opt$group.equal)) { opt$meanstructure <- TRUE } # if(opt$se == "robust.huber.white" || # opt$se == "robust.sem" || # opt$test == "satorra.bentler" || # opt$test == "mean.var.adjusted" || # opt$test == "scaled.shifted" || # opt$test == "yuan.bentler") { # opt$meanstructure <- TRUE # } if (!is.logical(opt$meanstructure)) { lav_msg_fixme("meanstructure must be logical at this point!") } # zero cell frequencies if (is.character(opt$zero.add)) { # = "default" opt$zero.add <- c(0.5, 0.0) # FIXME: TODO: mimic EQS , LISREL (0.0, 0.0) } else { if (length(opt$zero.add) == 1L) { opt$zero.add <- c(opt$zero.add, opt$zero.add) } } if (is.character(opt$zero.keep.margins)) { # = "default" opt$zero.keep.margins <- FALSE } # parameterization if (opt$parameterization == "default") { # for now, default is always delta opt$parameterization <- "delta" } # std.lv vs auto.fix.first # new in 0.6-5 (used to be in sem/cfa/growth) if (opt$std.lv) { opt$auto.fix.first <- FALSE } # std.lv vs effect.coding # new in 0.6-4 if (is.logical(opt$effect.coding)) { if (opt$effect.coding) { opt$effect.coding <- c("loadings", "intercepts") } else { opt$effect.coding <- "" } } # if we use effect coding for the factor loadings, we don't need/want # std.lv = TRUE if (any("loadings" == opt$effect.coding)) { if (opt$std.lv) { lav_msg_stop(gettextf( "std.lv is set to FALSE but effect.coding contains %s", dQuote("loadings"))) } # shut off auto.fix.first opt$auto.fix.first <- FALSE } # test again # unless test = "none", always add test = "standard" as the # first entry # NO: this breaks lavaan.survey pval.pFsum, which has the following check: # if (!lavInspect(lavaan.fit, "options")$test %in% c("satorra.bentler", # "mean.var.adjusted", "Satterthwaite")) { # lav_msg_stop( # gettext("Please refit the model with Satorra-Bentler (MLM)"), # gettext(" or Satterthwaite (MLMVS) adjustment.")) # } # if(! 
(length(opt$test) == 1L && opt$test == "none") ) { # opt$test <- c("standard", opt$test) # opt$test <- unique(opt$test) # } # add scaled.test to test (if not already there) if (opt$scaled.test != "standard") { if (length(opt$test) == 1L && opt$test[1] == "standard") { opt$test <- unique(c(opt$test, opt$scaled.test)) } else { opt$test <- unique(c(opt$scaled.test, opt$test)) } # make sure "standard" comes first # (note: which(...)[1] is NA if "standard" is absent, so guard with is.na) standard.idx <- which(opt$test == "standard")[1] if (!is.na(standard.idx) && standard.idx != 1L) { opt$test <- c("standard", opt$test[-standard.idx]) } } # final check wrong.idx <- which(!opt$test %in% c( "none", "standard", "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted", "browne.residual.adf", "browne.residual.nt", "browne.residual.nt.model", "browne.residual.adf.model", "bollen.stine" )) if (length(wrong.idx) > 0L) { lav_msg_stop(gettextf( "invalid option(s) for test argument: %1$s. Possible options are: %2$s.", lav_msg_view(opt$test[wrong.idx]), lav_msg_view(c("none", "standard", "browne.residual.adf", "browne.residual.nt", "browne.residual.adf.model", "browne.residual.nt.model", "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted", "bollen.stine"), log.sep = "or"))) } # bounds if (is.null(opt$bounds)) { if (length(opt$optim.bounds) > 0L) { opt$bounds <- "user" } else { opt$bounds <- "none" # for now } } else if (is.logical(opt$bounds)) { if (opt$bounds) { opt$bounds <- "wide" # default for most estimators } else { opt$bounds <- "none" } } # optim.bounds if (length(opt$optim.bounds) > 0L) { # opt$bounds should be "default", or "user" (or "none") if (any(opt$bounds == c("default", "none", "user"))) { opt$bounds <- "user" } else { lav_msg_stop( gettext("bounds and optim.bounds arguments can not be used together; remove the bounds= argument or set it to \"user\".") ) } } # handle different 'profiles' if (opt$bounds == "none") { opt$optim.bounds <- list( lower = character(0L), upper = character(0L) ) } else if (opt$bounds == "user") { if (length(opt$optim.bounds) == 0L) { lav_msg_stop(gettextf( "bounds= is %s but optim.bounds= argument is empty", dQuote("user") )) } } else if (opt$bounds == "default" || opt$bounds == "wide") { opt$optim.bounds <- list( lower = c("ov.var", "lv.var", "loadings", "covariances"), upper = c("ov.var", "lv.var", "loadings", "covariances"), lower.factor = c(1.05, 1.0, 1.1, 1.0), upper.factor = c(1.20, 1.3, 1.1, 1.0), min.reliability.marker = 0.1, min.var.lv.endo = 0.005 ) } else if (opt$bounds == "wide.zerovar") { opt$optim.bounds <- list( lower = c("ov.var", "lv.var", "loadings", "covariances"), upper = c("ov.var", "lv.var", "loadings", "covariances"), lower.factor = c(1.00, 1.0, 1.1, 1.0), upper.factor = c(1.20, 1.3, 1.1, 1.0), min.reliability.marker = 0.1, min.var.lv.endo = 0.005 ) } else if (opt$bounds == "standard") { opt$optim.bounds <- list( lower = c("ov.var", "lv.var", "loadings", "covariances"), upper = c("ov.var", "lv.var", "loadings", "covariances"), lower.factor = c(1.0, 1.0, 1.0, 0.999), upper.factor = c(1.0, 1.0, 1.0, 0.999), min.reliability.marker = 0.1, min.var.lv.endo = 0.005 ) } else if (opt$bounds == "pos.var") { opt$optim.bounds <- list( lower = c("ov.var", "lv.var"), lower.factor = c(1, 1), min.reliability.marker = 0.0, min.var.lv.exo = 0.0, min.var.lv.endo = 0.0 ) } else if (opt$bounds == "pos.ov.var") { opt$optim.bounds <- list( lower = c("ov.var"), lower.factor = 1 ) } else if (opt$bounds == "pos.lv.var") { opt$optim.bounds <- list( lower =
c("lv.var"), lower.factor = 1, min.reliability.marker = 0.0, min.var.lv.exo = 0.0, min.var.lv.endo = 0.0 ) } # rotations.args if (!is.list(opt$rotation.args)) { lav_msg_stop(gettext("rotation.args should be be list.")) } # force orthogonal for some rotation algorithms if (any(opt$rotation == c("varimax", "entropy", "mccammon", "tandem1", "tandem2"))) { opt$rotation.args$orthogonal <- TRUE } # if target, check target matrix, and set order.lv.by to = "none" if (opt$rotation == "target" || opt$rotation == "pst") { target <- opt$rotation.args$target if (is.null(target)) { lav_msg_stop(gettext("rotation target matrix is NULL")) } if (is.list(target)) { if (!all(sapply(target, is.matrix))) { lav_msg_stop(gettext("the target list contains elements that are not a matrix")) } } else if (!is.matrix(target)) { lav_msg_stop(gettext("rotation target matrix is not a matrix")) } opt$rotation.args$order.lv.by <- "none" } if (opt$rotation == "pst") { target.mask <- opt$rotation.args$target.mask if (is.null(target.mask)) { lav_msg_stop(gettext("rotation target.mask matrix is NULL")) } if (is.list(target.mask)) { if (!all(sapply(target.mask, is.matrix))) { lav_msg_stop(gettext("the target.mask list contains elements that are not a matrix")) } } else if (!is.matrix(target.mask)) { lav_msg_stop(gettext("rotation target.mask matrix is not a matrix")) } if (is.list(target) && !is.list(target.mask)) { lav_msg_stop(gettext("target is a list, but target.mask is not a list")) } if (is.list(target.mask) && !is.list(target)) { lav_msg_stop(gettext("target.mask is a list, but target is not a list")) } if (is.list(target) && is.list(target.mask)) { if (length(target) != length(target.mask)) { lav_msg_stop(gettext("length(target) != length(target.mask)")) } } } # if NAs, force opt$rotation to be 'pst' and create target.mask if (opt$rotation == "target") { # matrix if (is.matrix(target) && anyNA(target)) { opt$rotation <- "pst" target.mask <- matrix(1, nrow = nrow(target), ncol = ncol(target)) target.mask[is.na(target)] <- 0 opt$rotation.args$target.mask <- target.mask # list } else if (is.list(target)) { ngroups <- length(target) for (g in seq_len(ngroups)) { if (anyNA(target[[g]])) { # is target.mask just a <0 x 0 matrix>? create list! if (is.matrix(opt$rotation.args$target.mask)) { opt$rotation.args$target.mask <- vector("list", length = ngroups) } opt$rotation <- "pst" target.mask <- matrix(1, nrow = nrow(target[[g]]), ncol = ncol(target[[g]])) target.mask[is.na(target[[g]])] <- 0 opt$rotation.args$target.mask[[g]] <- target.mask } } } } # set row.weights opt$rotation.args$row.weights <- tolower(opt$rotation.args$row.weights) if (opt$rotation.args$row.weights == "default") { # the default is "none", except for varimax and promax if (any(opt$rotation == c("varimax", "promax"))) { opt$rotation.args$row.weights <- "kaiser" } else { opt$rotation.args$row.weights <- "none" } } # override if bifactor if (any(opt$rotation == c("bi-geomin", "bigeomin", "bi-quartimin", "biquartimin"))) { opt$rotation.args$order.lv.by <- "none" } # no standard errors for promax (for now)... 
if (opt$rotation == "promax") { opt$se <- "none" opt$rotation.args$algorithm <- "promax" opt$rotation.args$rstarts <- 0L } # correlation if (opt$correlation) { # standardize opt$std.ov <- TRUE # if ML, switch to GLS if(opt$estimator == "ml") { #lav_msg_warn(gettext( # "GLS should be used for correlation structures instead of ML.")) opt$estimator <- "gls" } if (opt$missing == "ml") { lav_msg_stop(gettext( "correlation structures only work for complete data (for now).")) } if (opt$.multilevel) { lav_msg_stop(gettext( "correlation structures only work for single-level data.")) } if (opt$conditional.x) { lav_msg_stop(gettext( "correlation structures only work for conditional.x = FALSE (for now)." )) } if (opt$representation == "RAM") { lav_msg_stop(gettext( "correlation structures only work for representation = \"LISREL\".")) } if (opt$fixed.x) { lav_msg_stop(gettext( "correlation structures only work for fixed.x = FALSE (for now).")) } } # sample.cov.robust # new in 0.6-17 # sample.cov.robust cannot be used if: # - data is missing (for now), # - sampling weights are used # - estimator is (D)WLS # - multilevel # - conditional.x if (opt$sample.cov.robust) { if (opt$missing != "listwise") { lav_msg_stop(gettext( "sample.cov.robust = TRUE does not work (yet) if data is missing.")) } if (opt$.categorical) { lav_msg_stop(gettext( "sample.cov.robust = TRUE does not work (yet) if data is categorical")) } if (opt$.clustered || opt$.multilevel) { lav_msg_stop(gettext( "sample.cov.robust = TRUE does not work (yet) if data is clustered")) } if (opt$conditional.x) { lav_msg_stop(gettext( "sample.cov.robust = TRUE does not work (yet) if conditional.x = TRUE")) } if (all(lav_options_estimatorgroup(opt$estimator) != c("ML", "GLS"))) { lav_msg_stop(gettext( "sample.cov.robust = TRUE does not work (yet) if estimator is not GLS or ML")) } } opt$estimator <- lav_options_estimatorgroup(opt$estimator) # group.w.free # if(opt$group.w.free && opt$.categorical) { # lav_msg_stop(gettext( # "group.w.free = TRUE is not supported (yet) in the categorical setting." # )) # } # in order not to break semTools and blavaan, we restore categorical: opt$categorical <- opt$.categorical if (lav_debug()) { cat("lavaan DEBUG: lavaanOptions OUT\n") str(opt) } opt } lavaan/R/lav_samplestats_wls_obs.R0000644000176200001440000000563614627656441017023 0ustar liggesuserslav_samplestats_wls_obs <- function(mean.g, cov.g, var.g, th.g, th.idx.g, res.int.g, res.cov.g, res.var.g, res.th.g, res.slopes.g, group.w.g, categorical = FALSE, conditional.x = FALSE, meanstructure = FALSE, correlation = FALSE, slopestructure = FALSE, group.w.free = FALSE) { # WLS.obs if (categorical) { # order of elements is important here: # 1. thresholds + (negative) means (interleaved) # 2. slopes (if any) # 3. variances (if any) # 4. covariance matrix (no diagonal!) # NOTE: prior to 0.5-17, we had this: # TH[ov.types == "numeric"] <- -1*TH[ov.types == "numeric"] # which is WRONG if we have more than one threshold per variable # (thanks to Sacha Epskamp for spotting this!) 
if (conditional.x) { TH <- res.th.g TH[th.idx.g == 0] <- -1 * TH[th.idx.g == 0] nvar <- length(res.var.g) num.idx <- which(!seq_len(nvar) %in% th.idx.g) WLS.obs <- c( TH, lav_matrix_vec(res.slopes.g), res.var.g[num.idx], lav_matrix_vech(res.cov.g, diagonal = FALSE) ) } else { TH <- th.g TH[th.idx.g == 0] <- -1 * TH[th.idx.g == 0] nvar <- length(var.g) num.idx <- which(!seq_len(nvar) %in% th.idx.g) WLS.obs <- c( TH, var.g[num.idx], lav_matrix_vech(cov.g, diagonal = FALSE) ) } } else { # CONTINUOUS: DIAG <- TRUE if (correlation) { DIAG <- FALSE } if (conditional.x) { if (meanstructure) { if (slopestructure) { # order = vec(Beta), where first row are intercepts # cbind(res.int, res.slopes) is t(Beta) # so we need vecr WLS.obs <- c( lav_matrix_vecr(cbind( res.int.g, res.slopes.g )), lav_matrix_vech(res.cov.g, diagonal = DIAG) ) } else { WLS.obs <- c( res.int.g, lav_matrix_vech(res.cov.g, diagonal = DIAG) ) } } else { if (slopestructure) { WLS.obs <- c( lav_matrix_vecr(res.slopes.g), lav_matrix_vech(res.cov.g, diagonal = DIAG) ) } else { WLS.obs <- lav_matrix_vech(res.cov.g, diagonal = DIAG) } } } else { if (meanstructure) { WLS.obs <- c( mean.g, lav_matrix_vech(cov.g, diagonal = DIAG) ) } else { WLS.obs <- lav_matrix_vech(cov.g, diagonal = DIAG) } } } # group.w.free? if (group.w.free) { WLS.obs <- c(group.w.g, WLS.obs) } WLS.obs } lavaan/R/lav_predict.R0000644000176200001440000015632014627656441014362 0ustar liggesusers# lavPredict() contains a collection of `predict' methods # the unifying theme is that they all rely on the (unknown, to be estimated) # or (known, a priori specified) values for the latent variables # # lv: latent variables (aka `factor scores') # ov: predict linear part of y_i # # - YR 11 June 2013: first version, in order to get factor scores for the # categorical case # - YR 12 Jan 2014: refactoring + lav_predict_fy (to be used by estimator MML) # # overload standard R function `predict' setMethod( "predict", "lavaan", function(object, newdata = NULL) { lavPredict( object = object, newdata = newdata, type = "lv", method = "EBM", fsm = FALSE, optim.method = "bfgs" ) } ) # efaList version predict.efaList <- function(object, ...) { # kill object$loadings if present object[["loadings"]] <- NULL if (length(object) == 1L) { # unlist object <- object[[1]] } else { # use the 'last' one per default object <- object[[length(object)]] } predict(object, ...) } # public function lavPredict <- function(object, newdata = NULL, # keep order of predict(), 0.6-7 type = "lv", method = "EBM", transform = FALSE, se = "none", acov = "none", label = TRUE, fsm = FALSE, mdist = FALSE, append.data = FALSE, assemble = FALSE, # or TRUE?
level = 1L, optim.method = "bfgs", ETA = NULL, drop.list.single.group = TRUE) { # catch efaList objects if (inherits(object, "efaList")) { # kill object$loadings if present object[["loadings"]] <- NULL if (length(object) == 1L) { # unlist object <- object[[1]] } else { # use the 'last' one per default object <- object[[length(object)]] } } stopifnot(inherits(object, "lavaan")) lavmodel <- object@Model lavdata <- object@Data lavsamplestats <- object@SampleStats # backward compatibility if (.hasSlot(object, "h1")) { lavh1 <- object@h1 } else { lavh1 <- lav_h1_implied_logl( lavdata = object@Data, lavsamplestats = object@SampleStats, lavoptions = object@Options ) } lavimplied <- object@implied res <- lav_predict_internal( lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, lavh1 = lavh1, lavpartable = object@ParTable, newdata = newdata, type = type, method = method, transform = transform, se = se, acov = acov, label = label, fsm = fsm, mdist = mdist, append.data = append.data, assemble = assemble, level = level, optim.method = optim.method, ETA = ETA, drop.list.single.group = drop.list.single.group ) res } # internal version, to be used if lavobject does not exist yet lav_predict_internal <- function(lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavh1 = NULL, lavimplied = NULL, lavpartable = NULL, # standard options newdata = NULL, # keep order of predict(), 0.6-7 type = "lv", method = "EBM", transform = FALSE, se = "none", acov = "none", label = TRUE, fsm = FALSE, mdist = FALSE, append.data = FALSE, assemble = FALSE, # or TRUE? level = 1L, optim.method = "bfgs", ETA = NULL, drop.list.single.group = TRUE) { # type type <- tolower(type) lavpta <- lav_partable_attributes(lavpartable) if (type %in% c("latent", "lv", "factor", "factor.score", "factorscore")) { type <- "lv" } else if (type %in% c("ov", "yhat")) { type <- "yhat" } else if (type %in% c("residuals", "resid", "error")) { type <- "resid" } # if resid, not for categorical if (type == "resid" && lavmodel@categorical) { lav_msg_stop(gettext("casewise residuals not available if data is categorical")) } # append.data? check level if (append.data && level > 1L) { lav_msg_warn(gettext("append.data not available if level > 1L")) append.data <- FALSE } # mdist? -> fsm = TRUE if (mdist) { fsm <- TRUE } # se? 
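# note: requesting the asymptotic covariance matrix implies standard
# errors: in the block below, acov != "none" forces se <- acov, so a
# (hypothetical) call like lavPredict(fit, type = "lv", acov = "standard")
# returns both an "se" and an "acov" attribute on the result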
if (acov != "none") { se <- acov # ACOV implies SE } if (se != "none") { if (is.logical(se) && se) { se <- "standard" if (acov != "none") { acov <- se # reverse-imply upstream } } if (type != "lv") { lav_msg_stop(gettext("standard errors only available if type = \"lv\"")) } if (lavmodel@categorical) { se <- acov <- "none" lav_msg_warn(gettext( "standard errors not available (yet) for non-normal data")) } # if(lavdata@missing %in% c("ml", "ml.x")) { # se <- acov <- "none" # warning("lavaan WARNING: standard errors not available (yet) for missing data + fiml") # } } # need full data set supplied if (is.null(newdata)) { # use internal copy: if (lavdata@data.type != "full") { lav_msg_stop(gettext( "sample statistics were used for fitting and newdata is empty")) } else if (is.null(lavdata@X[[1]])) { lav_msg_stop(gettext("no local copy of data; FIXME!")) } else { data.obs <- lavdata@X ov.names <- lavdata@ov.names } eXo <- lavdata@eXo } else { OV <- lavdata@ov newData <- lavData( data = newdata, group = lavdata@group, ov.names = lavdata@ov.names, ov.names.x = lavdata@ov.names.x, ordered = OV$name[OV$type == "ordered"], lavoptions = list( std.ov = lavdata@std.ov, group.label = lavdata@group.label, missing = lavdata@missing ), # was FALSE before? allow.single.case = TRUE ) # if ordered, check if number of levels is till the same (new in 0.6-7) if (lavmodel@categorical) { orig.ordered.idx <- which(lavdata@ov$type == "ordered") orig.ordered.lev <- lavdata@ov$nlev[orig.ordered.idx] match.new.idx <- match( lavdata@ov$name[orig.ordered.idx], newData@ov$name ) new.ordered.lev <- newData@ov$nlev[match.new.idx] if (any(orig.ordered.lev - new.ordered.lev != 0)) { lav_msg_stop( gettext("mismatch number of categories for some ordered variables in newdata compared to original data.") ) } } data.obs <- newData@X eXo <- newData@eXo ov.names <- newData@ov.names } if (type == "lv") { if (!is.null(ETA)) { lav_msg_warn(gettext("lvs will be predicted here; supplying ETA has no effect")) } # post fit check (lv pd?) # ok <- lav_object_post_check(object) # if(!ok) { # stop("lavaan ERROR: lavInspect(,\"post.check\") is not TRUE; factor scores can not be computed. See the WARNING message.") # } out <- lav_predict_eta( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, se = se, acov = acov, level = level, data.obs = data.obs, eXo = eXo, method = method, fsm = fsm, optim.method = optim.method ) # extract fsm here if (fsm) { FSM <- attr(out, "fsm") } # extract se here if (se != "none") { SE <- attr(out, "se") if (acov != "none") { ACOV <- attr(out, "acov") } } # remove dummy lv? (removes attr!) out <- lapply(seq_len(lavdata@ngroups), function(g) { # determine block if (lavdata@nlevels == 1L) { bb <- g } else { bb <- (g - 1) * lavdata@nlevels + level } lv.idx <- c( lavmodel@ov.y.dummy.lv.idx[[bb]], lavmodel@ov.x.dummy.lv.idx[[bb]] ) ret <- out[[g]] if (length(lv.idx) > 0L) { ret <- out[[g]][, -lv.idx, drop = FALSE] } ret }) # we need to remove the dummy's before we transform if (fsm) { FSM <- lapply(seq_len(lavdata@ngroups), function(g) { # determine block if (lavdata@nlevels == 1L) { bb <- g } else { bb <- (g - 1) * lavdata@nlevels + level } lv.idx <- c( lavmodel@ov.y.dummy.lv.idx[[bb]], lavmodel@ov.x.dummy.lv.idx[[bb]] ) # ov.idx <- lavmodel@ov.x.dummy.ov.idx[[bb]] # or should we use pta$vidx$ov.ind? 
ov.ind <- lavpta$vidx$ov.ind[[bb]] ret <- FSM[[g]] if (length(lv.idx) > 0L) { if (is.matrix(FSM[[g]])) { ret <- FSM[[g]][-lv.idx, ov.ind, drop = FALSE] } else if (is.list(FSM[[g]])) { FSM[[g]] <- lapply(FSM[[g]], function(x) { ret <- x[-lv.idx, ov.ind, drop = FALSE] ret }) } } ret }) } # new in 0.6-16 # we assume the dummy lv's have already been removed if (transform) { VETA <- computeVETA(lavmodel = lavmodel, remove.dummy.lv = TRUE) EETA <- computeEETA( lavmodel = lavmodel, lavsamplestats = lavsamplestats, remove.dummy.lv = TRUE ) out <- lapply(seq_len(lavdata@ngroups), function(g) { # determine block if (lavdata@nlevels == 1L) { bb <- g } else { bb <- (g - 1) * lavdata@nlevels + level } FS.centered <- scale(out[[g]], center = TRUE, scale = FALSE ) FS.cov <- crossprod(FS.centered) / nrow(FS.centered) FS.cov.inv <- try(solve(FS.cov), silent = TRUE) if (inherits(FS.cov.inv, "try-error")) { lav_msg_warn( gettext("could not invert (co)variance matrix of factor scores; returning original factor scores.")) return(out[[g]]) } fs.inv.sqrt <- lav_matrix_symmetric_sqrt(FS.cov.inv) veta.sqrt <- lav_matrix_symmetric_sqrt(VETA[[g]]) if (fsm) { # change FSM FSM[[g]] <<- veta.sqrt %*% fs.inv.sqrt %*% FSM[[g]] } tmp <- FS.centered %*% fs.inv.sqrt %*% veta.sqrt ret <- t(t(tmp) + drop(EETA[[g]])) ret }) } # new in 0.6-17 if (mdist) { VETA <- computeVETA(lavmodel = lavmodel, remove.dummy.lv = TRUE) EETA <- computeEETA( lavmodel = lavmodel, lavsamplestats = lavsamplestats, remove.dummy.lv = TRUE ) MDIST <- lapply(seq_len(lavdata@ngroups), function(g) { A <- FSM[[g]] Sigma <- lavimplied$cov[[g]] if (transform) { fs.cov <- VETA[[g]] } else { fs.cov <- A %*% Sigma %*% t(A) } fs.cov.inv <- solve(fs.cov) # Mahalanobis distance fs.c <- t(t(out[[g]]) - EETA[[g]]) # center df.squared <- rowSums((fs.c %*% fs.cov.inv) * fs.c) ret <- df.squared # squared! ret }) } # append original/new data? (also remove attr) if (append.data && level == 1L) { out <- lapply(seq_len(lavdata@ngroups), function(g) { ret <- cbind(out[[g]], data.obs[[g]]) ret }) } if (se != "none") { SE <- lapply(seq_len(lavdata@ngroups), function(g) { # determine block if (lavdata@nlevels == 1L) { bb <- g } else { bb <- (g - 1) * lavdata@nlevels + level } lv.idx <- c( lavmodel@ov.y.dummy.lv.idx[[bb]], lavmodel@ov.x.dummy.lv.idx[[bb]] ) ret <- SE[[g]] if (length(lv.idx) > 0L) { ret <- SE[[g]][, -lv.idx, drop = FALSE] } ret }) if (acov != "none") { ACOV <- lapply(seq_len(lavdata@ngroups), function(g) { # determine block if (lavdata@nlevels == 1L) { bb <- g } else { bb <- (g - 1) * lavdata@nlevels + level } lv.idx <- c( lavmodel@ov.y.dummy.lv.idx[[bb]], lavmodel@ov.x.dummy.lv.idx[[bb]] ) ret <- ACOV[[g]] if (length(lv.idx) > 0L) { if (is.matrix(ACOV[[g]])) { ret <- ACOV[[g]][-lv.idx, -lv.idx, drop = FALSE] } else if (is.list(ACOV[[g]])) { ret <- lapply(ACOV[[g]], function(x) { ret <- x[-lv.idx, -lv.idx, drop = FALSE] ret }) } } ret }) } # acov } # se # label?
if (label) { for (g in seq_len(lavdata@ngroups)) { if (lavdata@nlevels > 1L) { gg <- (g - 1) * lavdata@nlevels + level } else { gg <- g } if (append.data) { colnames(out[[g]]) <- c( lavpta$vnames$lv[[gg]], ov.names[[g]] ) # !not gg } else { colnames(out[[g]]) <- lavpta$vnames$lv[[gg]] } if (fsm) { if (is.null(FSM[[g]])) { # skip } else if (is.matrix(FSM[[g]])) { dimnames(FSM[[g]]) <- list( lavpta$vnames$lv[[gg]], # ov.names[[g]]) # !not gg lavpta$vnames$ov.ind[[gg]] ) } else if (is.list(FSM[[g]])) { FSM[[g]] <- lapply(FSM[[g]], function(x) { dimnames(x) <- list( lavpta$vnames$lv[[gg]], # ov.names[[g]]) # !not gg lavpta$vnames$ov.ind[[gg]] ) x }) } } if (se != "none") { if (!is.null(SE[[g]])) { colnames(SE[[g]]) <- lavpta$vnames$lv[[gg]] } } if (acov != "none") { if (is.null(ACOV[[g]])) { # skip } else if (is.matrix(ACOV[[g]])) { dimnames(ACOV[[g]]) <- list( lavpta$vnames$lv[[gg]], lavpta$vnames$lv[[gg]] ) } else if (is.list(ACOV[[g]])) { ACOV[[g]] <- lapply(ACOV[[g]], function(x) { dimnames(x) <- list( lavpta$vnames$lv[[gg]], lavpta$vnames$lv[[gg]] ) x }) } } } # g # group.labels if (lavdata@ngroups > 1L) { names(out) <- lavdata@group.label if (se != "none") { names(SE) <- lavdata@group.label } if (acov != "none") { names(ACOV) <- lavdata@group.label } } } # label # yhat: estimated value for the observed indicators, given (estimated) # factor scores # resid: y - yhat } else if (type %in% c("yhat", "resid")) { resid.flag <- type == "resid" out <- lav_predict_yhat( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, ETA = ETA, method = method, optim.method = optim.method, fsm = fsm, resid.flag = resid.flag ) if (fsm) { FSM <- attr(out, "fsm") } # label? if (label) { for (g in seq_len(lavdata@ngroups)) { colnames(out[[g]]) <- lavpta$vnames$ov[[g]] } } # mdist if (mdist) { LAMBDA <- computeLAMBDA( lavmodel = lavmodel, remove.dummy.lv = FALSE ) MDIST <- lapply(seq_len(lavdata@ngroups), function(g) { Sigma <- lavimplied$cov[[g]] LA <- LAMBDA[[g]] if (type == "resid") { ILA <- diag(ncol(Sigma)) - LA %*% FSM[[g]] Omega.e <- ILA %*% Sigma %*% t(ILA) eig <- eigen(Omega.e, symmetric = TRUE) A <- eig$vectors[, seq_len(nrow(LA) - ncol(LA)), drop = FALSE ] } else if (type == "yhat") { LAA <- LA %*% FSM[[g]] Omega.e <- LAA %*% Sigma %*% t(LAA) eig <- eigen(Omega.e, symmetric = TRUE) A <- eig$vectors[, seq_len(ncol(LA)), drop = FALSE] } outA <- apply(out[[g]], 1L, function(x) { colSums(A * x, na.rm = TRUE) }) if (is.matrix(outA)) { outA <- t(outA) } else { outA <- as.matrix(outA) } # if(lavmodel@meanstructure) { # est.mean <- drop(t(lavimplied$mean[[g]]) %*% A) # if(type == "resid") { # obs.mean <- drop(lavh1$implied$mean[[g]] %*% A) # est.mean <- drop(t(lavimplied$mean[[g]]) %*% A) # outA.mean <- obs.mean - est.mean # } else if(type == "yhat") { # outA.mean <- est.mean # } # } else { # outA.mean <- colMeans(outA) # } outA.cov <- t(A) %*% Omega.e %*% A outA.cov.inv <- solve(outA.cov) # Mahalanobis distance # outA.c <- t( t(outA) - outA.mean ) # center outA.c <- outA df.squared <- rowSums((outA.c %*% outA.cov.inv) * outA.c) ret <- df.squared # squared! ret }) } # density for each observed item, given (estimated) factor scores } else if (type == "fy") { out <- lav_predict_fy( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, ETA = ETA, method = method, optim.method = optim.method ) # label?
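# note on the Mahalanobis distances above: for type = "resid" the residual
# covariance Omega.e = (I - LA %*% FSM) %*% Sigma %*% t(I - LA %*% FSM) is
# singular by construction (rank = nvar - nfac), so the residuals are first
# projected onto the leading nvar - nfac eigenvectors of Omega.e before the
# quadratic form is taken; for type = "yhat" the fitted values span only an
# nfac-dimensional space and are projected onto the leading nfac
# eigenvectors in the same way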
if (label) { for (g in seq_len(lavdata@ngroups)) { colnames(out[[g]]) <- lavpta$vnames$ov[[g]] } } } else { lav_msg_stop(gettext("type must be one of: lv yhat fy")) } # lavaan.matrix out <- lapply(out, "class<-", c("lavaan.matrix", "matrix")) if (lavdata@ngroups == 1L && drop.list.single.group) { res <- out[[1L]] } else { res <- out } # assemble multiple groups into a single data.frame? (new in 0.6-4) if (lavdata@ngroups > 1L && assemble) { if (!is.null(newdata)) { lavdata <- newData } DATA <- matrix(as.numeric(NA), nrow = sum(unlist(lavdata@norig)), ncol = ncol(out[[1L]]) ) # assume == per g colnames(DATA) <- colnames(out[[1L]]) for (g in seq_len(lavdata@ngroups)) { DATA[lavdata@case.idx[[g]], ] <- out[[g]] } DATA <- as.data.frame(DATA, stringsAsFactors = FALSE) if (!is.null(newdata)) { DATA[, lavdata@group] <- newdata[, lavdata@group] } else { # add group DATA[, lavdata@group] <- rep(as.character(NA), nrow(DATA)) if (lavdata@missing == "listwise") { # we will lose the group label of omitted cases! DATA[unlist(lavdata@case.idx), lavdata@group] <- rep(lavdata@group.label, unlist(lavdata@nobs)) } else { DATA[unlist(lavdata@case.idx), lavdata@group] <- rep(lavdata@group.label, unlist(lavdata@norig)) } } res <- DATA } if (fsm && type == "lv") { attr(res, "fsm") <- FSM } if (mdist) { attr(res, "mdist") <- MDIST } if (se != "none") { attr(res, "se") <- SE # return full sampling covariance matrix? if (acov == "standard") { attr(res, "acov") <- ACOV } } res } # internal function lav_predict_eta <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, # new data data.obs = NULL, eXo = NULL, # options method = "EBM", fsm = FALSE, se = "none", acov = "none", level = 1L, optim.method = "bfgs") { # full object? if (inherits(lavobject, "lavaan")) { lavdata <- lavobject@Data } else { stopifnot(!is.null(lavdata)) } # method method <- tolower(method) # alias if (method == "regression") { method <- "ebm" } else if (method == "bartlett" || method == "bartlet") { method <- "ml" } # normal case?
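# usage sketch (illustration only; assumes lavaan's built-in
# HolzingerSwineford1939 data):
#
#   fit <- cfa("visual =~ x1 + x2 + x3", data = HolzingerSwineford1939)
#   fs.reg <- lavPredict(fit, method = "regression") # alias for "ebm"
#   fs.ml  <- lavPredict(fit, method = "Bartlett")   # alias for "ml"
#
# with all-numeric indicators these dispatch to lav_predict_eta_normal()
# and lav_predict_eta_bartlett() below; with categorical indicators, both
# methods are handled case-wise by lav_predict_eta_ebm_ml()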
if (all(lavdata@ov$type == "numeric")) { if (method == "ebm") { out <- lav_predict_eta_normal( lavobject = lavobject, lavmodel = lavmodel, lavdata = lavdata, lavimplied = lavimplied, se = se, acov = acov, level = level, lavsamplestats = lavsamplestats, data.obs = data.obs, eXo = eXo, fsm = fsm ) } else if (method == "ml") { out <- lav_predict_eta_bartlett( lavobject = lavobject, lavmodel = lavmodel, lavdata = lavdata, lavimplied = lavimplied, se = se, acov = acov, level = level, lavsamplestats = lavsamplestats, data.obs = data.obs, eXo = eXo, fsm = fsm ) } else { lav_msg_stop(gettextf("unkown method: %s.", method)) } } else { if (method == "ebm") { out <- lav_predict_eta_ebm_ml( lavobject = lavobject, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, se = se, acov = acov, level = level, data.obs = data.obs, eXo = eXo, ML = FALSE, optim.method = optim.method ) } else if (method == "ml") { out <- lav_predict_eta_ebm_ml( lavobject = lavobject, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, se = se, acov = acov, level = level, data.obs = data.obs, eXo = eXo, ML = TRUE, optim.method = optim.method ) } else { lav_msg_stop(gettextf("unkown method: %s.", method)) } } out } # factor scores - normal case # NOTE: this is the classic 'regression' method; for the linear/continuous # case, this is equivalent to both EB and EBM lav_predict_eta_normal <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, # optional new data data.obs = NULL, eXo = NULL, se = "none", acov = "none", level = 1L, fsm = FALSE) { # full object? if (inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats lavimplied <- lavobject@implied } else { stopifnot( !is.null(lavmodel), !is.null(lavdata), !is.null(lavsamplestats), !is.null(lavimplied) ) } if (is.null(data.obs)) { data.obs <- lavdata@X newdata.flag <- FALSE } else { newdata.flag <- TRUE } # eXo not needed # missings? and missing = "ml"? if (lavdata@missing %in% c("ml", "ml.x")) { if (newdata.flag) { MP <- vector("list", lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { MP[[g]] <- lav_data_missing_patterns(data.obs[[g]]) } } else { MP <- lavdata@Mp } } LAMBDA <- computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE) Sigma.hat <- lavimplied$cov Sigma.inv <- lapply(Sigma.hat, MASS::ginv) VETA <- computeVETA(lavmodel = lavmodel) EETA <- computeEETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats) EY <- computeEY(lavmodel = lavmodel, lavsamplestats = lavsamplestats) FS <- vector("list", length = lavdata@ngroups) if (fsm) { FSM <- vector("list", length = lavdata@ngroups) } if (acov != "none") { se <- acov # ACOV implies SE } if (se != "none") { SE <- vector("list", length = lavdata@ngroups) # return full sampling covariance matrix? 
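# the 'regression' factor scores computed below are (per group, ignoring
# dummy lv's and exogenous covariates)
#
#   E(eta | y) = EETA + VETA %*% t(LAMBDA) %*% Sigma.inv %*% (y - EY)
#
# where Sigma is the model-implied covariance matrix of the indicators;
# FSC below is the coefficient matrix VETA %*% t(LAMBDA) %*% Sigma.inv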
if (acov != "none") { ACOV <- vector("list", length = lavdata@ngroups) } } for (g in 1:lavdata@ngroups) { if (lavdata@nlevels > 1L) { Lp <- lavdata@Lp[[g]] YLp <- lavsamplestats@YLp[[g]] # implied for this group group.idx <- (g - 1) * lavdata@nlevels + seq_len(lavdata@nlevels) implied.group <- lapply(lavimplied, function(x) x[group.idx]) # random effects (=random intercepts or cluster means) out <- lav_mvnorm_cluster_implied22l( Lp = Lp, implied = implied.group ) MB.j <- lav_mvnorm_cluster_em_estep_ranef( YLp = YLp, Lp = Lp, sigma.w = out$sigma.w, sigma.b = out$sigma.b, sigma.zz = out$sigma.zz, sigma.yz = out$sigma.yz, mu.z = out$mu.z, mu.w = out$mu.w, mu.b = out$mu.b, se = FALSE ) ov.idx <- Lp$ov.idx if (level == 1L) { data.W <- data.obs[[g]][, ov.idx[[1]]] data.B <- MB.j[Lp$cluster.idx[[2]], , drop = FALSE] # center data.obs.g <- data.W - data.B } else if (level == 2L) { Data.B <- matrix(0, nrow = nrow(MB.j), ncol = ncol(data.obs[[g]]) ) Data.B[, ov.idx[[1]]] <- MB.j between.idx <- Lp$between.idx[[2 * g]] if (length(between.idx) > 0L) { Data.B[, between.idx] <- data.obs[[g]][ !duplicated(Lp$cluster.idx[[2]]), between.idx ] } data.obs.g <- Data.B[, ov.idx[[2]]] } else { lav_msg_stop(gettext("only 2 levels are supported")) } gg <- (g - 1) * lavdata@nlevels + level VETA.g <- VETA[[gg]] EETA.g <- EETA[[gg]] LAMBDA.g <- LAMBDA[[gg]] EY.g <- EY[[gg]] Sigma.inv.g <- Sigma.inv[[gg]] } else { data.obs.g <- data.obs[[g]] VETA.g <- VETA[[g]] EETA.g <- EETA[[g]] LAMBDA.g <- LAMBDA[[g]] EY.g <- EY[[g]] Sigma.inv.g <- Sigma.inv[[g]] } nfac <- ncol(VETA[[g]]) if (nfac == 0L) { FS[[g]] <- matrix(0, lavdata@nobs[[g]], nfac) next } # center data Yc <- t(t(data.obs.g) - EY.g) # global factor score coefficient matrix 'C' FSC <- VETA.g %*% t(LAMBDA.g) %*% Sigma.inv.g # store fsm? if (fsm) { FSM.g <- FSC } # compute factor scores if (lavdata@missing %in% c("ml", "ml.x")) { # missing patterns for this group Mp <- MP[[g]] # factor scores container FS.g <- matrix(as.numeric(NA), nrow(Yc), ncol = length(EETA.g)) # if(fsm) { # FSM.g <- vector("list", length = Mp$npatterns) # } if (se == "standard") { SE.g <- matrix(as.numeric(NA), nrow(Yc), ncol = length(EETA.g)) } if (acov == "standard") { ACOV.g <- vector("list", length = Mp$npatterns) } # compute FSC per pattern for (p in seq_len(Mp$npatterns)) { var.idx <- Mp$pat[p, ] # observed na.idx <- which(!var.idx) # missing # extract observed data for these (centered) cases Oc <- Yc[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] # invert Sigma (Sigma_22, observed part only) for this pattern Sigma_22.inv <- try(lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv.g, rm.idx = na.idx, logdet = FALSE ), silent = TRUE) if (inherits(Sigma_22.inv, "try-error")) { lav_msg_stop(gettext("Sigma_22.inv cannot be inverted")) } lambda <- LAMBDA.g[var.idx, , drop = FALSE] FSC <- VETA.g %*% t(lambda) %*% Sigma_22.inv # FSM? # if(fsm) { # tmp <- matrix(as.numeric(NA), nrow = ncol(lambda), # ncol = ncol(Yc)) # tmp[,var.idx] <- FSC # FSM.g[[p]] <- tmp # } # factor score for this pattern FS.g[Mp$case.idx[[p]], ] <- t(FSC %*% t(Oc) + EETA.g) # SE? if (se == "standard") { tmp <- (VETA.g - VETA.g %*% t(lambda) %*% Sigma_22.inv %*% lambda %*% VETA.g) tmp.d <- diag(tmp) tmp.d[tmp.d < 1e-05] <- as.numeric(NA) # all cases in this pattern get the same SEs SE.g[Mp$case.idx[[p]], ] <- matrix(sqrt(tmp.d), nrow = length(Mp$case.idx[[p]]), ncol = ncol(SE.g), byrow = TRUE ) } # ACOV? 
if (acov == "standard") { ACOV.g[[p]] <- tmp # for this pattern } } # p } else { # compute factor scores FS.g <- t(FSC %*% t(Yc) + EETA.g) } # replace values in dummy lv's by their observed counterpart if (length(lavmodel@ov.y.dummy.lv.idx[[g]]) > 0L && level == 1L) { FS.g[, lavmodel@ov.y.dummy.lv.idx[[g]]] <- data.obs.g[, lavmodel@ov.y.dummy.ov.idx[[g]], drop = FALSE] } if (length(lavmodel@ov.x.dummy.lv.idx[[g]]) > 0L && level == 1L) { FS.g[, lavmodel@ov.x.dummy.lv.idx[[g]]] <- data.obs.g[, lavmodel@ov.x.dummy.ov.idx[[g]], drop = FALSE] } FS[[g]] <- FS.g # FSM if (fsm) { FSM[[g]] <- FSM.g } # standard error if (se == "standard") { if (lavdata@missing %in% c("ml", "ml.x")) { SE[[g]] <- SE.g if (acov == "standard") { ACOV[[g]] <- ACOV.g } } else { # complete data tmp <- (VETA.g - VETA.g %*% t(LAMBDA.g) %*% Sigma.inv.g %*% LAMBDA.g %*% VETA.g) tmp.d <- diag(tmp) tmp.d[tmp.d < 1e-05] <- as.numeric(NA) SE[[g]] <- matrix(sqrt(tmp.d), nrow = 1L) # return full sampling covariance matrix? if (acov == "standard") { ACOV[[g]] <- tmp } } } # se = "standard" } # g if (fsm) { attr(FS, "fsm") <- FSM } if (se != "none") { attr(FS, "se") <- SE # return full sampling covariance matrix? if (acov == "standard") { attr(FS, "acov") <- ACOV } } FS } # factor scores - normal case - Bartlett method # NOTES: 1) this is the classic 'Bartlett' method; for the linear/continuous # case, this is equivalent to 'ML' # 2) the usual formula is: # FSC = solve(lambda' theta.inv lambda) (lambda' theta.inv) # BUT to deal with singular THETA (with zeroes on the diagonal), # we use the 'GLS' version instead: # FSC = solve(lambda' sigma.inv lambda) (lambda' sigma.inv) # Reference: Bentler & Yuan (1997) 'Optimal Conditionally Unbiased # Equivariant Factor Score Estimators' # in Berkane (Ed) 'Latent variable modeling with # applications to causality' (Springer-Verlag) # 3) instead of solve(), we use MASS::ginv, for special settings where # -by construction- (lambda' sigma.inv lambda) is singular # note: this will destroy the conditionally unbiased property # of Bartlett scores!! lav_predict_eta_bartlett <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, # optional new data data.obs = NULL, eXo = NULL, se = "none", acov = "none", level = 1L, fsm = FALSE) { # full object? if (inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats lavimplied <- lavobject@implied } else { stopifnot( !is.null(lavmodel), !is.null(lavdata), !is.null(lavsamplestats), !is.null(lavimplied) ) } if (is.null(data.obs)) { data.obs <- lavdata@X newdata.flag <- FALSE } else { newdata.flag <- TRUE } # eXo not needed # missings? and missing = "ml"? 
if (lavdata@missing %in% c("ml", "ml.x")) { if (newdata.flag) { MP <- vector("list", lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { MP[[g]] <- lav_data_missing_patterns(data.obs[[g]]) } } else { MP <- lavdata@Mp } } LAMBDA <- computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE) Sigma <- lavimplied$cov Sigma.inv <- lapply(lavimplied$cov, MASS::ginv) VETA <- computeVETA(lavmodel = lavmodel) # for se only EETA <- computeEETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats) EY <- computeEY(lavmodel = lavmodel, lavsamplestats = lavsamplestats) FS <- vector("list", length = lavdata@ngroups) if (fsm) { FSM <- vector("list", length = lavdata@ngroups) } if (acov != "none") se <- acov # ACOV implies SE if (se != "none") { SE <- vector("list", length = lavdata@ngroups) # return full sampling covariance matrix? if (acov != "none") { ACOV <- vector("list", length = lavdata@ngroups) } } for (g in 1:lavdata@ngroups) { if (lavdata@nlevels > 1L) { Lp <- lavdata@Lp[[g]] YLp <- lavsamplestats@YLp[[g]] # implied for this group group.idx <- (g - 1) * lavdata@nlevels + seq_len(lavdata@nlevels) implied.group <- lapply(lavimplied, function(x) x[group.idx]) # random effects (=random intercepts or cluster means) # NOTE: is the 'ML' way not simply using the observed cluster # means? out <- lav_mvnorm_cluster_implied22l( Lp = Lp, implied = implied.group ) MB.j <- lav_mvnorm_cluster_em_estep_ranef( YLp = YLp, Lp = Lp, sigma.w = out$sigma.w, sigma.b = out$sigma.b, sigma.zz = out$sigma.zz, sigma.yz = out$sigma.yz, mu.z = out$mu.z, mu.w = out$mu.w, mu.b = out$mu.b, se = FALSE ) ov.idx <- Lp$ov.idx if (level == 1L) { data.W <- data.obs[[g]][, ov.idx[[1]]] data.B <- MB.j[Lp$cluster.idx[[2]], , drop = FALSE] # center data.obs.g <- data.W - data.B } else if (level == 2L) { Data.B <- matrix(0, nrow = nrow(MB.j), ncol = ncol(data.obs[[g]]) ) Data.B[, ov.idx[[1]]] <- MB.j between.idx <- Lp$between.idx[[2 * g]] if (length(between.idx) > 0L) { Data.B[, between.idx] <- data.obs[[g]][ !duplicated(Lp$cluster.idx[[2]]), between.idx ] } data.obs.g <- Data.B[, ov.idx[[2]]] } else { lav_msg_stop(gettext("only 2 levels are supported")) } gg <- (g - 1) * lavdata@nlevels + level VETA.g <- VETA[[gg]] EETA.g <- EETA[[gg]] LAMBDA.g <- LAMBDA[[gg]] EY.g <- EY[[gg]] Sigma.inv.g <- Sigma.inv[[gg]] } else { data.obs.g <- data.obs[[g]] VETA.g <- VETA[[g]] EETA.g <- EETA[[g]] LAMBDA.g <- LAMBDA[[g]] EY.g <- EY[[g]] Sigma.inv.g <- Sigma.inv[[g]] } nfac <- length(EETA[[g]]) if (nfac == 0L) { FS[[g]] <- matrix(0, lavdata@nobs[[g]], nfac) next } # center data Yc <- t(t(data.obs.g) - EY.g) # global factor score coefficient matrix 'C' FSC <- (MASS::ginv(t(LAMBDA.g) %*% Sigma.inv.g %*% LAMBDA.g) %*% t(LAMBDA.g) %*% Sigma.inv.g) # store fsm? if (fsm) { # store fsm? 
FSM.g <- FSC } # compute factor scores if (lavdata@missing %in% c("ml", "ml.x")) { # missing patterns for this group Mp <- MP[[g]] # factor scores container FS.g <- matrix(as.numeric(NA), nrow(Yc), ncol = length(EETA.g)) # if(fsm) { # FSM.g <- vector("list", length = Mp$npatterns) # } if (se == "standard") { SE.g <- matrix(as.numeric(NA), nrow(Yc), ncol = length(EETA.g)) } if (acov == "standard") { ACOV.g <- vector("list", length = Mp$npatterns) } # compute FSC per pattern for (p in seq_len(Mp$npatterns)) { var.idx <- Mp$pat[p, ] # observed na.idx <- which(!var.idx) # missing # extract observed data for these (centered) cases Oc <- Yc[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] # invert Sigma (Sigma_22, observed part only) for this pattern Sigma_22.inv <- try(lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv.g, rm.idx = na.idx, logdet = FALSE ), silent = TRUE) if (inherits(Sigma_22.inv, "try-error")) { lav_msg_stop(gettext("Sigma_22 cannot be inverted")) } lambda <- LAMBDA.g[var.idx, , drop = FALSE] FSC <- (MASS::ginv(t(lambda) %*% Sigma_22.inv %*% lambda) %*% t(lambda) %*% Sigma_22.inv) # if FSC contains rows that are all-zero, replace by NA # # this happens eg if all the indicators of a single factor # are missing; then this column in lambda only contains zeroes # and therefore the corresponding row in FSC contains only # zeroes, leading to factor score 0 # # showing 'NA' is better than getting 0 # # (Note that this is not needed for the 'regression' method, # only for Bartlett) # zero.idx <- which(apply(FSC, 1L, function(x) all(x == 0))) if (length(zero.idx) > 0L) { FSC[zero.idx, ] <- NA } # FSM? # if(fsm) { # tmp <- matrix(as.numeric(NA), nrow = ncol(lambda), # ncol = ncol(Yc)) # tmp[,var.idx] <- FSC # FSM.g[[p]] <- tmp # } # factor scores for this pattern FS.g[Mp$case.idx[[p]], ] <- t(FSC %*% t(Oc) + EETA.g) # SE? if (se == "standard") { tmp <- (MASS::ginv(t(lambda) %*% Sigma_22.inv %*% lambda) - VETA.g) tmp.d <- diag(tmp) tmp.d[tmp.d < 1e-05] <- as.numeric(NA) # all cases in this pattern get the same SEs SE.g[Mp$case.idx[[p]], ] <- matrix(sqrt(tmp.d), nrow = length(Mp$case.idx[[p]]), ncol = ncol(SE.g), byrow = TRUE ) } # ACOV? if (acov == "standard") { ACOV.g[[p]] <- tmp # for this pattern } } # what about FSM?
# There is no single one, but as many as patterns # if(fsm) { # # use 'global' version (just like in complete case) # FSM[[g]] <- ( MASS::ginv(t(LAMBDA.g) %*% Sigma.inv.g %*% # LAMBDA.g) %*% t(LAMBDA.g) %*% Sigma.inv.g ) # } } else { # compute factor scores FS.g <- t(FSC %*% t(Yc) + EETA.g) } # replace values in dummy lv's by their observed counterpart if (length(lavmodel@ov.y.dummy.lv.idx[[g]]) > 0L && level == 1L) { FS.g[, lavmodel@ov.y.dummy.lv.idx[[g]]] <- data.obs[[g]][, lavmodel@ov.y.dummy.ov.idx[[g]], drop = FALSE] } if (length(lavmodel@ov.x.dummy.lv.idx[[g]]) > 0L && level == 1L) { FS.g[, lavmodel@ov.x.dummy.lv.idx[[g]]] <- data.obs[[g]][, lavmodel@ov.x.dummy.ov.idx[[g]], drop = FALSE] } FS[[g]] <- FS.g # FSM if (fsm) { FSM[[g]] <- FSM.g } # standard error if (se == "standard") { if (lavdata@missing %in% c("ml", "ml.x")) { SE[[g]] <- SE.g if (acov == "standard") { ACOV[[g]] <- ACOV.g } } else { # complete data # the traditional formula is: # solve(t(lambda) %*% solve(theta) %*% lambda) # but we replace it by # solve( t(lambda) %*% solve(sigma) %*% lambda ) - psi # to handle negative variances # in addition, we use ginv tmp <- (MASS::ginv(t(LAMBDA.g) %*% Sigma.inv.g %*% LAMBDA.g) - VETA.g) tmp.d <- diag(tmp) tmp.d[tmp.d < 1e-05] <- as.numeric(NA) SE[[g]] <- matrix(sqrt(tmp.d), nrow = 1L) # return full sampling covariance matrix? if (acov == "standard") { ACOV[[g]] <- tmp } } } # se } # g if (fsm) { attr(FS, "fsm") <- FSM } if (se != "none") { attr(FS, "se") <- SE # return full sampling covariance matrix? if (acov == "standard") { attr(FS, "acov") <- ACOV } } FS } # factor scores - EBM or ML lav_predict_eta_ebm_ml <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, # optional new data data.obs = NULL, eXo = NULL, se = "none", acov = "none", level = 1L, ML = FALSE, optim.method = "bfgs") { optim.method <- tolower(optim.method) stopifnot(optim.method %in% c("nlminb", "bfgs")) ### FIXME: if all indicators of a factor are normal, can we not ### just use the `classic' regression method?? ### (perhaps after whitening, to get uncorrelated factors...) # full object? if (inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats } else { stopifnot( !is.null(lavmodel), !is.null(lavdata), !is.null(lavsamplestats) ) } # new data? if (is.null(data.obs)) { data.obs <- lavdata@X } if (is.null(eXo)) { eXo <- lavdata@eXo } # se? if (acov != "none") { se <- acov # ACOV implies SE } # if(se != "none") { # warning("lavaan WARNING: standard errors are not available (yet) for the non-normal case") # } VETAx <- computeVETAx(lavmodel = lavmodel) VETAx.inv <- VETAx for (g in seq_len(lavdata@ngroups)) { if (nrow(VETAx[[g]]) > 0L) { VETAx.inv[[g]] <- solve(VETAx[[g]]) } } EETAx <- computeEETAx( lavmodel = lavmodel, lavsamplestats = lavsamplestats, eXo = eXo, nobs = lapply(data.obs, NROW), remove.dummy.lv = TRUE ) ## FIXME?
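# the case-wise objective f.eta.i() below minimizes, for each case i,
#
#   -sum_j log f(y_ij | eta, x_i) + 0.5 * t(eta - mu_i) %*% V %*% (eta - mu_i)
#
# with V = VETAx.inv for EBM (the empirical Bayes modal / MAP estimate),
# or a vague ridge V = 1e-05 * diag(nfac) for ML (plus an irrelevant
# constant), added only to keep the optimum finite when a response pattern
# is all-(in)correct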
TH <- computeTH(lavmodel = lavmodel, delta = FALSE) THETA <- computeTHETA(lavmodel = lavmodel) # check for zero entries in THETA (new in 0.6-4) for (g in seq_len(lavdata@ngroups)) { if (any(diag(THETA[[g]]) == 0)) { lav_msg_stop(gettext( "(residual) variance matrix THETA contains zero elements on the diagonal.")) } } # local objective function: x = lv values f.eta.i <- function(x, y.i, x.i, mu.i) { # add 'dummy' values (if any) for ov.y if (length(lavmodel@ov.y.dummy.lv.idx[[g]]) > 0L) { x2 <- c(x - mu.i, data.obs[[g]][i, lavmodel@ov.y.dummy.ov.idx[[g]], drop = FALSE ]) } else { x2 <- x - mu.i } # conditional density of y, given eta.i(=x) log.fy <- lav_predict_fy_eta.i( lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, y.i = y.i, x.i = x.i, eta.i = matrix(x2, nrow = 1L), # <---- eta! theta.sd = theta.sd, th = th, th.idx = th.idx, log = TRUE ) if (ML) { # NOTE: 'true' ML is simply -1*sum(log.fy) # - but there is no upper/lower bound for the extrema: # a pattern of all (in)correct drives the 'theta' parameter # towards +/- Inf # - therefore, we add a vague prior, just to stabilize # diff <- t(x) - mu.i V <- diag(length(x)) * 1e-05 tmp <- as.numeric(0.5 * diff %*% V %*% t(diff)) out <- 1 + tmp - sum(log.fy, na.rm = TRUE) } else { diff <- t(x) - mu.i V <- VETAx.inv[[g]] tmp <- as.numeric(0.5 * diff %*% V %*% t(diff)) out <- tmp - sum(log.fy, na.rm = TRUE) } out } FS <- vector("list", length = lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { nfac <- ncol(VETAx[[g]]) nfac2 <- nfac if (length(lavmodel@ov.y.dummy.lv.idx[[g]]) > 0L) { nfac2 <- nfac2 + length(lavmodel@ov.y.dummy.lv.idx[[g]]) } FS[[g]] <- matrix(as.numeric(NA), nrow(data.obs[[g]]), nfac2) # special case: no regular lv's if (nfac == 0) { # impute dummy ov.y (if any) FS[[g]][, lavmodel@ov.y.dummy.ov.idx[[g]]] <- data.obs[[g]][, lavmodel@ov.y.dummy.ov.idx[[g]], drop = FALSE] next } ## FIXME: factor scores not identical (but close) to Mplus # if delta elements not equal to 1?? mm.in.group <- 1:lavmodel@nmat[g] + cumsum(c(0, lavmodel@nmat))[g] MLIST <- lavmodel@GLIST[mm.in.group] # check for negative values neg.var.idx <- which(diag(THETA[[g]]) < 0) if (length(neg.var.idx) > 0) { lav_msg_warn( gettext("factor scores could not be computed due to at least one negative (residual) variance")) next } # common values theta.sd <- sqrt(diag(THETA[[g]])) th <- TH[[g]] th.idx <- lavmodel@th.idx[[g]] # casewise for now N <- nrow(data.obs[[g]]) for (i in 1:N) { # eXo? 
if (!is.null(eXo[[g]])) { x.i <- eXo[[g]][i, , drop = FALSE] } else { x.i <- NULL } mu.i <- EETAx[[g]][i, , drop = FALSE] y.i <- data.obs[[g]][i, , drop = FALSE] ### DEBUG ONLY: # cat("i = ", i, "mu.i = ", mu.i, "\n") START <- numeric(nfac) # initial values for eta if (!all(is.na(y.i))) { # find best values for eta.i if (optim.method == "nlminb") { out <- nlminb( start = START, objective = f.eta.i, gradient = NULL, # for now control = list(rel.tol = 1e-8), y.i = y.i, x.i = x.i, mu.i = mu.i ) } else if (optim.method == "bfgs") { out <- optim( par = START, fn = f.eta.i, gr = NULL, control = list(reltol = 1e-8, fnscale = 1.1), method = "BFGS", y.i = y.i, x.i = x.i, mu.i = mu.i ) } if (out$convergence == 0L) { eta.i <- out$par } else { eta.i <- rep(as.numeric(NA), nfac) } } else { eta.i <- rep(as.numeric(NA), nfac) } # add dummy ov.y lv values if (length(lavmodel@ov.y.dummy.lv.idx[[g]]) > 0L) { eta.i <- c(eta.i, data.obs[[g]][i, lavmodel@ov.y.dummy.ov.idx[[g]], drop = FALSE ]) } FS[[g]][i, ] <- eta.i } } FS } # predicted value for response y*_i, conditional on the predicted latent # variable scores # `measurement part': # y*_i = nu + lambda eta_i + K x_i + epsilon_i # # where eta_i = latent variable value for i (either given or from predict) # # Two types: 1) nrow(ETA) = nrow(X) (factor scores) # 2) nrow(ETA) = 1L (given values) # # in both cases, we return [nobs x nvar] matrix per group lav_predict_yhat <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, # new data data.obs = NULL, eXo = NULL, # ETA values ETA = NULL, # options method = "EBM", duplicate = FALSE, optim.method = "bfgs", fsm = FALSE, resid.flag = FALSE) { # full object? if (inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats lavimplied <- lavobject@implied } else { stopifnot( !is.null(lavmodel), !is.null(lavdata), !is.null(lavsamplestats), !is.null(lavimplied) ) } # new data? if (is.null(data.obs)) { data.obs <- lavdata@X } if (is.null(eXo)) { eXo <- lavdata@eXo } # do we get values for ETA? If not, use `predict' to get plausible values if (is.null(ETA)) { ETA <- lav_predict_eta( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, method = method, optim.method = optim.method, fsm = fsm ) FSM <- attr(ETA, "fsm") } else { # matrix if (is.matrix(ETA)) { # user-specified? if (nrow(ETA) == 1L) { tmp <- matrix(ETA, lavsamplestats@ntotal, length(ETA), byrow = TRUE ) } else if (nrow(ETA) != lavsamplestats@ntotal) { lav_msg_stop(gettext("nrow(ETA) != lavsamplestats@ntotal")) } else { tmp <- ETA } ETA <- lapply(1:lavdata@ngroups, function(i) tmp[lavdata@case.idx[[i]], ]) # vector: just 1 row of factor-scores } else if (is.numeric(ETA)) { # convert to matrix tmp <- matrix(ETA, lavsamplestats@ntotal, length(ETA), byrow = TRUE) ETA <- lapply(1:lavdata@ngroups, function(i) tmp[lavdata@case.idx[[i]], ]) } else if (is.list(ETA)) { stopifnot(lavdata@ngroups == length(ETA)) } } YHAT <- computeYHAT( lavmodel = lavmodel, GLIST = NULL, lavsamplestats = lavsamplestats, eXo = eXo, nobs = lapply(data.obs, NROW), ETA = ETA, duplicate = duplicate ) # if conditional.x, paste eXo if (lavmodel@categorical && !is.null(eXo)) { YHAT <- lapply(seq_len(lavdata@ngroups), function(g) { ret <- cbind(YHAT[[g]], eXo[[g]]) ret }) } # residuals?
# compute y - yhat if (resid.flag) { RES <- lapply(seq_len(lavdata@ngroups), function(g) { ret <- data.obs[[g]] - YHAT[[g]] ret }) } else { RES <- YHAT } # fsm? if (fsm) { attr(RES, "fsm") <- FSM } RES } # conditional density y -- assuming independence!! # f(y_i | eta_i, x_i) for EACH item # lav_predict_fy <- function(lavobject = NULL, # for convenience # sub objects lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, # new data data.obs = NULL, eXo = NULL, # ETA values ETA = NULL, # options method = "EBM", log. = FALSE, optim.method = "bfgs") { # full object? if (inherits(lavobject, "lavaan")) { lavmodel <- lavobject@Model lavdata <- lavobject@Data lavsamplestats <- lavobject@SampleStats lavimplied <- lavobject@implied } else { stopifnot( !is.null(lavmodel), !is.null(lavdata), !is.null(lavsamplestats), !is.null(lavimplied) ) } # new data? if (is.null(data.obs)) { data.obs <- lavdata@X } if (is.null(eXo)) { eXo <- lavdata@eXo } # we need the YHATs (per group) YHAT <- lav_predict_yhat( lavobject = NULL, lavmodel = lavmodel, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, data.obs = data.obs, eXo = eXo, ETA = ETA, method = method, duplicate = FALSE, optim.method = optim.method ) THETA <- computeTHETA(lavmodel = lavmodel) TH <- computeTH(lavmodel = lavmodel, delta = FALSE) FY <- vector("list", length = lavdata@ngroups) for (g in seq_len(lavdata@ngroups)) { FY[[g]] <- lav_predict_fy_internal( X = data.obs[[g]], yhat = YHAT[[g]], TH = TH[[g]], THETA = THETA[[g]], num.idx = lavmodel@num.idx[[g]], th.idx = lavmodel@th.idx[[g]], link = lavmodel@link, log. = log. ) } FY } # single group, internal function lav_predict_fy_internal <- function(X = NULL, yhat = NULL, TH = NULL, THETA = NULL, num.idx = NULL, th.idx = NULL, link = NULL, log. = FALSE) { # shortcuts theta.var <- diag(THETA) # check size YHAT (either 1L or Nobs rows) if (!(nrow(yhat) == 1L || nrow(yhat) == nrow(X))) { lav_msg_stop(gettext("nrow(YHAT[[g]]) is neither 1L nor nrow(X)")) } FY.group <- matrix(0, nrow(X), ncol(X)) # if(NORMAL) { # if(nrow(yhat) == nrow(X)) { # tmp <- (X - yhat)^2 # } else { # tmp <- sweep(X, MARGIN=2, STATS=yhat, FUN="-")^2 # } # tmp1 <- sweep(tmp, MARGIN=2, theta.var, "/") # tmp2 <- exp( -0.5 * tmp1 ) # tmp3 <- sweep(tmp2, MARGIN=2, sqrt(2*pi*theta.var), "/") # if(log.) { # FY.group <- log(tmp3) # } else { # FY.group <- tmp3 # } # } else { # mixed items ord.idx <- unique(th.idx[th.idx > 0L]) # first, NUMERIC variables if (length(num.idx) > 0L) { for (v in num.idx) { FY.group[, v] <- dnorm(X[, v], # YHAT may change or not per case mean = yhat[, v], sd = sqrt(theta.var[v]), log = log. ) } } # second, ORDERED variables for (v in ord.idx) { th.y <- TH[th.idx == v] TH.Y <- c(-Inf, th.y, Inf) ncat <- length(th.y) + 1L fy <- numeric(ncat) theta.v <- sqrt(theta.var[v]) yhat.v <- yhat[, v] # two cases: yhat.v is a scalar, or has length = nobs fy <- matrix(0, nrow = length(yhat.v), ncol = ncat) # for each category for (k in seq_len(ncat)) { if (link == "probit") { fy[, k] <- pnorm((TH.Y[k + 1] - yhat.v) / theta.v) - pnorm((TH.Y[k] - yhat.v) / theta.v) } else if (link == "logit") { fy[, k] <- plogis((TH.Y[k + 1] - yhat.v) / theta.v) - plogis((TH.Y[k] - yhat.v) / theta.v) } else { lav_msg_stop(gettext("link must be probit or logit")) } } # underflow idx <- which(fy < .Machine$double.eps) if (length(idx) > 0L) { fy[idx] <- .Machine$double.eps } # log? if (log.)
{ fy <- log(fy) } # case-wise expansion/selection if (length(yhat.v) == 1L) { # expand category probabilities for all observations FY.group[, v] <- fy[1L, X[, v]] } else { # select correct category probability per observation FY.group[, v] <- fy[cbind(seq_len(nrow(fy)), X[, v])] } } # ord FY.group } # conditional density y -- assuming independence!! # f(y_i | eta_i, x_i) # # but for a SINGLE observation y_i (and x_i), for given values of eta_i # lav_predict_fy_eta.i <- function(lavmodel = NULL, lavdata = NULL, lavsamplestats = NULL, y.i = NULL, x.i = NULL, eta.i = NULL, theta.sd = NULL, g = 1L, th = NULL, th.idx = NULL, log = TRUE) { mm.in.group <- 1:lavmodel@nmat[g] + cumsum(c(0, lavmodel@nmat))[g] MLIST <- lavmodel@GLIST[mm.in.group] # linear predictor for all items YHAT <- computeEYetax.LISREL( MLIST = MLIST, eXo = x.i, ETA = eta.i, sample.mean = lavsamplestats@mean[[g]], ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], delta = FALSE ) # P(y_i | eta_i, x_i) for all items if (all(lavdata@ov$type == "numeric")) { # NORMAL case FY <- dnorm(y.i, mean = YHAT, sd = theta.sd, log = log) } else { FY <- numeric(lavmodel@nvar[g]) for (v in seq_len(lavmodel@nvar[g])) { if (lavdata@ov$type[v] == "numeric") { ### FIXME!!! we can do all numeric vars at once!! FY[v] <- dnorm(y.i[v], mean = YHAT[v], sd = theta.sd[v], log = log ) } else if (lavdata@ov$type[v] == "ordered") { # handle missing value if (is.na(y.i[v])) { FY[v] <- as.numeric(NA) } else { th.y <- th[th.idx == v] TH.Y <- c(-Inf, th.y, Inf) k <- y.i[v] p1 <- pnorm((TH.Y[k + 1] - YHAT[v]) / theta.sd[v]) p2 <- pnorm((TH.Y[k] - YHAT[v]) / theta.sd[v]) prob <- (p1 - p2) if (prob < .Machine$double.eps) { prob <- .Machine$double.eps } if (log) { FY[v] <- log(prob) } else { FY[v] <- prob } } } else { lav_msg_stop(gettextf("unknown type: `%1$s' for variable: %2$s", lavdata@ov$type[v], lavdata@ov$name[v]) ) } } } FY } lavaan/R/lav_object_inspect.R0000644000176200001440000031222314627656440015716 0ustar liggesusers# inspect a fitted lavaan object # backward compatibility -- wrapper around lavInspect inspect.lavaan <- function(object, what = "free", ...) { lavInspect.lavaan(object = object, what = what, add.labels = TRUE, add.class = TRUE, drop.list.single.group = TRUE) } # the `tech' version: no labels, full matrices, ... for further processing lavTech.lavaan <- function(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { lavInspect.lavaan(object, what = what, add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } # the `user' version: with defaults for display only lavInspect.lavaan <- function(object, # nolint what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) { # object must inherit from class lavaan stopifnot(inherits(object, "lavaan")) # store partable with pta in object to use cache in called functions object@ParTable <- lav_partable_set_cache(object@ParTable, object@pta) # old (<0.6) object? 
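# usage sketch (illustration only; assumes lavaan's built-in
# HolzingerSwineford1939 data): the three entry points differ only in
# their display defaults, e.g.
#
#   fit <- cfa("f =~ x1 + x2 + x3", data = HolzingerSwineford1939)
#   lavInspect(fit, "est") # labeled + classed; list dropped if 1 group/block
#   lavTech(fit, "est")    # bare matrices, for programmatic use
#   inspect(fit, "est")    # backward-compatible alias for lavInspect()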
if (!.hasSlot(object@Data, "block.label")) { object@Data@block.label <- object@Data@group.label } # only a single argument if (length(what) > 1) { lav_msg_stop(gettextf("argument %s cannot have more than one element", "what")) } # be case insensitive what <- tolower(what) #### model matrices, with different contents #### if (what == "free") { lav_object_inspect_modelmatrices(object, what = "free", type = "free", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "impute" || what == "imputed") { # just to ease the transition for semTools! object@imputed } else if (what == "partable" || what == "user") { lav_object_inspect_modelmatrices(object, what = "free", type = "partable", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "se" || what == "std.err" || what == "standard.errors") { lav_object_inspect_modelmatrices(object, what = "se", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "se.std" || what == "std.se") { lav_object_inspect_modelmatrices(object, what = "std.se", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "start" || what == "starting.values") { lav_object_inspect_modelmatrices(object, what = "start", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "est" || what == "estimates" || what == "x") { lav_object_inspect_modelmatrices(object, what = "est", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "est.unrotated") { lav_object_inspect_modelmatrices(object, what = "est.unrotated", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "dx.free") { lav_object_inspect_modelmatrices(object, what = "dx.free", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "dx.all") { lav_object_inspect_modelmatrices(object, what = "dx.all", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "std" || what == "std.all" || what == "est.std" || what == "std.est" || what == "standardized") { lav_object_inspect_modelmatrices(object, what = "std.all", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "std.lv") { lav_object_inspect_modelmatrices(object, what = "std.lv", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) } else if (what == "std.nox") { lav_object_inspect_modelmatrices(object, what = "std.nox", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) #### parameter table #### } else if (what == "list") { parTable(object) #### bootstrap coef #### } else if (what %in% c("boot", "bootstrap", "boot.coef", "coef.boot")) { lav_object_inspect_boot(object, add.labels = add.labels, add.class = add.class) 
#### fit indices #### } else if (what == "fit" || what == "fitmeasures" || what == "fit.measures" || what == "fit.indices") { fitMeasures(object) #### baseline model #### } else if (what == "baseline.partable") { out <- as.data.frame(object@baseline$partable, stringsAsFactors = FALSE) if (add.class) { class(out) <- c("lavaan.data.frame", "data.frame") } return(out) } else if (what == "baseline.test") { object@baseline$test #### modification indices #### } else if (what == "mi" || what == "modindices" || what == "modification.indices") { modificationIndices(object) #### sample statistics ##### } else if (what == "obs" || what == "observed" || what == "sampstat" || what == "sampstats" || what == "samplestats" || what == "samp" || what == "sample" || what == "samplestatistics") { # new in 0.6-3: always use h1 = TRUE!!! lav_object_inspect_sampstat(object, h1 = TRUE, std = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "obs.std" || what == "observed.std" || what == "sampstat.std" || what == "sampstats.std" || what == "samplestats.std" || what == "samp.std" || what == "sample.std" || what == "samplestatistics.std") { lav_object_inspect_sampstat(object, h1 = TRUE, std = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "h1" || what == "missing.h1" || what == "sampstat.h1") { lav_object_inspect_sampstat(object, h1 = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### wls.est - wls.obs - wls.v #### } else if (what == "wls.est") { lav_object_inspect_wls_est(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "wls.obs") { lav_object_inspect_wls_obs(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "wls.v") { lav_object_inspect_wls_v(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### data + missingness #### } else if (what == "data") { lav_object_inspect_data(object, add.labels = add.labels, drop.list.single.group = drop.list.single.group) } else if (what == "case.idx") { lav_object_inspect_case_idx(object, drop.list.single.group = drop.list.single.group) } else if (what == "ngroups") { object@Data@ngroups } else if (what == "group") { object@Data@group } else if (what == "cluster") { object@Data@cluster } else if (what == "nlevels") { object@Data@nlevels } else if (what == "nclusters") { lav_object_inspect_cluster_info(object, level = 2L, what = "nclusters", drop.list.single.group = drop.list.single.group) } else if (what == "ncluster.size") { lav_object_inspect_cluster_info(object, level = 2L, what = "ncluster.size", drop.list.single.group = drop.list.single.group) } else if (what == "cluster.size") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.size", drop.list.single.group = drop.list.single.group) } else if (what == "cluster.id") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.id", drop.list.single.group = drop.list.single.group) } else if (what == "cluster.idx") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.idx", drop.list.single.group = drop.list.single.group) } else if (what == "cluster.label") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.label", drop.list.single.group = drop.list.single.group) } 
else if (what == "cluster.sizes") { lav_object_inspect_cluster_info(object, level = 2L, what = "cluster.sizes", drop.list.single.group = drop.list.single.group) } else if (what == "average.cluster.size") { lav_object_inspect_cluster_info(object, level = 2L, what = "average.cluster.size", drop.list.single.group = drop.list.single.group) } else if (what == "ordered") { object@Data@ordered } else if (what == "group.label") { object@Data@group.label } else if (what == "level.label") { object@Data@level.label } else if (what == "nobs") { unlist(object@Data@nobs) } else if (what == "norig") { unlist(object@Data@norig) } else if (what == "ntotal") { sum(unlist(object@Data@nobs)) } else if (what == "coverage") { lav_object_inspect_missing_coverage(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what %in% c("patterns", "pattern")) { lav_object_inspect_missing_patterns(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "empty.idx") { lav_object_inspect_empty_idx(object, drop.list.single.group = drop.list.single.group) #### rsquare #### } else if (what == "rsquare" || what == "r-square" || what == "r2") { lav_object_inspect_rsquare(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### model-implied sample statistics #### } else if (what == "implied" || what == "fitted" || what == "expected" || what == "exp") { lav_object_inspect_implied(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "resid" || what == "res" || what == "residual" || what == "residuals") { lav_object_inspect_residuals(object, h1 = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "cov.lv" || what == "veta") { lav_object_inspect_cov_lv(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "cor.lv") { lav_object_inspect_cov_lv(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "mean.lv" || what == "eeta") { lav_object_inspect_mean_lv(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "cov.all") { lav_object_inspect_cov_all(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "cor.all") { lav_object_inspect_cov_all(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "cov.ov" || what == "sigma" || what == "sigma.hat") { lav_object_inspect_cov_ov(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "cor.ov") { lav_object_inspect_cov_ov(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "mean.ov" || what == "mu" || what == "mu.hat") { lav_object_inspect_mean_ov(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "th" || what == "thresholds") { lav_object_inspect_th(object, 
add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "th.idx") { lav_object_inspect_th_idx(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "vy") { lav_object_inspect_vy(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### specific model matrices? #### } else if (what == "theta" || what == "theta.cov") { lav_object_inspect_theta(object, correlation.metric = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "theta.cor") { lav_object_inspect_theta(object, correlation.metric = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### (squared) Mahalanobis distances #### } else if (what == "mdist2.fs") { lav_object_inspect_mdist2(object, type = "lv", squared = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "mdist2.resid") { lav_object_inspect_mdist2(object, type = "resid", squared = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "mdist.fs") { lav_object_inspect_mdist2(object, type = "lv", squared = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "mdist.resid") { lav_object_inspect_mdist2(object, type = "resid", squared = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### convergence, meanstructure, categorical #### } else if (what == "converged") { object@optim$converged } else if (what == "iterations" || what == "iter" || what == "niter") { object@optim$iterations } else if (what == "meanstructure") { object@Model@meanstructure } else if (what == "categorical") { object@Model@categorical } else if (what == "fixed.x") { object@Model@fixed.x } else if (what == "parameterization") { object@Model@parameterization } else if (what == "npar") { lav_object_inspect_npar(object, type = "free") } else if (what == "coef") { # this breaks simsem and semTools -- 0.6-1 # lav_object_inspect_coef(object, type = "free", # add.labels = add.labels, add.class = add.class) lav_object_inspect_modelmatrices(object, what = "est", type = "free", add.labels = add.labels, add.class = add.class, list.by.group = list.by.group, drop.list.single.group = drop.list.single.group) #### NACOV samplestats #### } else if (what == "gamma") { lav_object_inspect_sampstat_gamma(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### gradient, Hessian, information, first.order, vcov #### } else if (what == "gradient") { lav_object_inspect_gradient(object, add.labels = add.labels, add.class = add.class, logl = FALSE) } else if (what == "gradient.logl") { lav_object_inspect_gradient(object, add.labels = add.labels, add.class = add.class, logl = TRUE) } else if (what == "optim.gradient") { lav_object_inspect_gradient(object, add.labels = add.labels, add.class = add.class, optim = TRUE) } else if (what == "hessian") { lav_object_inspect_hessian(object, add.labels = add.labels, add.class = add.class) } else if (what == "information") { lav_object_inspect_information(object, information = "default", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what 
== "information.expected") { lav_object_inspect_information(object, information = "expected", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "information.observed") { lav_object_inspect_information(object, information = "observed", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "information.first.order" || what == "information.firstorder" || what == "first.order") { lav_object_inspect_information(object, information = "first.order", augmented = FALSE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "augmented.information") { lav_object_inspect_information(object, information = "default", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "augmented.information.expected") { lav_object_inspect_information(object, information = "expected", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "augmented.information.observed") { lav_object_inspect_information(object, information = "observed", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "augmented.information.first.order" || what == "augmented.first.order") { lav_object_inspect_information(object, information = "first.order", augmented = TRUE, inverted = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "inverted.information") { lav_object_inspect_information(object, information = "default", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if (what == "inverted.information.expected") { lav_object_inspect_information(object, information = "expected", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if (what == "inverted.information.observed") { lav_object_inspect_information(object, information = "observed", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if (what == "inverted.information.first.order" || what == "inverted.first.order") { lav_object_inspect_information(object, information = "first.order", augmented = TRUE, inverted = TRUE, add.labels = add.labels, add.class = add.class) } else if (what == "h1.information") { lav_object_inspect_h1_information(object, information = "default", h1.information = "default", inverted = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "h1.information.expected") { lav_object_inspect_h1_information(object, information = "expected", h1.information = "default", inverted = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "h1.information.observed") { lav_object_inspect_h1_information(object, information = "observed", h1.information = "default", inverted = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "h1.information.first.order" || what == "h1.information.firstorder" || what == "h1.first.order") { lav_object_inspect_h1_information(object, information = "first.order", h1.information = "default", inverted = FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "vcov") { lav_object_inspect_vcov(object, standardized = FALSE, add.labels = add.labels, add.class = add.class) } 
else if (what == "vcov.std.all" || what == "vcov.standardized" || what == "vcov.std") { lav_object_inspect_vcov(object, standardized = TRUE, type = "std.all", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.std.lv") { lav_object_inspect_vcov(object, standardized = TRUE, type = "std.lv", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.std.nox") { lav_object_inspect_vcov(object, standardized = TRUE, type = "std.nox", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def") { lav_object_inspect_vcov_def(object, joint = FALSE, standardized = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.std.all" || what == "vcov.def.standardized" || what == "vcov.def.std") { lav_object_inspect_vcov_def(object, joint = FALSE, standardized = TRUE, type = "std.all", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.std.lv") { lav_object_inspect_vcov_def(object, joint = FALSE, standardized = TRUE, type = "std.lv", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.std.nox") { lav_object_inspect_vcov_def(object, joint = FALSE, standardized = TRUE, type = "std.nox", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.joint") { lav_object_inspect_vcov_def(object, joint = TRUE, standardized = FALSE, add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.joint.std.all" || what == "vcov.def.joint.standardized" || what == "vcov.def.joint.std") { lav_object_inspect_vcov_def(object, joint = TRUE, standardized = TRUE, type = "std.all", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.joint.std.lv") { lav_object_inspect_vcov_def(object, joint = TRUE, standardized = TRUE, type = "std.lv", add.labels = add.labels, add.class = add.class) } else if (what == "vcov.def.joint.std.nox") { lav_object_inspect_vcov_def(object, joint = TRUE, standardized = TRUE, type = "std.nox", add.labels = add.labels, add.class = add.class) } else if (what == "ugamma" || what == "ug" || what == "u.gamma") { lav_object_inspect_UGamma(object, add.labels = add.labels, add.class = add.class) } else if (what == "ufromugamma" || what == "u") { lav_object_inspect_UfromUGamma(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) ### jacobians #### } else if (what == "delta") { lav_object_inspect_delta(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "delta.rownames") { lav_object_inspect_delta_rownames(object, drop.list.single.group = drop.list.single.group) ### casewise loglikehoods ### } else if (what == "loglik.casewise") { lav_object_inspect_loglik_casewise(object, log. = TRUE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "lik.casewise") { lav_object_inspect_loglik_casewise(object, log. 
= FALSE, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) # multilevel # } else if (what == "icc") { lav_object_inspect_icc(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } else if (what == "ranef") { lav_object_inspect_ranef(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) # post-checking } else if (what == "post.check" || what == "post") { lav_object_post_check(object) # options } else if (what == "options" || what == "lavoptions") { object@Options # version } else if (what == "version") { object@version # call } else if (what == "call") { as.list(object@call) # timing } else if (what == "timing") { object@timing # optim } else if (what == "optim") { object@optim # test } else if (what == "test") { object@test # zero cell tables } else if (what == "zero.cell.tables") { lav_object_inspect_zero_cell_tables(object, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) #### not found #### } else { lav_msg_stop(gettextf( "%1$s argument unknown: %2$s", "what", lav_msg_view(what) )) } } # helper functions (mostly to deal with older 'object' that may have # been saved somewhere) lav_object_inspect_est <- function(object, unrotated = FALSE) { if (inherits(object, "lavaan")) { # from 0.5-19, they are in the partable if (!is.null(object@ParTable$est)) { if (unrotated) { return.value <- object@ParTable$est.unrotated } else { return.value <- object@ParTable$est # if this changes, tag @TDJorgensen in commit message } } else if (.hasSlot(object, "Fit")) { # in < 0.5-19, we should look in @Fit@est return.value <- object@Fit@est } else { partable <- parTable(object) return.value <- rep(as.numeric(NA), length(partable$lhs)) } } else { # try generic coef() return.value <- coef(object, type = "user") if (is.matrix(return.value)) { # lavaanList? return.value <- rowMeans(return.value) } } return.value } lav_object_inspect_se <- function(object) { # from 0.5-19, they are in the partable if (!is.null(object@ParTable$se)) { return.value <- object@ParTable$se } else if (.hasSlot(object, "Fit")) { # in < 0.5-19, we should look in @Fit@se return.value <- object@Fit@se } else { partable <- parTable(object) return.value <- rep(as.numeric(NA), length(partable$lhs)) } return.value } lav_object_inspect_std_se <- function(object) { if (!is.null(object@ParTable$se.std)) { return.value <- object@ParTable$se.std } else { tmp.std <- standardizedSolution(object) return.value <- tmp.std$se } return.value } lav_object_inspect_start <- function(object) { # from 0.5-19, they are in the partable if (!is.null(object@ParTable$start)) { return.value <- object@ParTable$start } else { # in < 0.5-19, we should look in @Fit@start return.value <- object@Fit@start } return.value } lav_object_inspect_boot <- function(object, add.labels = FALSE, add.class = FALSE) { if (object@Options$se != "bootstrap" && !any(c("bootstrap", "bollen.stine") %in% object@Options$test)) { lav_msg_stop(gettext("bootstrap was not used.")) } # from 0.5-19. they are in a separate slot tmp <- try(slot(object, "boot"), silent = TRUE) if (inherits(tmp, "try-error")) { # older version of object? 
est <- lav_object_inspect_est(object) tmp.boot <- attr(est, "tmp.boot.COEF") } else { # 0.5-19 way tmp.boot <- object@boot$coef } # add coef names if (add.labels) { colnames(tmp.boot) <- names(coef(object)) } # add class if (add.class) { class(tmp.boot) <- c("lavaan.matrix", "matrix") } tmp.boot } lav_object_inspect_modelmatrices <- function(object, what = "free", # nolint type = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) { glist <- object@Model@GLIST current.verbose <- lav_verbose() if (what == "dx.free") { if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose), TRUE) tmp.dx <- lav_model_gradient( lavmodel = object@Model, GLIST = NULL, lavsamplestats = object@SampleStats, lavdata = object@Data, lavcache = object@Cache, type = "free", group.weight = TRUE, ceq.simple = TRUE, Delta = NULL) } else if (what == "dx.all") { if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose), TRUE) glist <- lav_model_gradient(lavmodel = object@Model, GLIST = NULL, lavsamplestats = object@SampleStats, lavdata = object@Data, lavcache = object@Cache, type = "allofthem", group.weight = TRUE, ceq.simple = FALSE, Delta = NULL) names(glist) <- names(object@Model@GLIST) } else if (what == "std.all") { tmp.std <- lav_standardize_all(object) } else if (what == "std.lv") { tmp.std <- lav_standardize_lv(object) } else if (what == "std.nox") { tmp.std <- lav_standardize_all_nox(object) } else if (what == "se") { tmp.se <- lav_object_inspect_se(object) } else if (what == "std.se") { tmp.se <- lav_object_inspect_std_se(object) } else if (what == "start") { tmp.start <- lav_object_inspect_start(object) } else if (what == "est") { tmp.est <- lav_object_inspect_est(object) } else if (what == "est.unrotated") { if (!is.null(object@Options$rotation) && object@Options$rotation == "none") { tmp.est <- lav_object_inspect_est(object, unrotated = FALSE) } else { tmp.est <- lav_object_inspect_est(object, unrotated = TRUE) } } for (mm in seq_along(glist)) { if (add.labels) { dimnames(glist[[mm]]) <- object@Model@dimNames[[mm]] } if (what == "free") { # fill in free parameter counts if (type == "free") { m.el.idx <- object@Model@m.free.idx[[mm]] x.el.idx <- object@Model@x.free.idx[[mm]] # } else if(type == "unco") { # m.el.idx <- object@Model@m.unco.idx[[mm]] # x.el.idx <- object@Model@x.unco.idx[[mm]] } else if (type == "partable") { m.el.idx <- object@Model@m.user.idx[[mm]] x.el.idx <- object@Model@x.user.idx[[mm]] } else { lav_msg_stop(gettextf( "%1$s argument unknown: %2$s", "type", lav_msg_view(type) )) } # erase everything glist[[mm]][, ] <- 0.0 glist[[mm]][m.el.idx] <- x.el.idx } else if (what == "se" || what == "std.se") { # fill in standard errors m.user.idx <- object@Model@m.user.idx[[mm]] x.user.idx <- object@Model@x.user.idx[[mm]] # erase everything glist[[mm]][, ] <- 0.0 glist[[mm]][m.user.idx] <- tmp.se[x.user.idx] } else if (what == "start") { # fill in starting values m.user.idx <- object@Model@m.user.idx[[mm]] x.user.idx <- object@Model@x.user.idx[[mm]] glist[[mm]][m.user.idx] <- tmp.start[x.user.idx] } else if (what %in% c("est", "est.unrotated")) { # fill in estimated parameter values m.user.idx <- object@Model@m.user.idx[[mm]] x.user.idx <- object@Model@x.user.idx[[mm]] glist[[mm]][m.user.idx] <- tmp.est[x.user.idx] } else if (what == "dx.free") { # fill in derivatives free parameters m.el.idx <- object@Model@m.free.idx[[mm]] x.el.idx <- object@Model@x.free.idx[[mm]] # erase everything glist[[mm]][, ] <- 0.0 glist[[mm]][m.el.idx] <- 
tmp.dx[x.el.idx] } else if (what %in% c("std.all", "std.lv", "std.nox")) { m.user.idx <- object@Model@m.user.idx[[mm]] x.user.idx <- object@Model@x.user.idx[[mm]] glist[[mm]][m.user.idx] <- tmp.std[x.user.idx] } # class if (add.class) { if (object@Model@isSymmetric[mm]) { class(glist[[mm]]) <- c("lavaan.matrix.symmetric", "matrix") } else { class(glist[[mm]]) <- c("lavaan.matrix", "matrix") } } } # try to reflect `equality constraints' con.flag <- FALSE if (what == "free" && object@Model@eq.constraints) { # extract constraints from parameter table partable <- parTable(object) tmp.con <- partable[partable$op %in% c("==", "<", ">"), c("lhs", "op", "rhs")] rownames(tmp.con) <- NULL # replace 'labels' by parameter numbers tmp.id <- lav_partable_constraints_label_id(partable) tmp.label <- names(tmp.id) for (con in seq_len(nrow(tmp.con))) { # lhs lhs.labels <- all.vars(as.formula(paste("~", tmp.con[con, "lhs"]))) if (length(lhs.labels) > 0L) { # par id lhs.freeid <- tmp.id[match(lhs.labels, tmp.label)] # substitute tmp <- tmp.con[con, "lhs"] for (pat in seq_along(lhs.labels)) { tmp <- sub(lhs.labels[pat], lhs.freeid[pat], tmp) } tmp.con[con, "lhs"] <- tmp } # rhs rhs.labels <- all.vars(as.formula(paste("~", tmp.con[con, "rhs"]))) if (length(rhs.labels) > 0L) { # par id rhs.freeid <- tmp.id[match(rhs.labels, tmp.label)] # substitute tmp <- tmp.con[con, "rhs"] for (pat in seq_along(rhs.labels)) { tmp <- sub(rhs.labels[pat], rhs.freeid[pat], tmp) } tmp.con[con, "rhs"] <- tmp } } # con # add this info at the top # glist <- c(constraints = list(tmp.con), glist) # no, not a good idea, it does not work with list.by.group # add it as a 'header' attribute? attr(tmp.con, "header") <- "Note: model contains equality constraints:" con.flag <- TRUE } # should we group them per block? if (list.by.group) { lavmodel <- object@Model nmat <- lavmodel@nmat return.value <- vector("list", length = lavmodel@nblocks) for (b in seq_len(lavmodel@nblocks)) { # which mm belong to this block? mm.in.group <- 1:nmat[b] + cumsum(c(0, nmat))[b] return.value[[b]] <- glist[mm.in.group] } if (lavmodel@nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (lavmodel@nblocks > 1L) { names(return.value) <- object@Data@block.label } } else { return.value <- glist } # header if (con.flag) { attr(return.value, "header") <- tmp.con } # lavaan.list if (add.class) { class(return.value) <- c("lavaan.list", "list") } return.value } # - fixme, should we export this function? # - since 0.5-21, conditional.x = TRUE returns residual sample statistics # for ML, we have both joint and residual cov/var/...; but for # categorical = TRUE, we only have residual cov/var...; so, we # only return residual in both cases, whenever residual # - since 0.6-3, we always extract the values from the @h1 slot (if present) # if meanstructure = FALSE, do NOT include $mean elements any longer lav_object_inspect_sampstat <- function(object, h1 = TRUE, # nolint std = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # old (<0.6) object? 
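  # (illustrative, not run) user-level access to the statistics assembled
  # below; model and data are examples only:
  #   fit <- cfa('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939)
  #   lavInspect(fit, "sampstat")$cov   # observed covariance matrix
  #   # a $mean element is included only if meanstructure = TRUE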
if (!.hasSlot(object@Data, "block.label")) { object@Data@block.label <- object@Data@group.label } nblocks <- object@Model@nblocks ov.names <- object@pta$vnames$ov ov.names.res <- object@pta$vnames$ov.nox ov.names.x <- object@pta$vnames$ov.x # slots lavsamplestats <- object@SampleStats lavmodel <- object@Model # if nlevels, override h1 to be TRUE, and set conditional.x = FALSE if (object@Data@nlevels > 1L) { h1 <- TRUE conditional.x <- FALSE # for now (0.6-12) } else { conditional.x <- lavmodel@conditional.x } # check if we have a non-empty @h1 slot if (!.hasSlot(object, "h1")) { h1 <- FALSE } else if (length(object@h1) == 0L) { h1 <- FALSE } else { h1.implied <- object@h1$implied } # if h1 = FALSE and nlevels > 1L, nothing can show... if (!h1 && object@Data@nlevels > 1L) { lav_msg_stop(gettext( "sample statistics not available; refit with option h1 = TRUE")) } return.value <- vector("list", length = nblocks) for (b in seq_len(nblocks)) { if (!conditional.x) { # covariance matrix if (h1) { return.value[[b]]$cov <- h1.implied$cov[[b]] } else { return.value[[b]]$cov <- lavsamplestats@cov[[b]] } if (std) { diag.orig <- diag(return.value[[b]]$cov) return.value[[b]]$cov <- cov2cor(return.value[[b]]$cov) } if (add.labels && !is.null(return.value[[b]]$cov)) { rownames(return.value[[b]]$cov) <- colnames(return.value[[b]]$cov) <- ov.names[[b]] } if (add.class) { class(return.value[[b]]$cov) <- c("lavaan.matrix.symmetric", "matrix") } # mean vector if (lavmodel@meanstructure) { if (h1) { return.value[[b]]$mean <- as.numeric(h1.implied$mean[[b]]) } else { return.value[[b]]$mean <- as.numeric(lavsamplestats@mean[[b]]) } if (std) { diag.orig[diag.orig < .Machine$double.eps] <- NA return.value[[b]]$mean <- return.value[[b]]$mean / sqrt(diag.orig) } if (add.labels) { names(return.value[[b]]$mean) <- ov.names[[b]] } if (add.class) { class(return.value[[b]]$mean) <- c("lavaan.vector", "numeric") } } # thresholds if (lavmodel@categorical) { if (h1) { return.value[[b]]$th <- as.numeric(h1.implied$th[[b]]) } else { return.value[[b]]$th <- as.numeric(lavsamplestats@th[[b]]) } if (length(lavmodel@num.idx[[b]]) > 0L) { num.idx <- which(lavmodel@th.idx[[b]] == 0) return.value[[b]]$th <- return.value[[b]]$th[-num.idx] } # FIXME: what to do if std = TRUE (depends on delta/theta) if (add.labels) { names(return.value[[b]]$th) <- object@pta$vnames$th[[b]] } if (add.class) { class(return.value[[b]]$th) <- c("lavaan.vector", "numeric") } } # !conditional.x } else { # if conditional.x = TRUE # residual covariance matrix if (h1) { return.value[[b]]$res.cov <- h1.implied$res.cov[[b]] } else { return.value[[b]]$res.cov <- lavsamplestats@res.cov[[b]] } if (std) { diag.orig <- diag(return.value[[b]]$res.cov) return.value[[b]]$res.cov <- cov2cor(return.value[[b]]$res.cov) } if (add.labels) { rownames(return.value[[b]]$res.cov) <- colnames(return.value[[b]]$res.cov) <- ov.names.res[[b]] } if (add.class) { class(return.value[[b]]$res.cov) <- c("lavaan.matrix.symmetric", "matrix") } # intercepts if (lavmodel@meanstructure) { if (h1) { return.value[[b]]$res.int <- as.numeric(h1.implied$res.int[[b]]) } else { return.value[[b]]$res.int <- as.numeric(lavsamplestats@res.int[[b]]) } if (std) { diag.orig[diag.orig < .Machine$double.eps] <- NA return.value[[b]]$res.int <- return.value[[b]]$res.int / sqrt(diag.orig) } if (add.labels) { names(return.value[[b]]$res.int) <- ov.names.res[[b]] } if (add.class) { class(return.value[[b]]$res.int) <- c("lavaan.vector", "numeric") } } # thresholds if (lavmodel@categorical) { if (h1) { 
return.value[[b]]$res.th <- as.numeric(h1.implied$res.th[[b]]) } else { return.value[[b]]$res.th <- as.numeric(lavsamplestats@res.th[[b]]) } if (length(lavmodel@num.idx[[b]]) > 0L) { num.idx <- which(lavmodel@th.idx[[b]] == 0) return.value[[b]]$res.th <- return.value[[b]]$res.th[-num.idx] } # FIXME: if std: what to do? if (add.labels) { names(return.value[[b]]$res.th) <- object@pta$vnames$th[[b]] } if (add.class) { class(return.value[[b]]$res.th) <- c("lavaan.vector", "numeric") } } # slopes if (lavmodel@nexo[b] > 0L) { if (h1) { return.value[[b]]$res.slopes <- h1.implied$res.slopes[[b]] } else { return.value[[b]]$res.slopes <- lavsamplestats@res.slopes[[b]] } # FIXME: if std: what to do? (here: b.z = b * s.x /s.y) if (std) { tmp.y <- matrix(sqrt(diag.orig), nrow(return.value[[b]]$res.slopes), ncol(return.value[[b]]$res.slopes)) tmp.x <- matrix(sqrt(diag(lavsamplestats@cov.x[[b]])), nrow(return.value[[b]]$res.slopes), ncol(return.value[[b]]$res.slopes), byrow = TRUE) return.value[[b]]$res.slopes <- return.value[[b]]$res.slopes / tmp.y * tmp.x } if (add.labels) { rownames(return.value[[b]]$res.slopes) <- ov.names.res[[b]] colnames(return.value[[b]]$res.slopes) <- ov.names.x[[b]] } if (add.class) { class(return.value[[b]]$res.slopes) <- c("lavaan.matrix", "matrix") } } # cov.x if (lavmodel@nexo[b] > 0L) { return.value[[b]]$cov.x <- lavsamplestats@cov.x[[b]] if (std) { diag.orig <- diag(return.value[[b]]$cov.x) return.value[[b]]$cov.x <- cov2cor(return.value[[b]]$cov.x) } if (add.labels) { rownames(return.value[[b]]$cov.x) <- ov.names.x[[b]] colnames(return.value[[b]]$cov.x) <- ov.names.x[[b]] } if (add.class) { class(return.value[[b]]$cov.x) <- c("lavaan.matrix.symmetric", "matrix") } } # mean.x if (lavmodel@nexo[b] > 0L) { return.value[[b]]$mean.x <- as.numeric(object@SampleStats@mean.x[[b]]) if (std) { diag.orig[diag.orig < .Machine$double.eps] <- NA return.value[[b]]$mean.x <- return.value[[b]]$mean.x / sqrt(diag.orig) } if (add.labels) { names(return.value[[b]]$mean.x) <- ov.names.x[[b]] } if (add.class) { class(return.value[[b]]$mean.x) <- c("lavaan.vector", "numeric") } } } # conditional.x # stochastic weights if (lavmodel@group.w.free) { # to be consistent with the 'implied' values, # transform so group.w is the 'log(group.freq)' return.value[[b]]$group.w <- log(lavsamplestats@group.w[[b]] * lavsamplestats@ntotal) if (add.labels) { names(return.value[[b]]$group.w) <- "w" } if (add.class) { class(return.value[[b]]$group.w) <- c("lavaan.vector", "numeric") } } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_data <- function(object, add.labels = FALSE, drop.list.single.group = FALSE) { n.g <- object@Data@ngroups if (object@Model@conditional.x) { return.value <- vector("list", length = n.g) for (g in 1:n.g) { return.value[[g]] <- cbind(object@Data@X[[g]], object@Data@eXo[[g]]) } } else { return.value <- object@Data@X } if (add.labels) { for (g in 1:n.g) { if (object@Model@conditional.x) { colnames(return.value[[g]]) <- c(object@Data@ov.names[[g]], object@Data@ov.names.x[[g]]) } else { colnames(return.value[[g]]) <- object@Data@ov.names[[g]] } } } if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } lav_object_inspect_case_idx <- function(object, drop.list.single.group = FALSE) { n.g <- 
object@Data@ngroups return.value <- object@Data@case.idx if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } # lav_object_inspect_case_idx <- function(object, level = 1L, # drop.list.single.group = FALSE) { # #FIXME: if lavaan ever allows 3-level or cross-classifed models, # # "level=" should be a character indicating the clustering variable # # n.g <- object@Data@ngroups # nlevels <- object@Data@nlevels # if (nlevels == 1L) level <- 1L # # if what="cluster.idx" for single-level model # # if (level == 2L) { # # level-2 (cluster) IDs # return.value <- lapply(object@Data@Lp, function(gg) # gg$cluster.id[[2]][ gg$cluster.idx[[2]] ]) # #FIXME: update if lavaan ever accepts 3-level or # cross-classified models # # } else return.value <- object@Data@case.idx # level-1 (casewise) IDs # # if(n.g == 1L && drop.list.single.group) { # return.value <- return.value[[1]] # } else { # if(length(object@Data@group.label) > 0L) { # names(return.value) <- unlist(object@Data@group.label) # } # } # return.value # } # # cluster info lav_object_inspect_cluster_info <- function( # nolint object, what = "cluster.size", level = 2L, drop.list.single.group = FALSE) { n.g <- object@Data@ngroups nlevels <- object@Data@nlevels # just in case we have no clusters if (nlevels == 1L) { if (what %in% c("nclusters", "ncluster.size", "cluster.id")) { return.value <- as.list(rep(1L, n.g)) } else if (what %in% c("cluster.size", "cluster.sizes")) { return.value <- object@Data@nobs } else if (what %in% c("cluster.idx", "cluster.label")) { # everybody belongs to cluster 1 return.value <- lapply(seq_len(n.g), function(gg) rep(1L, object@Data@nobs[[gg]])) } } # if we do have clusters if (nlevels > 1L) { return.value <- vector("list", length = n.g) for (g in seq_len(n.g)) { tmp.lp <- object@Data@Lp[[g]] if (what == "nclusters") { return.value[[g]] <- tmp.lp$nclusters[[level]] } else if (what == "ncluster.size") { return.value[[g]] <- tmp.lp$ncluster.size[[level]] } else if (what == "cluster.size") { return.value[[g]] <- tmp.lp$cluster.size[[level]] } else if (what == "cluster.id") { return.value[[g]] <- tmp.lp$cluster.id[[level]] } else if (what == "cluster.idx") { return.value[[g]] <- tmp.lp$cluster.idx[[level]] } else if (what == "cluster.label") { return.value[[g]] <- tmp.lp$cluster.id[[level]][tmp.lp$cluster.idx[[level]]] } else if (what == "cluster.sizes") { return.value[[g]] <- tmp.lp$cluster.sizes[[level]] } else if (what == "average.cluster.size") { nn.g <- object@Data@nobs[[g]] cluster.size <- tmp.lp$cluster.size[[level]] nclusters <- tmp.lp$nclusters[[level]] return.value[[g]] <- (nn.g^2 - sum(cluster.size^2)) / (nn.g * (nclusters - 1L)) } } # g } # nlevels > 1L if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } # count the number of clusters, or obtain tmp.n within each cluster # lav_object_inspect_ncluster <- function(object, sizes = FALSE, #level = 2L, # drop.list.single.group = FALSE) { # n.g <- object@Data@ngroups # nlevels <- object@Data@nlevels # # if (nlevels == 1L) { # # single-level model, return sample size(s) or count 1 cluster per group # return.value <- if (sizes) unlist(object@Data@nobs) else rep(1L, n.g) # # } else if (sizes) { # # for each group, a vector of cluster sizes # return.value <- 
lapply(object@Data@Lp, function(gg) gg$cluster.size[[2]]) # #FIXME: update if lavaan ever accepts 3-level or cross-classified models # # if (n.g == 1L && drop.list.single.group) # return.value <- return.value[[1]] # # } else { # # number of clusters in each group # return.value <- sapply(object@Data@Lp, function(gg) gg$nclusters[[2]]) # #FIXME: update if lavaan ever accepts 3-level or cross-classified models # } # # # assign group names, if applicable # if (n.g > 1L) names(return.value) <- unlist(object@Data@group.label) # return.value # } lav_object_inspect_rsquare <- function(object, est.std.all = NULL, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { nblocks <- object@Model@nblocks return.value <- vector("list", length = nblocks) if (is.null(est.std.all)) { est.std.all <- lav_standardize_all(object) } partable <- object@ParTable partable$rsquare <- 1.0 - est.std.all # no values > 1.0 partable$rsquare[partable$rsquare > 1.0] <- as.numeric(NA) for (b in seq_len(nblocks)) { ind.names <- partable$rhs[which(partable$op == "=~" & partable$block == b)] eqs.y.names <- partable$lhs[which(partable$op == "~" & partable$block == b)] y.names <- unique(c(ind.names, eqs.y.names)) idx <- which(partable$op == "~~" & partable$lhs %in% y.names & partable$rhs == partable$lhs & partable$block == b) tmp <- partable$rsquare[idx] if (add.labels && length(tmp) > 0L) { names(tmp) <- partable$lhs[idx] } if (add.class) { class(tmp) <- c("lavaan.vector", "numeric") } return.value[[b]] <- tmp } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } # model implied sample stats lav_object_inspect_implied <- function(object, # nolint add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # old (<0.6) object? 
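  # (illustrative, not run) user-level access to the model-implied moments
  # assembled below; fitted() returns the same list -- model and data are
  # examples only:
  #   fit <- cfa('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939)
  #   fitted(fit)$cov              # model-implied covariance matrix
  #   lavInspect(fit, "implied")   # same list, via the inspect interface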
if (!.hasSlot(object@Data, "block.label")) { object@Data@block.label <- object@Data@group.label } nblocks <- object@Model@nblocks ov.names <- object@pta$vnames$ov ov.names.res <- object@pta$vnames$ov.nox ov.names.x <- object@pta$vnames$ov.x # slots lavimplied <- object@implied lavmodel <- object@Model # if nlevels, always set conditional.x = FALSE if (object@Data@nlevels > 1L) { lavimplied <- lav_model_implied_cond2uncond(lavimplied) conditional.x <- FALSE # for now (0.6-12) } else { conditional.x <- lavmodel@conditional.x } return.value <- vector("list", length = nblocks) for (b in seq_len(nblocks)) { if (!conditional.x) { # covariance matrix return.value[[b]]$cov <- lavimplied$cov[[b]] if (add.labels && !is.null(return.value[[b]]$cov)) { rownames(return.value[[b]]$cov) <- colnames(return.value[[b]]$cov) <- ov.names[[b]] } if (add.class) { class(return.value[[b]]$cov) <- c("lavaan.matrix.symmetric", "matrix") } # mean vector if (lavmodel@meanstructure) { return.value[[b]]$mean <- as.numeric(lavimplied$mean[[b]]) if (add.labels) { names(return.value[[b]]$mean) <- ov.names[[b]] } if (add.class) { class(return.value[[b]]$mean) <- c("lavaan.vector", "numeric") } } # thresholds if (lavmodel@categorical) { return.value[[b]]$th <- as.numeric(lavimplied$th[[b]]) if (length(object@Model@num.idx[[b]]) > 0L) { num.idx <- which(object@Model@th.idx[[b]] == 0) return.value[[b]]$th <- return.value[[b]]$th[-num.idx] } if (add.labels) { names(return.value[[b]]$th) <- object@pta$vnames$th[[b]] } if (add.class) { class(return.value[[b]]$th) <- c("lavaan.vector", "numeric") } } # !conditional.x } else { # if conditional.x = TRUE # residual covariance matrix return.value[[b]]$res.cov <- lavimplied$res.cov[[b]] if (add.labels) { rownames(return.value[[b]]$res.cov) <- colnames(return.value[[b]]$res.cov) <- ov.names.res[[b]] } if (add.class) { class(return.value[[b]]$res.cov) <- c("lavaan.matrix.symmetric", "matrix") } # intercepts if (lavmodel@meanstructure) { return.value[[b]]$res.int <- as.numeric(lavimplied$res.int[[b]]) if (add.labels) { names(return.value[[b]]$res.int) <- ov.names.res[[b]] } if (add.class) { class(return.value[[b]]$res.int) <- c("lavaan.vector", "numeric") } } # thresholds if (lavmodel@categorical) { return.value[[b]]$res.th <- as.numeric(lavimplied$res.th[[b]]) if (length(object@Model@num.idx[[b]]) > 0L) { num.idx <- which(object@Model@th.idx[[b]] == 0) return.value[[b]]$res.th <- return.value[[b]]$res.th[-num.idx] } if (add.labels) { names(return.value[[b]]$res.th) <- object@pta$vnames$th[[b]] } if (add.class) { class(return.value[[b]]$res.th) <- c("lavaan.vector", "numeric") } } # slopes if (lavmodel@nexo[b] > 0L) { return.value[[b]]$res.slopes <- lavimplied$res.slopes[[b]] if (add.labels) { rownames(return.value[[b]]$res.slopes) <- ov.names.res[[b]] colnames(return.value[[b]]$res.slopes) <- ov.names.x[[b]] } if (add.class) { class(return.value[[b]]$res.slopes) <- c("lavaan.matrix", "matrix") } } # cov.x if (lavmodel@nexo[b] > 0L) { return.value[[b]]$cov.x <- lavimplied$cov.x[[b]] if (add.labels) { rownames(return.value[[b]]$cov.x) <- ov.names.x[[b]] colnames(return.value[[b]]$cov.x) <- ov.names.x[[b]] } if (add.class) { class(return.value[[b]]$cov.x) <- c("lavaan.matrix.symmetric", "matrix") } } # mean.x if (lavmodel@nexo[b] > 0L) { return.value[[b]]$mean.x <- as.numeric(lavimplied$mean.x[[b]]) if (add.labels) { names(return.value[[b]]$mean.x) <- ov.names.x[[b]] } if (add.class) { class(return.value[[b]]$mean.x) <- c("lavaan.vector", "numeric") } } } # conditional.x # stochastic 
weights if (lavmodel@group.w.free) { return.value[[b]]$group.w <- lavimplied$group.w[[b]] if (add.labels) { names(return.value[[b]]$group.w) <- "w" # somewhat redundant } if (add.class) { class(return.value[[b]]$group.w) <- c("lavaan.vector", "numeric") } } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } # residuals: _inspect_sampstat - _inspect_implied lav_object_inspect_residuals <- function(object, h1 = TRUE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { lav_residuals(object, type = "raw", h1 = h1, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) } lav_object_inspect_cov_lv <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # compute lv covar return.value <- computeVETA(lavmodel = object@Model, remove.dummy.lv = TRUE) # nblocks nblocks <- length(return.value) # cor + labels + class for (b in seq_len(nblocks)) { if (correlation.metric && nrow(return.value[[b]]) > 1L) { # note: cov2cor fails if matrix is empty! return.value[[b]] <- cov2cor(return.value[[b]]) } if (add.labels) { colnames(return.value[[b]]) <- rownames(return.value[[b]]) <- object@pta$vnames$lv[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.matrix.symmetric", "matrix") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_mean_lv <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # compute lv means return.value <- computeEETA(lavmodel = object@Model, lavsamplestats = object@SampleStats, remove.dummy.lv = TRUE) # nblocks nblocks <- length(return.value) # ensure numeric return.value <- lapply(return.value, as.numeric) # labels + class for (b in seq_len(nblocks)) { if (add.labels && length(return.value[[b]]) > 0L) { names(return.value[[b]]) <- object@pta$vnames$lv[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_cov_all <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # compute extended model implied covariance matrix (both ov and lv) return.value <- computeCOV(lavmodel = object@Model, remove.dummy.lv = TRUE) # nblocks nblocks <- length(return.value) # cor + labels + class for (b in seq_len(nblocks)) { if (correlation.metric && nrow(return.value[[b]]) > 1L) { # note: cov2cor fails if matrix is empty! 
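      # (illustrative) cov2cor() rescales a covariance matrix S to the
      # correlation matrix D^(-1/2) %*% S %*% D^(-1/2), D = diag(diag(S));
      # e.g. cov2cor(matrix(c(4, 2, 2, 9), 2)) has off-diagonal
      # 2 / (2 * 3) = 1/3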
return.value[[b]] <- cov2cor(return.value[[b]]) } if (add.labels) { tmp.names <- c(object@pta$vnames$ov.model[[b]], object@pta$vnames$lv[[b]]) colnames(return.value[[b]]) <- rownames(return.value[[b]]) <- tmp.names } if (add.class) { class(return.value[[b]]) <- c("lavaan.matrix.symmetric", "matrix") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_cov_ov <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # get model-implied covariance matrix observed if (object@Model@conditional.x) { return.value <- object@implied$res.cov } else { return.value <- object@implied$cov } # nblocks nblocks <- length(return.value) # cor + labels + class for (b in seq_len(nblocks)) { if (correlation.metric && nrow(return.value[[b]]) > 1L) { # note: cov2cor fails if matrix is empty! return.value[[b]] <- cov2cor(return.value[[b]]) } if (add.labels) { colnames(return.value[[b]]) <- rownames(return.value[[b]]) <- object@pta$vnames$ov.model[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.matrix.symmetric", "matrix") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_mean_ov <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # compute ov means if (object@Model@conditional.x) { return.value <- object@implied$res.int } else { return.value <- object@implied$mean } # nblocks nblocks <- length(return.value) # make numeric return.value <- lapply(return.value, as.numeric) # labels + class for (b in seq_len(nblocks)) { if (add.labels && length(return.value[[b]]) > 0L) { names(return.value[[b]]) <- object@pta$vnames$ov.model[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_th <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # thresholds if (object@Model@conditional.x) { return.value <- object@implied$res.th } else { return.value <- object@implied$th } # nblocks nblocks <- length(return.value) # make numeric return.value <- lapply(return.value, as.numeric) # labels + class for (b in seq_len(nblocks)) { if (length(object@Model@num.idx[[b]]) > 0L) { num.idx <- which(object@Model@th.idx[[b]] == 0) return.value[[b]] <- return.value[[b]][-num.idx] } if (add.labels && length(return.value[[b]]) > 0L) { names(return.value[[b]]) <- object@pta$vnames$th[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_th_idx <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # thresholds idx return.value <- object@SampleStats@th.idx # nblocks nblocks <- length(return.value) # labels + class for (b in seq_len(nblocks)) { if (add.labels && length(return.value[[b]]) > 0L) { names(return.value[[b]]) <- object@SampleStats@th.names[[b]] } if (add.class && !is.null(return.value[[b]])) { class(return.value[[b]]) <- c("lavaan.vector", 
"numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_vy <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # 'unconditional' model-implied variances # - same as diag(Sigma.hat) if all Y are continuous) # - 1.0 (or delta^2) if categorical # - if also Gamma, cov.x is used (only if categorical) return.value <- computeVY(lavmodel = object@Model, GLIST = NULL, diagonal.only = TRUE) # nblocks nblocks <- length(return.value) # labels + class for (b in seq_len(nblocks)) { if (add.labels && length(return.value[[b]]) > 0L) { if (object@Model@categorical) { names(return.value[[b]]) <- object@pta$vnames$ov.nox[[b]] } else { names(return.value[[b]]) <- object@pta$vnames$ov[[b]] } } if (add.class) { class(return.value[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_theta <- function(object, correlation.metric = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # get residual covariances return.value <- computeTHETA(lavmodel = object@Model) # nblocks nblocks <- length(return.value) # labels + class for (b in seq_len(nblocks)) { if (correlation.metric && nrow(return.value[[b]]) > 0L) { if (all(return.value[[b]] == 0)) { return.value[[b]] <- return.value[[b]] } else { return.value[[b]] <- cov2cor(return.value[[b]]) } } if (add.labels && length(return.value[[b]]) > 0L) { colnames(return.value[[b]]) <- rownames(return.value[[b]]) <- object@pta$vnames$ov.model[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.matrix.symmetric", "matrix") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_missing_coverage <- function(object, # nolint add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { n.g <- object@Data@ngroups return.value <- vector("list", n.g) for (g in 1:n.g) { if (!is.null(object@Data@Mp[[g]])) { return.value[[g]] <- object@Data@Mp[[g]]$coverage } else { nvar <- length(object@Data@ov.names[[g]]) return.value[[g]] <- matrix(1.0, nvar, nvar) } if (add.labels && length(return.value[[g]]) > 0L) { colnames(return.value[[g]]) <- rownames(return.value[[g]]) <- object@pta$vnames$ov.model[[g]] } if (add.class) { class(return.value[[g]]) <- c("lavaan.matrix.symmetric", "matrix") } } if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } lav_object_inspect_missing_patterns <- function(object, # nolint add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { n.g <- object@Data@ngroups return.value <- vector("list", n.g) for (g in 1:n.g) { if (!is.null(object@Data@Mp[[g]])) { return.value[[g]] <- object@Data@Mp[[g]]$pat } else { nvar <- length(object@Data@ov.names[[g]]) return.value[[g]] <- matrix(TRUE, 1L, nvar) rownames(return.value[[g]]) <- object@Data@nobs[[g]] } if (add.labels && length(return.value[[g]]) > 0L) { colnames(return.value[[g]]) <- object@pta$vnames$ov.model[[g]] } if (add.class) { class(return.value[[g]]) <- c("lavaan.matrix", "matrix") } } if (n.g == 1L && 
drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } lav_object_inspect_empty_idx <- function(object, drop.list.single.group = FALSE) { n.g <- object@Data@ngroups # get empty idx return.value <- vector("list", n.g) for (g in 1:n.g) { if (!is.null(object@Data@Mp[[g]])) { return.value[[g]] <- object@Data@Mp[[g]]$empty.idx } else { return.value[[g]] <- integer(0L) } } if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } lav_object_inspect_wls_est <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # old (<0.6) object? if (!.hasSlot(object@Data, "block.label")) { object@Data@block.label <- object@Data@group.label } return.value <- lav_model_wls_est(object@Model) if (add.labels) { tmp.names <- lav_object_inspect_delta_rownames(object, drop.list.single.group = FALSE) } # nblocks nblocks <- length(return.value) for (b in seq_len(nblocks)) { if (add.labels && length(return.value[[b]]) > 0L && object@Data@nlevels == 1L) { names(return.value[[b]]) <- tmp.names[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_wls_obs <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # old (<0.6) object? if (!.hasSlot(object@Data, "block.label")) { object@Data@block.label <- object@Data@group.label } return.value <- object@SampleStats@WLS.obs ### FIXME: should be in @h1?? if (add.labels) { tmp.names <- lav_object_inspect_delta_rownames(object, drop.list.single.group = FALSE) } # nblocks nblocks <- length(return.value) for (b in seq_len(nblocks)) { if (add.labels && length(return.value[[b]]) > 0L && object@Data@nlevels == 1L) { names(return.value[[b]]) <- tmp.names[[b]] } if (add.class) { class(return.value[[b]]) <- c("lavaan.vector", "numeric") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_wls_v <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # old (<0.6) object? 
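  # (illustrative) WLS.V is the weight matrix W in the least-squares fit
  # function F(theta) = (s - sigma(theta))' W (s - sigma(theta)), where s
  # holds the observed and sigma(theta) the model-implied summary
  # statistics; user-level access, for an existing fitted object 'fit':
  #   lavInspect(fit, "wls.v")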
if (!.hasSlot(object@Data, "block.label")) { object@Data@block.label <- object@Data@group.label } # return.value <- lav_model_wls_v(lavmodel = object@Model, # lavsamplestats = object@SampleStats, # structured = TRUE, # lavdata = object@Data) # WLS.V == (traditionally) h1 expected information return.value <- lav_model_h1_information_expected(lavobject = object) # this affects fit measures gfi, agfi, pgfi # nblocks nblocks <- length(return.value) # if estimator == "DWLS" or "ULS", we only stored the diagonal # hence, we create a full matrix here if (object@Options$estimator %in% c("DWLS", "ULS")) { return.value <- lapply(return.value, function(x) { nr <- NROW(x) diag(x, nrow = nr, ncol = nr) }) } if (add.labels) { tmp.names <- lav_object_inspect_delta_rownames(object, drop.list.single.group = FALSE) } # label + class for (b in seq_len(nblocks)) { # labels if (add.labels && nrow(return.value[[b]]) > 0L && object@Data@nlevels == 1L) { colnames(return.value[[b]]) <- rownames(return.value[[b]]) <- tmp.names[[b]] } # class if (add.class) { class(return.value[[b]]) <- c("lavaan.matrix", "matrix") } } if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (nblocks > 1L) { names(return.value) <- object@Data@block.label } return.value } lav_object_inspect_sampstat_gamma <- function(object, # nolint add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { if (!is.null(object@SampleStats@NACOV[[1]])) { return.value <- object@SampleStats@NACOV } else { return.value <- lav_object_gamma(object) } if (add.labels) { tmp.names <- lav_object_inspect_delta_rownames(object, drop.list.single.group = FALSE) } # nblocks nblocks <- length(return.value) if (nblocks == 1L && drop.list.single.group) { return.value <- return.value[[1]] # labels if (add.labels) { colnames(return.value) <- rownames(return.value) <- tmp.names[[1]] } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } } else { if (object@Data@nlevels == 1L && length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) # labels if (add.labels) { for (g in seq_len(object@Data@ngroups)) { colnames(return.value[[g]]) <- rownames(return.value[[g]]) <- tmp.names[[g]] } } # class if (add.class) { for (g in seq_len(object@Data@ngroups)) { class(return.value[[g]]) <- c("lavaan.matrix.symmetric", "matrix") } } } else if (object@Data@nlevels > 1L && length(object@Data@group.label) == 0L) { names(return.value) <- object@Data@level.label } } return.value } lav_object_inspect_gradient <- function(object, add.labels = FALSE, add.class = FALSE, logl = FALSE, optim = FALSE) { lavmodel <- object@Model lavdata <- object@Data lavsamplestats <- object@SampleStats if (optim) { logl <- FALSE } if (lavsamplestats@missing.flag || object@Options$estimator == "PML") { group.weight <- FALSE } else { group.weight <- TRUE } current.verbose <- lav_verbose() if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose), TRUE) dx <- lav_model_gradient(lavmodel = lavmodel, GLIST = NULL, lavsamplestats = lavsamplestats, lavdata = object@Data, lavcache = object@Cache, type = "free", group.weight = group.weight) lav_verbose(current.verbose) # if logl, rescale to get gradient wrt the loglikelihood if (logl) { if (lavmodel@estimator %in% c("ML")) { if (lavdata@nlevels == 1L) { # currently, this is just a sign switch dx <- -1 * dx } else { lavpartable <- object@ParTable # gradient.log = gradient.obj * (2 * tmp.n) / nclusters if (lavdata@ngroups == 1L) { tmp.n <- 
lavdata@Lp[[1]]$nclusters[[1]] nclusters <- lavdata@Lp[[1]]$nclusters[[2]] dx <- dx * (2 * tmp.n) / nclusters } else { group.values <- lav_partable_group_values(lavpartable) for (g in seq_len(lavdata@ngroups)) { tmp.n <- lavdata@Lp[[g]]$nclusters[[1]] nclusters <- lavdata@Lp[[g]]$nclusters[[2]] g.idx <- which((lavpartable$group == group.values[g])[lavpartable$free > 0L]) dx[g.idx] <- dx[g.idx] * (2 * tmp.n) / nclusters } } } } else { # FIXME: # non-likelihood: what to do? just switch the sign for now. # Note: this is used in lavTestScore() dx <- -1 * dx } } # optim? if (optim) { # 1. scale (note: divide, not multiply!) if (!is.null(object@optim$parscale)) { dx <- dx / object@optim$parscale } # 2. pack if (lavmodel@eq.constraints) { dx <- as.numeric(dx %*% lavmodel@eq.constraints.K) } # only for PML: divide by tmp.n (to speed up convergence) if (lavmodel@estimator == "PML") { dx <- dx / lavsamplestats@ntotal } } # labels if (add.labels) { if (optim && lavmodel@eq.constraints) { tmp.names.all <- lav_partable_labels(object@ParTable, type = "free") tmp.seq <- seq_len(length(tmp.names.all)) pack.seq <- as.numeric((tmp.seq - lavmodel@eq.constraints.k0) %*% +lavmodel@eq.constraints.K) ok.idx <- which(pack.seq %in% tmp.seq) tmp.names <- rep("(eq.con)", length(pack.seq)) tmp.names[ok.idx] <- tmp.names.all[pack.seq[ok.idx]] names(dx) <- tmp.names } else { names(dx) <- lav_partable_labels(object@ParTable, type = "free") } } # class if (add.class) { class(dx) <- c("lavaan.vector", "numeric") } dx } lav_object_inspect_hessian <- function(object, add.labels = FALSE, add.class = FALSE) { return.value <- lav_model_hessian(lavmodel = object@Model, lavsamplestats = object@SampleStats, lavdata = object@Data, lavcache = object@Cache, lavoptions = object@Options, group.weight = TRUE) # labels if (add.labels) { colnames(return.value) <- rownames(return.value) <- lav_partable_labels(object@ParTable, type = "free") } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } lav_object_inspect_information <- function(object, information = "default", augmented = FALSE, inverted = FALSE, add.labels = FALSE, add.class = FALSE) { if (information != "default") { # override option object@Options$information <- information } # backward compatibility if (.hasSlot(object, "h1")) { lavh1 <- object@h1 } else { lavh1 <- lav_h1_implied_logl(lavdata = object@Data, lavsamplestats = object@SampleStats, lavoptions = object@Options) } return.value <- lav_model_information(lavmodel = object@Model, lavsamplestats = object@SampleStats, lavdata = object@Data, lavcache = object@Cache, lavimplied = object@implied, lavh1 = lavh1, lavoptions = object@Options, extra = FALSE, augmented = augmented, inverted = inverted) # labels if (add.labels) { tmp.names <- lav_partable_labels(object@ParTable, type = "free") if (augmented) { n.extra <- nrow(return.value) - length(tmp.names) if (n.extra > 0L) { tmp.names <- c(tmp.names, paste("aug", 1:n.extra, sep = "")) } } colnames(return.value) <- rownames(return.value) <- tmp.names } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } lav_object_inspect_h1_information <- function(object, # nolint information = "default", h1.information = "default", inverted = FALSE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { if (information != "default") { # override option object@Options$information <- information } if (h1.information != "default") { # override option 
object@Options$h1.information <- h1.information } lavmodel <- object@Model lavdata <- object@Data # list! return.value <- lav_model_h1_information(lavmodel = lavmodel, lavsamplestats = object@SampleStats, lavdata = lavdata, lavcache = object@Cache, lavimplied = object@implied, lavh1 = object@h1, lavoptions = object@Options) # inverted? (NOT USED) # if(inverted) { # return.value <- lapply(return.value, solve) # FIXME: handle errors... # } if (add.labels) { tmp.names <- lav_object_inspect_delta_rownames(object, drop.list.single.group = FALSE) } # labels/class per group for (g in seq_len(lavmodel@ngroups)) { # labels if (add.labels) { colnames(return.value[[g]]) <- rownames(return.value[[g]]) <- tmp.names[[g]] } # class if (add.class) { class(return.value[[g]]) <- c("lavaan.matrix.symmetric", "matrix") } } # drop list? if (lavmodel@ngroups == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (!is.null(lavdata)) { if (length(lavdata@group.label) > 0L) { names(return.value) <- unlist(lavdata@group.label) } } return.value } # only to provide a direct function to the old 'getVariability()' function lav_object_inspect_firstorder <- function(object, add.labels = FALSE, add.class = FALSE) { tmp.b0 <- lav_model_information_firstorder(lavmodel = object@Model, lavsamplestats = object@SampleStats, lavdata = object@Data, lavcache = object@Cache, lavoptions = object@Options, check.pd = FALSE, augmented = FALSE, inverted = FALSE) attr(tmp.b0, "B0.group") <- NULL return.value <- tmp.b0 # labels if (add.labels) { colnames(return.value) <- rownames(return.value) <- lav_partable_labels(object@ParTable, type = "free") } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } lav_object_inspect_vcov <- function(object, standardized = FALSE, type = "std.all", free.only = TRUE, add.labels = FALSE, add.class = FALSE, remove.duplicated = FALSE) { lavmodel <- object@Model lavoptions <- object@Options # store partable with pta in object to use cache in called functions if (is.null(attr(object, "vnames"))) { object@ParTable <- lav_partable_set_cache(object@ParTable, object@pta) } # rotation? # if( .hasSlot(lavmodel, "nefa") && (lavmodel@nefa > 0L) && # lavoptions$rotation != "none" #&& lavoptions$rotation.se == "delta" # ) { # rotation <- TRUE # } else { # rotation <- FALSE # } if (object@optim$npar == 0) { return.value <- matrix(0, 0, 0) } else { # check if we already have it # tmp <- try(slot(object, "vcov"), silent = TRUE) # if( !inherits(tmp, "try-error") && !is.null(object@vcov$vcov) # && !(rotation && standardized)) { if (.hasSlot(object, "vcov") && !is.null(object@vcov$vcov)) { return.value <- object@vcov$vcov # if this changes, tag @TDJorgensen in commit message } else { # compute it again # if(rotation && standardized) { # lavmodel <- lav_model_set_parameters(lavmodel, # x = object@optim$x) # lavoptions <- object@Options # lavoptions$rotation.se <- "delta" # } return.value <- lav_model_vcov(lavmodel = lavmodel, lavsamplestats = object@SampleStats, lavoptions = lavoptions, lavdata = object@Data, lavcache = object@Cache, lavimplied = object@implied, lavh1 = object@h1 ) # if(rotation && !standardized) { # # fixme: compute tmp.vcov.rot manually... 
# stop("lavaan ERROR: rerun with store.vcov = TRUE") # } if (is.null(return.value)) { return(return.value) } } } # strip attributes attr(return.value, "E.inv") <- NULL attr(return.value, "B0") <- NULL attr(return.value, "B0.group") <- NULL attr(return.value, "Delta") <- NULL attr(return.value, "WLS.V") <- NULL attr(return.value, "tmp.boot.COEF") <- NULL attr(return.value, "tmp.boot.TEST") <- NULL # standardized? if (standardized) { if (type == "std.lv") { tmp.fun <- lav_standardize_lv_x } else if (type == "std.all") { tmp.fun <- lav_standardize_all_x } else if (type == "std.nox") { tmp.fun <- lav_standardize_all_nox_x } # if(rotation) { # if(.hasSlot(object@Model, "ceq.simple.only") && # object@Model@ceq.simple.only) { # x.vec <- drop(object@optim$x %*% t(object@Model@ceq.simple.K)) # } else { # x.vec <- object@optim$x # } # tmp.jac <- numDeriv::jacobian(func = tmp.fun, x = x.vec, # method = "simple", # method.args = list(eps = 1e-03), # default is 1e-04 # lavobject = object, rotation = rotation) # } else { # if(.hasSlot(object@Model, "ceq.simple.only") && # object@Model@ceq.simple.only) { # x <- lav_model_get_parameters(lavmodel) # x.vec <- drop(x %*% t(object@Model@ceq.simple.K)) # } else { x.vec <- lav_model_get_parameters(lavmodel) # } tmp.jac <- try(lav_func_jacobian_complex(func = tmp.fun, x = x.vec, lavobject = object), silent = TRUE) if (inherits(tmp.jac, "try-error")) { # eg. pnorm() tmp.jac <- lav_func_jacobian_simple(func = tmp.fun, x = x.vec, lavobject = object) } # } # tmp.jac contains *all* parameters in the parameter table if (free.only) { if (.hasSlot(object@Model, "ceq.simple.only") && object@Model@ceq.simple.only) { free.idx <- which(object@ParTable$free > 0L && !duplicated(object@ParTable$free)) } else { free.idx <- which(object@ParTable$free > 0L) } tmp.jac <- tmp.jac[free.idx, , drop = FALSE] } return.value <- tmp.jac %*% return.value %*% t(tmp.jac) # force return.value to be symmetric and pd return.value <- (return.value + t(return.value)) / 2 # return.value <- lav_matrix_symmetric_force_pd(return.value, # tol = 1e-09) # was 1e-06 < 0.6-9 } # labels if (add.labels) { # if(rotation && !free.only) { # # todo # } else { colnames(return.value) <- rownames(return.value) <- lav_partable_labels(object@ParTable, type = "free") # } } # alias? 
if (remove.duplicated && lavmodel@eq.constraints) { simple.flag <- lav_constraints_check_simple(lavmodel) if (simple.flag) { tmp.lab <- lav_partable_labels(object@ParTable, type = "free") dup.flag <- duplicated(tmp.lab) return.value <- return.value[!dup.flag, !dup.flag, drop = FALSE] } else { lav_msg_warn( gettext("alias is TRUE, but equality constraints do not appear to be simple; returning full vcov")) } } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } lav_object_inspect_vcov_def <- function(object, joint = FALSE, standardized = FALSE, type = "std.all", add.labels = FALSE, add.class = FALSE) { lavmodel <- object@Model lavpartable <- object@ParTable free.idx <- which(lavpartable$free > 0L) def.idx <- which(lavpartable$op == ":=") joint.idx <- c(free.idx, def.idx) if (!joint && length(def.idx) == 0L) { return(matrix(0, 0, 0)) } else if (joint && length(joint.idx) == 0L) { return(matrix(0, 0, 0)) } if (standardized) { # compute tmp.vcov for "free" parameters only tmp.vcov <- lav_object_inspect_vcov(object, standardized = TRUE, type = type, free.only = FALSE, add.labels = FALSE, add.class = FALSE) if (joint) { return.value <- tmp.vcov[joint.idx, joint.idx, drop = FALSE] } else { return.value <- tmp.vcov[def.idx, def.idx, drop = FALSE] } } else { # get free parameters x <- lav_model_get_parameters(lavmodel, type = "free") # bootstrap or not? if (!is.null(object@boot$coef)) { tmp.boot <- object@boot$coef # remove NA rows error.idx <- attr(tmp.boot, "error.idx") if (length(error.idx) > 0L) { tmp.boot <- tmp.boot[-error.idx, , drop = FALSE] # drops attributes } tmp.boot.def <- apply(tmp.boot, 1L, lavmodel@def.function) if (length(def.idx) == 1L) { tmp.boot.def <- as.matrix(tmp.boot.def) } else { tmp.boot.def <- t(tmp.boot.def) } return.value <- cov(tmp.boot.def) } else { # tmp.vcov tmp.vcov <- lav_object_inspect_vcov(object, standardized = FALSE, type = type, free.only = TRUE, add.labels = FALSE, add.class = FALSE) # regular delta method tmp.jac <- try(lav_func_jacobian_complex(func = lavmodel@def.function, x = x), silent = TRUE) if (inherits(tmp.jac, "try-error")) { # eg. 
pnorm() tmp.jac <- lav_func_jacobian_simple(func = lavmodel@def.function, x = x) } if (joint) { tmp.jac2 <- rbind(diag(nrow = ncol(tmp.jac)), tmp.jac) return.value <- tmp.jac2 %*% tmp.vcov %*% t(tmp.jac2) } else { return.value <- tmp.jac %*% tmp.vcov %*% t(tmp.jac) } } } # labels if (add.labels) { if (joint) { lhs.names <- lavpartable$lhs[joint.idx] } else { lhs.names <- lavpartable$lhs[def.idx] } colnames(return.value) <- rownames(return.value) <- lhs.names } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } lav_object_inspect_UGamma <- function(object, # nolint add.labels = FALSE, add.class = FALSE) { out <- lav_test_satorra_bentler(lavobject = object, method = "original", return.ugamma = TRUE) return.value <- out$UGamma # labels # if(add.labels) { # colnames(return.value) <- rownames(return.value) <- # } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } lav_object_inspect_UfromUGamma <- function(object, # nolint add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { out <- lav_test_satorra_bentler(lavobject = object, method = "original", return.u = TRUE) return.value <- out$UfromUGamma # labels # if(add.labels) { # colnames(return.value) <- rownames(return.value) <- # } # class if (add.class) { class(return.value) <- c("lavaan.matrix.symmetric", "matrix") } return.value } # Delta (jacobian: d samplestats / d free_parameters) lav_object_inspect_delta <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { lavmodel <- object@Model lavdata <- object@Data lavpartable <- object@ParTable lavpta <- object@pta return.value <- lav_object_inspect_delta_internal(lavmodel = lavmodel, lavdata = lavdata, lavpartable = lavpartable, add.labels = add.labels, add.class = add.class, drop.list.single.group = drop.list.single.group) return.value } lav_object_inspect_delta_rownames <- function( # nolint object, lavmodel = NULL, lavpartable = NULL, drop.list.single.group = FALSE) { if (!is.null(object)) { lavmodel <- object@Model lavpartable <- object@ParTable lavpta <- object@pta lavdata <- object@Data } else { lavdata <- NULL lavpta <- lav_partable_attributes(lavpartable) } categorical <- lavmodel@categorical correlation <- FALSE if (.hasSlot(lavmodel, "correlation")) { correlation <- lavmodel@correlation } conditional.x <- lavmodel@conditional.x group.w.free <- lavmodel@group.w.free nvar <- lavmodel@nvar num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx nblocks <- lavmodel@nblocks # store names per block, rbind later tmp.names <- vector("list", length = nblocks) # output is per group return.value <- vector("list", lavmodel@ngroups) for (g in 1:nblocks) { if (conditional.x) { ov.names <- lavpta$vnames$ov.nox[[g]] } else { ov.names <- lavpta$vnames$ov[[g]] } ov.names.x <- lavpta$vnames$ov.x[[g]] nvar <- length(ov.names) names.cov <- names.cor <- names.var <- character(0L) names.mu <- names.pi <- names.th <- character(0L) names.gw <- character(0L) # Sigma # - if continuous: vech(Sigma) # - if categorical: first numeric variances, then # tmp <- apply(expand.grid(ov.names, ov.names), 1L, # paste, collapse = "~~") # if(categorical) { # names.cor <- tmp[lav_matrix_vech_idx(nvar, diagonal = FALSE)] # names.var <- tmp[lav_matrix_diag_idx(nvar)[num.idx[[g]]]] # } else { # names.cov <- tmp[lav_matrix_vech_idx(nvar, diagonal = TRUE)] # } # NOTE: in 0.6-1, we use the same order, but 'label' in row-wise # format (eg x1 ~~ x2 instead of x2 ~~ x1) tmp <- 
matrix(apply(expand.grid(ov.names, ov.names), 1L, paste, collapse = "~~"), nrow = nvar) if (categorical) { names.cor <- lav_matrix_vechru(tmp, diagonal = FALSE) names.var <- diag(tmp)[num.idx[[g]]] } else if (correlation) { names.cor <- lav_matrix_vechru(tmp, diagonal = FALSE) } else { names.cov <- lav_matrix_vechru(tmp, diagonal = TRUE) } # Mu if (!categorical && lavmodel@meanstructure) { names.mu <- paste(ov.names, "~1", sep = "") } # Pi if (conditional.x && lavmodel@nexo[g] > 0L) { names.pi <- apply(expand.grid(ov.names, ov.names.x), 1L, paste, collapse = "~") } # th if (categorical) { names.th <- lavpta$vnames$th[[g]] # interweave numeric intercepts, if any if (length(num.idx[[g]]) > 0L) { tmp <- character(length(th.idx[[g]])) tmp[th.idx[[g]] > 0] <- names.th tmp[th.idx[[g]] == 0] <- paste(ov.names[num.idx[[g]]], "~1", sep = "") names.th <- tmp } } # gw if (group.w.free) { names.gw <- "w" } tmp.names[[g]] <- c(names.gw, names.th, names.mu, names.pi, names.cov, names.var, names.cor) } # blocks # multilevel? if (.hasSlot(lavmodel, "multilevel") && lavmodel@multilevel) { for (g in 1:lavmodel@ngroups) { return.value[[g]] <- c(tmp.names[[(g - 1) * 2 + 1]], tmp.names[[(g - 1) * 2 + 2]]) } } else { return.value <- tmp.names } # drop list? if (lavmodel@ngroups == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else if (!is.null(lavdata)) { if (length(lavdata@group.label) > 0L) { names(return.value) <- unlist(lavdata@group.label) } } return.value } lav_object_inspect_delta_internal <- function( # nolint lavmodel = NULL, lavdata = NULL, lavpartable = NULL, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { return.value <- computeDelta(lavmodel) if (add.labels) { tmp.pnames <- lav_partable_labels(lavpartable, type = "free") tmp.rownames <- lav_object_inspect_delta_rownames(object = NULL, lavmodel = lavmodel, lavpartable = lavpartable, drop.list.single.group = FALSE) } for (g in seq_len(lavmodel@ngroups)) { # add labels if (add.labels) { colnames(return.value[[g]]) <- tmp.pnames rownames(return.value[[g]]) <- tmp.rownames[[g]] } # add class if (add.class) { class(return.value[[g]]) <- c("lavaan.matrix", "matrix") } } # ngroups # drop list? if (lavmodel@ngroups == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(lavdata@group.label) > 0L) { names(return.value) <- unlist(lavdata@group.label) } } return.value } lav_object_inspect_zero_cell_tables <- # nolint function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { # categorical? 
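  # (illustrative, not run) empty cells arise in the two-way frequency
  # tables of ordinal indicators; hypothetical sketch ('mydata' and u1-u3
  # are placeholders):
  #   fit <- cfa('f =~ u1 + u2 + u3', data = mydata,
  #              ordered = c("u1", "u2", "u3"))
  #   lavInspect(fit, "zero.cell.tables")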
if (!object@Model@categorical) { lav_msg_warn(gettext("no categorical variables in fitted model")) return(invisible(list())) } lavdata <- object@Data # create 2-way tables tmp.table <- lavTables(object, dimension = 2L, output = "data.frame", statistic = NULL) # select tables with empty cells empty.id <- tmp.table$id[which(tmp.table$obs.freq == 0)] if (length(empty.id) == 0L) { # only when lavInspect() is used, give message if (add.class) { cat("(There are no tables with empty cells for this fitted model)\n") } return(invisible(list())) } else { return.value <- lav_tables_cells_format( tmp.table[tmp.table$id %in% empty.id, ], lavdata = lavdata, drop.list.single.group = drop.list.single.group) } return.value } lav_object_inspect_coef <- function(object, type = "free", add.labels = FALSE, add.class = FALSE) { if (type == "user" || type == "all") { type <- "user" idx <- seq_along(object@ParTable$lhs) } else if (type == "free") { #idx <- which(object@ParTable$free > 0L & !duplicated(object@ParTable$free)) idx <- which(object@ParTable$free > 0L) } else { lav_msg_stop(gettextf( "%1$s argument must be either %2$s or %3$s", "type", "free", "user")) } tmp.est <- lav_object_inspect_est(object) cof <- tmp.est[idx] # labels? if (add.labels) { names(cof) <- lav_partable_labels(object@ParTable, type = type) } # class if (add.class) { class(cof) <- c("lavaan.vector", "numeric") } cof } lav_object_inspect_npar <- function(object, type = "free") { if (type == "free") { npar <- sum(object@ParTable$free > 0L & !duplicated(object@ParTable$free)) } else { npar <- length(object@ParTable$lhs) } npar } lav_object_inspect_icc <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { lavdata <- object@Data n.g <- lavdata@ngroups return.value <- vector("list", n.g) # multilevel? if (lavdata@nlevels == 1L) { lav_msg_stop(gettext( "intraclass correlation only available for clustered data")) } if (length(object@h1) == 0L) { lav_msg_stop(gettext("h1 slot is not available; refit with h1 = TRUE")) } # implied statistics implied <- object@h1$implied for (g in 1:n.g) { sigma.w <- implied$cov[[(g - 1) * lavdata@nlevels + 1]] sigma.b <- implied$cov[[(g - 1) * lavdata@nlevels + 2]] w.diag <- diag(sigma.w) b.diag <- diag(sigma.b) return.value[[g]] <- numeric(length(w.diag)) ov.names.l <- lavdata@ov.names.l[[g]] w.idx <- which(ov.names.l[[1]] %in% ov.names.l[[2]]) w.names <- ov.names.l[[1]][w.idx] b.idx <- match(w.names, ov.names.l[[2]]) return.value[[g]][w.idx] <- b.diag[b.idx] / (w.diag[w.idx] + b.diag[b.idx]) # label if (add.labels) { names(return.value[[g]]) <- ov.names.l[[1]] } # class if (add.class) { class(return.value[[g]]) <- c("lavaan.vector", "numeric") } } # g if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } lav_object_inspect_ranef <- function(object, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { lavdata <- object@Data lavsamplestats <- object@SampleStats n.g <- lavdata@ngroups return.value <- vector("list", n.g) # multilevel? 
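  # (illustrative) the values computed below are model-based estimates of
  # the cluster means (random intercepts), obtained via the E-step
  # machinery of the two-level normal model; user-level access, for an
  # existing two-level fitted object 'fit':
  #   lavInspect(fit, "ranef")  # per group: a clusters-by-variables matrix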
if (lavdata@nlevels == 1L) { lav_msg_stop(gettext( "random effects only available for clustered data (in the long format)")) } # implied statistics lavimplied <- object@implied for (g in 1:n.g) { tmp.lp <- lavdata@Lp[[g]] tmp.ylp <- lavsamplestats@YLp[[g]] # implied for this group group.idx <- (g - 1) * lavdata@nlevels + seq_len(lavdata@nlevels) implied.group <- lapply(lavimplied, function(x) x[group.idx]) # random effects (=random intercepts or cluster means) out <- lav_mvnorm_cluster_implied22l(Lp = tmp.lp, implied = implied.group) mb.j <- lav_mvnorm_cluster_em_estep_ranef(YLp = tmp.ylp, Lp = tmp.lp, sigma.w = out$sigma.w, sigma.b = out$sigma.b, sigma.zz = out$sigma.zz, sigma.yz = out$sigma.yz, mu.z = out$mu.z, mu.w = out$mu.w, mu.b = out$mu.b, se = FALSE) return.value[[g]] <- mb.j ov.names.l <- lavdata@ov.names.l[[g]] # label if (add.labels) { colnames(return.value[[g]]) <- ov.names.l[[1]] } # class if (add.class) { class(return.value[[g]]) <- c("lavaan.matrix", "matrix") } } # g if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } # casewise loglikelihood contributions lav_object_inspect_loglik_casewise <- function(object, log. = TRUE, # nolint add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { lavdata <- object@Data lavsamplestats <- object@SampleStats lavimplied <- object@implied lavoptions <- object@Options n.g <- lavdata@ngroups return.value <- vector("list", n.g) # multilevel? if (lavdata@nlevels > 1L) { lav_msg_stop(gettext( "casewise (log)likelihood contributions not yet available for clustered data")) } # estimator ML? if (object@Options$estimator != "ML") { lav_msg_stop(gettextf( "casewise (log)likelihood contributions only available for estimator = %s", dQuote("ML"))) } for (g in 1:n.g) { if (lavsamplestats@missing.flag) { return.value[[g]] <- lav_mvnorm_missing_llik_casewise(Y = lavdata@X[[g]], wt = lavdata@weights[[g]], Mu = lavimplied$mean[[g]], Sigma = lavimplied$cov[[g]], x.idx = lavsamplestats@x.idx[[g]]) } else { # single-level, complete data if (lavoptions$conditional.x) { if (!is.null(lavdata@weights[[g]])) { lav_msg_stop(gettext("no support (yet) if weights are used.")) } return.value[[g]] <- lav_mvreg_loglik_data( Y = lavdata@X[[g]], eXo = lavdata@eXo[[g]], res.int = lavimplied$res.int[[g]], res.slopes = lavimplied$res.slopes[[g]], res.cov = lavimplied$res.cov[[g]], casewise = TRUE) } else { if (object@Model@meanstructure) { tmp.mean <- lavimplied$mean[[g]] } else { tmp.mean <- lavsamplestats@mean[[g]] } return.value[[g]] <- lav_mvnorm_loglik_data(Y = lavdata@X[[g]], wt = lavdata@weights[[g]], Mu = tmp.mean, Sigma = lavimplied$cov[[g]], x.idx = lavsamplestats@x.idx[[g]], casewise = TRUE) } } # single-level, complete data # log. = FALSE? if (!log.)
{ return.value[[g]] <- exp(return.value[[g]]) } # label # if(add.labels) { # } # class if (add.class) { class(return.value[[g]]) <- c("lavaan.vector", "numeric") } } # g if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } # Mahalanobis distances for factor scores or casewise residuals # type = "lv" -> factor scores # type = "resid" -> casewise residuals # # we always use Bartlett factor scores (see Yuan & Hayashi 2010) # (this has no impact on the m-distances for the factor scores, # and only a very slight impact on the m-distances for the casewise # residuals; but asymptotically, only when we use Bartlett factor # scores are the 'true scores' (=LAMBDA %*% FS) orthogonal to the # casewise residuals) lav_object_inspect_mdist2 <- function(object, type = "resid", squared = TRUE, add.labels = FALSE, add.class = FALSE, drop.list.single.group = FALSE) { lavdata <- object@Data n.g <- lavdata@ngroups # lavPredict() out <- lavPredict(object, type = type, method = "ML", # = Bartlett label = FALSE, fsm = TRUE, mdist = TRUE, se = "none", acov = "none") return.value <- attr(out, "mdist") for (g in seq_len(n.g)) { # squared? if (!squared) { return.value[[g]] <- sqrt(return.value[[g]]) } # labels? # if(add.labels) { # } # class if (add.class) { class(return.value[[g]]) <- c("lavaan.vector", "numeric") } } # g if (n.g == 1L && drop.list.single.group) { return.value <- return.value[[1]] } else { if (length(object@Data@group.label) > 0L) { names(return.value) <- unlist(object@Data@group.label) } } return.value } lavaan/R/lav_model_objective.R0000644000176200001440000003472714627656441016070 0ustar liggesusers# model objective lav_model_objective <- function(lavmodel = NULL, GLIST = NULL, lavsamplestats = NULL, lavdata = NULL, lavcache = NULL) { # state or final? if (is.null(GLIST)) GLIST <- lavmodel@GLIST # shortcut for data.type == "none" or estimator == "none" if (lavmodel@estimator == "none" || length(lavsamplestats@cov) == 0L) { fx <- as.numeric(NA) attr(fx, "fx.group") <- rep(as.numeric(NA), lavsamplestats@ngroups) return(fx) } meanstructure <- lavmodel@meanstructure estimator <- lavmodel@estimator categorical <- lavmodel@categorical if (.hasSlot(lavmodel, "correlation")) { correlation <- lavmodel@correlation } else { correlation <- FALSE } group.w.free <- lavmodel@group.w.free fixed.x <- lavmodel@fixed.x conditional.x <- lavmodel@conditional.x num.idx <- lavmodel@num.idx th.idx <- lavmodel@th.idx if (.hasSlot(lavmodel, "estimator.args")) { estimator.args <- lavmodel@estimator.args } else { estimator.args <- list() } # do we need WLS.est? if (estimator %in% c("ULS", "WLS", "DWLS", "NTRLS", "DLS")) { lavimplied <- lav_model_implied(lavmodel, GLIST = GLIST) # check for COV with negative diagonal elements? 
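# note: if the model-implied covariance matrix contains NAs or negative
# variances, the WLS-type discrepancy is undefined; the loop below then
# returns NA so the optimizer treats this point as a failed evaluation.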
for (g in 1:lavsamplestats@ngroups) { COV <- if (lavmodel@conditional.x) { lavimplied$res.cov[[g]] } else { lavimplied$cov[[g]] } dCOV <- diag(COV) if (anyNA(COV) || any(dCOV < 0)) { # return NA fx <- as.numeric(NA) attr(fx, "fx.group") <- rep(as.numeric(NA), lavsamplestats@ngroups) return(fx) } } WLS.est <- lav_model_wls_est( lavmodel = lavmodel, GLIST = GLIST, lavimplied = lavimplied ) # , # cov.x = lavsamplestats@cov.x) if (estimator == "NTRLS") { Sigma.hat <- computeSigmaHat( lavmodel = lavmodel, GLIST = GLIST, extra = TRUE ) Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST) } if (estimator == "DLS" && estimator.args$dls.GammaNT == "model") { Sigma.hat <- computeSigmaHat( lavmodel = lavmodel, GLIST = GLIST, extra = FALSE ) Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST) } if (lav_debug()) print(WLS.est) } else if (estimator %in% c("ML", "GLS", "PML", "FML", "REML", "catML") && lavdata@nlevels == 1L) { # compute moments for all groups # if(conditional.x) { # Sigma.hat <- computeSigmaHatJoint(lavmodel = lavmodel, # GLIST = GLIST, lavsamplestats = lavsamplestats, # extra = (estimator %in% c("ML", "REML","NTRLS"))) # } else { Sigma.hat <- computeSigmaHat( lavmodel = lavmodel, GLIST = GLIST, extra = (estimator %in% c( "ML", "REML", "NTRLS", "catML" )) ) # } if (estimator == "REML") { LAMBDA <- computeLAMBDA(lavmodel = lavmodel, GLIST = GLIST) } # ridge? if (lavsamplestats@ridge > 0.0) { for (g in 1:lavsamplestats@ngroups) { diag(Sigma.hat[[g]]) <- diag(Sigma.hat[[g]]) + lavsamplestats@ridge } } if (lav_debug()) print(Sigma.hat) if (meanstructure) { # if(conditional.x) { # Mu.hat <- computeMuHatJoint(lavmodel = lavmodel, GLIST = GLIST, # lavsamplestats = lavsamplestats) # } else { Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST) # } } if (categorical) { TH <- computeTH(lavmodel = lavmodel, GLIST = GLIST) } if (conditional.x) { PI <- computePI(lavmodel = lavmodel, GLIST = GLIST) } if (group.w.free) { GW <- computeGW(lavmodel = lavmodel, GLIST = GLIST) } } else if (estimator == "MML") { TH <- computeTH(lavmodel = lavmodel, GLIST = GLIST) THETA <- computeTHETA(lavmodel = lavmodel, GLIST = GLIST) GW <- computeGW(lavmodel = lavmodel, GLIST = GLIST) } fx <- 0.0 fx.group <- numeric(lavsamplestats@ngroups) logl.group <- rep(as.numeric(NA), lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { # incomplete data and fiml? 
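# note: the branches below select the casewise (FIML) objective when
# missing data are present, and otherwise the complete-data objective;
# for ML with complete data the per-group discrepancy is the usual
#   F_g = log|Sigma.hat| + tr(S %*% solve(Sigma.hat)) - log|S| - p
# (plus a quadratic mean term when meanstructure = TRUE).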
if (lavsamplestats@missing.flag && estimator != "Bayes") { if (estimator == "ML" && lavdata@nlevels == 1L) { # FIML if (!attr(Sigma.hat[[g]], "po")) { return(Inf) } group.fx <- estimator.FIML( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], Yp = lavsamplestats@missing[[g]], h1 = lavsamplestats@missing.h1[[g]]$h1, N = lavsamplestats@nobs[[g]] ) } else if (estimator == "ML" && lavdata@nlevels > 1L) { # FIML twolevel group.fx <- estimator.2L( lavmodel = lavmodel, GLIST = GLIST, Y1 = lavdata@X[[g]], Lp = lavdata@Lp[[g]], Mp = lavdata@Mp[[g]], lavsamplestats = lavsamplestats, group = g ) } else { lav_msg_stop(gettextf( "this estimator: `%s' can not be used with incomplete data and the missing=\"ml\" option", estimator)) } } else if (estimator == "ML" || estimator == "Bayes" || estimator == "catML") { # complete data # ML and friends if (lavdata@nlevels > 1L) { if (estimator %in% c("catML", "Bayes")) { lav_msg_stop(gettext("multilevel data not supported for estimator"), estimator) } group.fx <- estimator.2L( lavmodel = lavmodel, GLIST = GLIST, Lp = lavdata@Lp[[g]], Mp = NULL, # complete data lavsamplestats = lavsamplestats, group = g ) } else if (conditional.x) { group.fx <- estimator.ML_res( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], PI = PI[[g]], res.cov = lavsamplestats@res.cov[[g]], res.int = lavsamplestats@res.int[[g]], res.slopes = lavsamplestats@res.slopes[[g]], res.cov.log.det = lavsamplestats@res.cov.log.det[[g]], cov.x = lavsamplestats@cov.x[[g]], mean.x = lavsamplestats@mean.x[[g]] ) } else { group.fx <- estimator.ML( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], data.cov = lavsamplestats@cov[[g]], data.mean = lavsamplestats@mean[[g]], data.cov.log.det = lavsamplestats@cov.log.det[[g]], meanstructure = meanstructure ) } ### GLS #### (0.6-10: not using WLS function any longer) } else if (estimator == "GLS") { group.fx <- estimator.GLS( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], data.cov = lavsamplestats@cov[[g]], data.cov.inv = lavsamplestats@icov[[g]], data.mean = lavsamplestats@mean[[g]], meanstructure = meanstructure, correlation = correlation ) } else if (estimator == "WLS" || estimator == "DLS" || estimator == "NTRLS") { # full weight matrix if (estimator == "WLS") { WLS.V <- lavsamplestats@WLS.V[[g]] } else if (estimator == "DLS") { if (estimator.args$dls.GammaNT == "sample") { WLS.V <- lavsamplestats@WLS.V[[g]] } else { dls.a <- estimator.args$dls.a GammaNT <- lav_samplestats_Gamma_NT( COV = Sigma.hat[[g]], MEAN = Mu.hat[[g]], rescale = FALSE, x.idx = lavsamplestats@x.idx[[g]], fixed.x = lavmodel@fixed.x, conditional.x = lavmodel@conditional.x, meanstructure = lavmodel@meanstructure, slopestructure = lavmodel@conditional.x ) W.DLS <- (1 - dls.a) * lavsamplestats@NACOV[[g]] + dls.a * GammaNT WLS.V <- lav_matrix_symmetric_inverse(W.DLS) } } else if (estimator == "NTRLS") { # WLS.V <- lav_samplestats_Gamma_inverse_NT( # ICOV = attr(Sigma.hat[[g]],"inv")[,,drop=FALSE], # COV = Sigma.hat[[g]][,,drop=FALSE], # MEAN = Mu.hat[[g]], # x.idx = c(10000,10001), ### FIXME!!!! # fixed.x = fixed.x, # conditional.x = conditional.x, # meanstructure = meanstructure, # slopestructure = conditional.x) WLS.V <- lav_mvnorm_information_expected( Sigma = Sigma.hat[[g]], x.idx = lavsamplestats@x.idx[[g]], meanstructure = lavmodel@meanstructure ) # DEBUG!!!! 
# WLS.V <- 2*WLS.V } group.fx <- estimator.WLS( WLS.est = WLS.est[[g]], WLS.obs = lavsamplestats@WLS.obs[[g]], WLS.V = WLS.V ) attr(group.fx, "WLS.est") <- WLS.est[[g]] } else if (estimator == "DWLS" || estimator == "ULS") { # diagonal weight matrix group.fx <- estimator.DWLS( WLS.est = WLS.est[[g]], WLS.obs = lavsamplestats@WLS.obs[[g]], WLS.VD = lavsamplestats@WLS.VD[[g]] ) attr(group.fx, "WLS.est") <- WLS.est[[g]] } else if (estimator == "PML") { # Pairwise maximum likelihood if (lavdata@nlevels > 1L) { # group.fx <- estimator.PML.2L(lavmodel = lavmodel, # GLIST = GLIST, # Lp = lavdata@Lp[[g]], # lavsamplestats = lavsamplestats, # group = g) group.fx <- 0 # for now attr(group.fx, "logl") <- 0 } else if (conditional.x) { group.fx <- estimator.PML( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], TH = TH[[g]], PI = PI[[g]], th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = lavdata@X[[g]], eXo = lavdata@eXo[[g]], wt = lavdata@weights[[g]], lavcache = lavcache[[g]], missing = lavdata@missing ) } else { group.fx <- estimator.PML( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], TH = TH[[g]], PI = NULL, th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = lavdata@X[[g]], eXo = NULL, wt = lavdata@weights[[g]], lavcache = lavcache[[g]], missing = lavdata@missing ) } logl.group[g] <- attr(group.fx, "logl") } else if (estimator == "FML") { # Full maximum likelihood (underlying multivariate normal) group.fx <- estimator.FML( Sigma.hat = Sigma.hat[[g]], TH = TH[[g]], th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = lavdata@X[[g]], lavcache = lavcache[[g]] ) } else if (estimator == "MML") { # marginal maximum likelihood group.fx <- estimator.MML( lavmodel = lavmodel, GLIST = GLIST, THETA = THETA[[g]], TH = TH[[g]], group = g, lavdata = lavdata, sample.mean = lavsamplestats@mean[[g]], sample.mean.x = lavsamplestats@mean.x[[g]], lavcache = lavcache ) } else if (estimator == "REML") { # restricted/residual maximum likelihood group.fx <- estimator.REML( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], data.cov = lavsamplestats@cov[[g]], data.mean = lavsamplestats@mean[[g]], data.cov.log.det = lavsamplestats@cov.log.det[[g]], meanstructure = meanstructure, group = g, lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata ) } else { lav_msg_stop(gettext("unsupported estimator:"), estimator) } if (estimator %in% c("ML", "REML", "NTRLS", "catML")) { if (lavdata@nlevels == 1L) { group.fx <- 0.5 * group.fx ## FIXME } } else if (estimator == "PML" || estimator == "FML" || estimator == "MML") { # do nothing } else if (estimator == "DLS") { if (estimator.args$dls.FtimesNminus1) { group.fx <- 0.5 * (lavsamplestats@nobs[[g]] - 1) / lavsamplestats@nobs[[g]] * group.fx } else { group.fx <- 0.5 * group.fx } } else { group.fx <- 0.5 * (lavsamplestats@nobs[[g]] - 1) / lavsamplestats@nobs[[g]] * group.fx } fx.group[g] <- group.fx } # g if (lavsamplestats@ngroups > 1) { ## FIXME: if group.w.free, should we use group.w or nobs??? ## - if we use estimated group.w, gradient changes!!!! ## - but, if group models are misspecified, the group weights ## will be affected too... which is unwanted (I think) # if(group.w.free) { # nobs <- unlist(GW) * lavsamplestats@ntotal # nobs <- exp(unlist(GW)) # } else { if (estimator == "PML") { # no weighting needed! 
(since N_g is part of the logl per group) fx <- sum(fx.group) } else { nobs <- unlist(lavsamplestats@nobs) # } fx <- weighted.mean(fx.group, w = nobs) } } else { # single group fx <- fx.group[1] } # penalty for group.w + ML if (group.w.free && estimator %in% c( "ML", "MML", "FML", "PML", "REML", "catML" )) { # obs.prop <- unlist(lavsamplestats@group.w) # est.prop <- unlist(GW) # if(estimator %in% c("WLS", "GLS", ...) { # # X2 style discrepancy measures (aka GLS/WLS!!) # fx.w <- sum ( (obs.prop-est.prop)^2/est.prop ) # } else { # # G2 style discrepancy measures (aka ML) # # deriv is here -2 * (obs.prop - est.prop) # fx.w <- sum(obs.prop * log(obs.prop/est.prop) ) # } # poisson kernel obs.freq <- unlist(lavsamplestats@group.w) * lavsamplestats@ntotal est.freq <- exp(unlist(GW)) fx.w <- -1 * sum(obs.freq * log(est.freq) - est.freq) # divide by N (to be consistent with the rest of lavaan) fx.w <- fx.w / lavsamplestats@ntotal fx.sat <- sum(obs.freq * log(obs.freq) - obs.freq) fx.sat <- fx.sat / lavsamplestats@ntotal # saturated - poisson # fx.w <- sum(obs.freq * log(obs.freq/est.freq)) # does not work without constraints? --> need lagrange multiplier fx <- fx + (fx.w + fx.sat) } fx.value <- as.numeric(fx) attr(fx, "fx.group") <- fx.group if (estimator == "PML") { attr(fx, "logl.group") <- logl.group attr(fx, "fx.pml") <- fx.value } fx } lavaan/R/lav_fit_cfi.R0000644000176200001440000005247714627656441014343 0ustar liggesusers# functions related to CFI and other 'incremental' fit indices # lower-level functions: # - lav_fit_cfi # - lav_fit_rni (same as CFI, but without the max(0,)) # - lav_fit_tli/lav_fit_nnfi # - lav_fit_rfi # - lav_fit_nfi # - lav_fit_pnfi # - lav_fit_ifi # higher-level functions: # - lav_fit_cfi_lavobject # Y.R. 20 July 2022 # CFI - comparative fit index (Bentler, 1990) # robust version: Brosseau-Liard & Savalei MBR 2014, equation 15 # robust version MLMV (scaled.shifted) # Savalei, V. (2018). On the computation of the RMSEA and CFI from the # mean-and-variance corrected test statistic with nonnormal data in SEM. # Multivariate behavioral research, 53(3), 419-429. eq 9 # note: robust MLM == robust MLMV # categorical data: # Savalei, V. (2021). Improving fit indices in structural equation modeling with # categorical data. Multivariate Behavioral Research, 56(3), 390-407. doi: # 10.1080/00273171.2020.1717922 # when missing = "fiml": # Zhang, X., & Savalei, V. (2023). New computations for RMSEA and CFI following # FIML and TS estimation with missing data. Psychological Methods, 28(2), # 263-283. https://doi.org/10.1037/met0000445 lav_fit_cfi <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL, c.hat = 1, c.hat.null = 1) { if (anyNA(c(X2, df, X2.null, df.null, c.hat, c.hat.null))) { return(as.numeric(NA)) } # robust? if (df > 0 && !missing(c.hat) && !missing(c.hat.null) && c.hat != 1 && c.hat.null != 1) { t1 <- max(c(X2 - (c.hat * df), 0)) t2 <- max(c(X2 - (c.hat * df), X2.null - (c.hat.null * df.null), 0)) } else { t1 <- max(c(X2 - df, 0)) t2 <- max(c(X2 - df, X2.null - df.null, 0)) } if (isTRUE(all.equal(t1, 0)) && isTRUE(all.equal(t2, 0))) { CFI <- 1 } else { CFI <- 1 - t1 / t2 } CFI } # RNI - relative noncentrality index (McDonald & Marsh, 1990) # same as CFI, but without the max(0,) lav_fit_rni <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL, c.hat = 1, c.hat.null = 1) { if (anyNA(c(X2, df, X2.null, df.null, c.hat, c.hat.null))) { return(as.numeric(NA)) } # robust? 
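# note: as for the CFI above, the 'robust' variant replaces df by
# c.hat * df (Brosseau-Liard & Savalei, 2014), i.e.
#   RNI.robust = 1 - (X2 - c.hat * df) / (X2.null - c.hat.null * df.null)
# which reduces to the unadjusted RNI when c.hat = c.hat.null = 1.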
if (df > 0 && !missing(c.hat) && !missing(c.hat.null) && c.hat != 1 && c.hat.null != 1) { t1 <- X2 - (c.hat * df) t2 <- X2.null - (c.hat.null * df.null) } else { t1 <- X2 - df t2 <- X2.null - df.null } if (isTRUE(all.equal(t2, 0))) { RNI <- as.numeric(NA) } else if (!is.finite(t1) || !is.finite(t2)) { RNI <- as.numeric(NA) } else { RNI <- 1 - t1 / t2 } RNI } # TLI - Tucker-Lewis index (Tucker & Lewis, 1973) # same as # NNFI - nonnormed fit index (NNFI, Bentler & Bonett, 1980) # note: formula in lavaan <= 0.5-20: # t1 <- X2.null/df.null - X2/df # t2 <- X2.null/df.null - 1 # if(t1 < 0 && t2 < 0) { # TLI <- 1 # } else { # TLI <- t1/t2 # } # note: TLI original formula was in terms of fx/df, not X2/df # then, t1 <- fx_0/df.null - fx/df # t2 <- fx_0/df.null - 1/N (or N-1 for wishart) # note: in lavaan 0.5-21, we use the alternative formula: # TLI <- 1 - ((X2 - df)/(X2.null - df.null) * df.null/df) # - this one has the advantage that a 'robust' version # can be derived; this seems non-trivial for the original one # - unlike cfi, we do not use 'max(0, )' for t1 and t2 # therefore, t1 can go negative, and TLI can be > 1 lav_fit_tli <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL, c.hat = 1, c.hat.null = 1) { if (anyNA(c(X2, df, X2.null, df.null, c.hat, c.hat.null))) { return(as.numeric(NA)) } # robust? if (df > 0 && !missing(c.hat) && !missing(c.hat.null) && c.hat != 1 && c.hat.null != 1) { t1 <- (X2 - c.hat * df) * df.null t2 <- (X2.null - c.hat.null * df.null) * df } else { t1 <- (X2 - df) * df.null t2 <- (X2.null - df.null) * df } if (df > 0 && abs(t2) > 0) { TLI <- 1 - t1 / t2 } else if (!is.finite(t1) || !is.finite(t2)) { TLI <- as.numeric(NA) } else { TLI <- 1 } TLI } # alias for nnfi lav_fit_nnfi <- lav_fit_tli # RFI - relative fit index (Bollen, 1986; Joreskog & Sorbom 1993) lav_fit_rfi <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL) { if (anyNA(c(X2, df, X2.null, df.null))) { return(as.numeric(NA)) } if (df > df.null) { RLI <- as.numeric(NA) } else if (df > 0 && df.null > 0) { t1 <- X2.null / df.null - X2 / df t2 <- X2.null / df.null if (!is.finite(t1) || !is.finite(t2)) { RLI <- as.numeric(NA) } else if (t1 < 0 || t2 < 0) { RLI <- 1 } else { RLI <- t1 / t2 } } else { RLI <- 1 } RLI } # NFI - normed fit index (Bentler & Bonett, 1980) lav_fit_nfi <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL) { if (anyNA(c(X2, df, X2.null, df.null))) { return(as.numeric(NA)) } if (df > df.null || isTRUE(all.equal(X2.null, 0))) { NFI <- as.numeric(NA) } else if (df > 0) { t1 <- X2.null - X2 t2 <- X2.null NFI <- t1 / t2 } else { NFI <- 1 } NFI } # PNFI - Parsimony normed fit index (James, Mulaik & Brett, 1982) lav_fit_pnfi <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL) { if (anyNA(c(X2, df, X2.null, df.null))) { return(as.numeric(NA)) } if (df.null > 0 && X2.null > 0) { t1 <- X2.null - X2 t2 <- X2.null PNFI <- (df / df.null) * (t1 / t2) } else { PNFI <- as.numeric(NA) } PNFI } # IFI - incremental fit index (Bollen, 1989; Joreskog & Sorbom, 1993) lav_fit_ifi <- function(X2 = NULL, df = NULL, X2.null = NULL, df.null = NULL) { if (anyNA(c(X2, df, X2.null, df.null))) { return(as.numeric(NA)) } t1 <- X2.null - X2 t2 <- X2.null - df if (!is.finite(t1) || !is.finite(t2)) { IFI <- as.numeric(NA) } else if (t2 < 0) { IFI <- 1 } else if (isTRUE(all.equal(t2, 0))) { IFI <- as.numeric(NA) } else { IFI <- t1 / t2 } IFI } # higher-level function lav_fit_cfi_lavobject <- function(lavobject = NULL, fit.measures = "cfi", baseline.model = NULL, h1.model = 
NULL, standard.test = "standard", scaled.test = "none", robust = TRUE, cat.check.pd = TRUE) { # check lavobject stopifnot(inherits(lavobject, "lavaan")) # check for categorical categorical.flag <- lavobject@Model@categorical # tests TEST <- lavobject@test test.names <- sapply(lavobject@test, "[[", "test") if (test.names[1] == "none" || standard.test == "none") { return(list()) } test.idx <- which(test.names == standard.test)[1] if (length(test.idx) == 0L) { return(list()) } scaled.flag <- FALSE if (!scaled.test %in% c("none", "standard", "default")) { scaled.idx <- which(test.names == scaled.test) if (length(scaled.idx) > 0L) { scaled.idx <- scaled.idx[1] # only the first one scaled.flag <- TRUE } } # robust? robust.flag <- FALSE if (robust && scaled.flag && scaled.test %in% c( "satorra.bentler", "yuan.bentler.mplus", "yuan.bentler", "scaled.shifted" )) { robust.flag <- TRUE } # FIML? fiml.flag <- FALSE if (robust && lavobject@Options$missing %in% c("ml", "ml.x")) { fiml.flag <- robust.flag <- TRUE # check if we can compute corrected values if (scaled.flag) { version <- "V3" } else { version <- "V6" } fiml <- try(lav_fit_fiml_corrected(lavobject, version = version), silent = TRUE ) if (inherits(fiml, "try-error")) { lav_msg_warn(gettext("computation of robust CFI failed.")) fiml <- list( XX3 = as.numeric(NA), df3 = as.numeric(NA), c.hat3 = as.numeric(NA), XX3.scaled = as.numeric(NA), XX3.null = as.numeric(NA), df3.null = as.numeric(NA), c.hat3.null = as.numeric(NA) ) } else if (anyNA(c( fiml$XX3, fiml$df3, fiml$c.hat3, fiml$XX3.scaled, fiml$XX3.null, fiml$df3.null, fiml$c.hat3.null ))) { lav_msg_warn(gettext("computation of robust CFI resulted in NA values.")) } } # supported fit measures in this function # baseline model fit.baseline <- c("baseline.chisq", "baseline.df", "baseline.pvalue") if (scaled.flag) { fit.baseline <- c( fit.baseline, "baseline.chisq.scaled", "baseline.df.scaled", "baseline.pvalue.scaled", "baseline.chisq.scaling.factor" ) } fit.cfi.tli <- c("cfi", "tli") if (scaled.flag) { fit.cfi.tli <- c(fit.cfi.tli, "cfi.scaled", "tli.scaled") } if (robust.flag) { fit.cfi.tli <- c(fit.cfi.tli, "cfi.robust", "tli.robust") } # other incremental fit indices fit.cfi.other <- c("nnfi", "rfi", "nfi", "pnfi", "ifi", "rni") if (scaled.flag) { fit.cfi.other <- c( fit.cfi.other, "nnfi.scaled", "rfi.scaled", "nfi.scaled", "pnfi.scaled", "ifi.scaled", "rni.scaled" ) } if (robust.flag) { fit.cfi.other <- c(fit.cfi.other, "nnfi.robust", "rni.robust") } # which one do we need? 
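# note: a hedged usage sketch -- end users normally obtain these values
# through the public interface, e.g. for a fitted lavaan object 'fit'
# (hypothetical):
#   fitMeasures(fit, c("cfi", "tli", "rni"))
# the code below merely prunes the requested measures to the CFI family.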
if (missing(fit.measures)) { # default set fit.measures <- c(fit.baseline, fit.cfi.tli) } else { # remove any not-CFI related index from fit.measures rm.idx <- which(!fit.measures %in% c(fit.baseline, fit.cfi.tli, fit.cfi.other)) if (length(rm.idx) > 0L) { fit.measures <- fit.measures[-rm.idx] } if (length(fit.measures) == 0L) { return(list()) } } # basic test statistics X2 <- TEST[[test.idx]]$stat df <- TEST[[test.idx]]$df G <- lavobject@Data@ngroups # number of groups N <- lav_utils_get_ntotal(lavobject = lavobject) # N vs N-1 # scaled X2 if (scaled.flag) { X2.scaled <- TEST[[scaled.idx]]$stat df.scaled <- TEST[[scaled.idx]]$df } if (robust.flag) { XX3 <- X2 if (categorical.flag) { out <- try(lav_fit_catml_dwls(lavobject, check.pd = cat.check.pd), silent = TRUE ) if (inherits(out, "try-error")) { XX3 <- df3 <- c.hat <- c.hat3 <- XX3.scaled <- as.numeric(NA) } else { XX3 <- out$XX3 df3 <- out$df3 c.hat3 <- c.hat <- out$c.hat3 XX3.scaled <- out$XX3.scaled } } else if (fiml.flag) { XX3 <- fiml$XX3 df3 <- fiml$df3 c.hat3 <- c.hat <- fiml$c.hat3 XX3.scaled <- fiml$XX3.scaled } else if (scaled.test == "scaled.shifted") { # compute c.hat from a and b a <- TEST[[scaled.idx]]$scaling.factor b <- TEST[[scaled.idx]]$shift.parameter c.hat <- a * (df - b) / df } else { c.hat <- TEST[[scaled.idx]]$scaling.factor } } # output container indices <- list() # only do what is needed (per groups) cfi.baseline.flag <- cfi.tli.flag <- cfi.other.flag <- FALSE if (any(fit.baseline %in% fit.measures)) { cfi.baseline.flag <- TRUE } if (any(fit.cfi.tli %in% fit.measures)) { cfi.tli.flag <- TRUE } if (any(fit.cfi.other %in% fit.measures)) { cfi.other.flag <- TRUE } # 1. BASELINE model baseline.test <- NULL # we use the following priority: # 1. user-provided baseline model # 2. baseline model in @external slot # 3. baseline model in @baseline slot # 4. nothing -> compute independence model # TDJ: Also check for user-supplied h1.model, using similar priority: # 1. user-provided h1 model # 2. h1 model in @external slot # 3. default h1 model (already in @h1 slot, no update necessary) # 1. user-provided h1 model if (!is.null(h1.model)) { stopifnot(inherits(h1.model, "lavaan")) # 2. h1 model in @external slot } else if (!is.null(lavobject@external$h1.model)) { stopifnot(inherits(lavobject@external$h1.model, "lavaan")) h1.model <- lavobject@external$h1.model } # else is.null # 1. user-provided baseline model if (!is.null(baseline.model)) { baseline.test <- lav_fit_measures_check_baseline( fit.indep = baseline.model, object = lavobject, fit.h1 = h1.model # okay if NULL ) # 2. baseline model in @external slot } else if (!is.null(lavobject@external$baseline.model)) { fit.indep <- lavobject@external$baseline.model baseline.test <- lav_fit_measures_check_baseline( fit.indep = fit.indep, object = lavobject, fit.h1 = h1.model # okay if NULL ) # 3. internal @baseline slot } else if (.hasSlot(lavobject, "baseline") && length(lavobject@baseline) > 0L && !is.null(lavobject@baseline$test) && ## if there is a custom h1.model, need _check_baseline() to update @test is.null(h1.model)) { baseline.test <- lavobject@baseline$test # 4. 
(re)compute independence model } else { fit.indep <- try(lav_object_independence(lavobject), silent = TRUE) baseline.test <- lav_fit_measures_check_baseline( fit.indep = fit.indep, object = lavobject, fit.h1 = h1.model # okay if NULL ) } # baseline.test.idx baseline.test.idx <- which(names(baseline.test) == standard.test)[1] if (scaled.flag) { baseline.scaled.idx <- which(names(baseline.test) == scaled.test)[1] } if (!is.null(baseline.test)) { X2.null <- baseline.test[[baseline.test.idx]]$stat df.null <- baseline.test[[baseline.test.idx]]$df if (scaled.flag) { X2.null.scaled <- baseline.test[[baseline.scaled.idx]]$stat df.null.scaled <- baseline.test[[baseline.scaled.idx]]$df } if (robust.flag) { XX3.null <- X2.null if (categorical.flag) { if (inherits(out, "try-error")) { XX3.null <- c.hat.null <- as.numeric(NA) } else { XX3.null <- out$XX3.null c.hat.null <- out$c.hat3.null } } else if (fiml.flag) { XX3.null <- fiml$XX3.null c.hat.null <- fiml$c.hat3.null } else if (scaled.test == "scaled.shifted") { # compute c.hat from a and b a.null <- baseline.test[[baseline.scaled.idx]]$scaling.factor b.null <- baseline.test[[baseline.scaled.idx]]$shift.parameter c.hat.null <- a.null * (df.null - b.null) / df.null } else { c.hat.null <- baseline.test[[baseline.scaled.idx]]$scaling.factor } } } else { X2.null <- df.null <- as.numeric(NA) X2.null.scaled <- df.null.scaled <- as.numeric(NA) c.hat.null <- as.numeric(NA) } # check for NAs of nonfinite numbers if (!is.finite(X2) || !is.finite(df) || !is.finite(X2.null) || !is.finite(df.null)) { indices[fit.measures] <- as.numeric(NA) return(indices) } # fill in baseline indices if (cfi.baseline.flag) { indices["baseline.chisq"] <- X2.null indices["baseline.df"] <- df.null indices["baseline.pvalue"] <- baseline.test[[baseline.test.idx]]$pvalue if (scaled.flag) { indices["baseline.chisq.scaled"] <- X2.null.scaled indices["baseline.df.scaled"] <- df.null.scaled indices["baseline.pvalue.scaled"] <- baseline.test[[baseline.scaled.idx]]$pvalue indices["baseline.chisq.scaling.factor"] <- baseline.test[[baseline.scaled.idx]]$scaling.factor } } # 2. CFI and TLI if (cfi.tli.flag) { indices["cfi"] <- lav_fit_cfi( X2 = X2, df = df, X2.null = X2.null, df.null = df.null ) indices["tli"] <- lav_fit_tli( X2 = X2, df = df, X2.null = X2.null, df.null = df.null ) if (scaled.flag) { indices["cfi.scaled"] <- lav_fit_cfi( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) indices["tli.scaled"] <- lav_fit_tli( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) } if (robust.flag) { indices["cfi.robust"] <- lav_fit_cfi( X2 = XX3, df = df, X2.null = XX3.null, df.null = df.null, c.hat = c.hat, c.hat.null = c.hat.null ) indices["tli.robust"] <- lav_fit_tli( X2 = XX3, df = df, X2.null = XX3.null, df.null = df.null, c.hat = c.hat, c.hat.null = c.hat.null ) } } # 3. 
other # c("nnfi", "rfi", "nfi", "pnfi", "ifi", "rni") if (cfi.other.flag) { indices["nnfi"] <- lav_fit_nnfi(X2 = X2, df = df, X2.null = X2.null, df.null = df.null) indices["rfi"] <- lav_fit_rfi(X2 = X2, df = df, X2.null = X2.null, df.null = df.null) indices["nfi"] <- lav_fit_nfi(X2 = X2, df = df, X2.null = X2.null, df.null = df.null) indices["pnfi"] <- lav_fit_pnfi(X2 = X2, df = df, X2.null = X2.null, df.null = df.null) indices["ifi"] <- lav_fit_ifi(X2 = X2, df = df, X2.null = X2.null, df.null = df.null) indices["rni"] <- lav_fit_rni(X2 = X2, df = df, X2.null = X2.null, df.null = df.null) if (scaled.flag) { indices["nnfi.scaled"] <- lav_fit_nnfi( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) indices["rfi.scaled"] <- lav_fit_rfi( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) indices["nfi.scaled"] <- lav_fit_nfi( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) indices["pnfi.scaled"] <- lav_fit_pnfi( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) indices["ifi.scaled"] <- lav_fit_ifi( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) indices["rni.scaled"] <- lav_fit_rni( X2 = X2.scaled, df = df.scaled, X2.null = X2.null.scaled, df.null = df.null.scaled ) } if (robust.flag) { indices["nnfi.robust"] <- lav_fit_nnfi( X2 = XX3, df = df, X2.null = XX3.null, df.null = df.null, c.hat = c.hat, c.hat.null = c.hat.null ) indices["rni.robust"] <- lav_fit_rni( X2 = XX3, df = df, X2.null = XX3.null, df.null = df.null, c.hat = c.hat, c.hat.null = c.hat.null ) } } # return only those that were requested indices[fit.measures] } # new in 0.6-5 # internal function to check the (external) baseline model, and # return baseline 'test' list if everything checks out (and NULL otherwise) lav_fit_measures_check_baseline <- function(fit.indep = NULL, object = NULL, fit.h1 = NULL) { TEST <- NULL # check if everything is in order if (inherits(fit.indep, "try-error")) { lav_msg_warn(gettext("baseline model estimation failed")) return(NULL) } else if (!inherits(fit.indep, "lavaan")) { lav_msg_warn(gettext( "(user-provided) baseline model is not a fitted lavaan object")) return(NULL) } else if (!fit.indep@optim$converged) { lav_msg_warn(gettext("baseline model did not converge")) return(NULL) } else { # evaluate if estimator/test matches original object # note: we do not need to check for 'se', as it may be 'none' sameTest <- all(object@Options$test == fit.indep@Options$test) if (!sameTest) { lav_msg_warn(gettextf( "Baseline model was using test(s) = %1$s, but original model was using test(s) = %2$s. Refitting baseline model!", lav_msg_view(fit.indep@Options$test, "none"), lav_msg_view(object@Options$test, "none"))) } sameEstimator <- (object@Options$estimator == fit.indep@Options$estimator) if (!sameEstimator) { lav_msg_warn(gettextf( "Baseline model was using estimator = %1$s, but original model was using estimator = %2$s. 
Refitting baseline model!", dQuote(fit.indep@Options$estimator), dQuote(object@Options$estimator))) } if (!sameTest || !sameEstimator) { lavoptions <- object@Options lavoptions$estimator <- object@Options$estimator lavoptions$se <- "none" lavoptions$baseline <- FALSE lavoptions$check.start <- FALSE lavoptions$check.post <- FALSE lavoptions$check.vcov <- FALSE lavoptions$test <- object@Options$test fit.indep <- try( lavaan(fit.indep, slotOptions = lavoptions, slotData = object@Data, slotSampleStats = object@SampleStats, sloth1 = object@h1, slotCache = object@Cache, verbose = FALSE ), silent = TRUE ) # try again TEST <- lav_fit_measures_check_baseline( fit.indep = fit.indep, object = object ) } else { # extract what we need TEST <- fit.indep@test } } # converged lavaan object # TDJ: Check for user-supplied h1.model (here, the fit.h1= argument) # Similar to BASELINE model, use the following priority: # 1. user-provided h1 model # 2. h1 model in @external slot # 3. default h1 model (already in @h1 slot, no update necessary) #FIXME? user-supplied h1 model in object might be in fit.indep, too user_h1_exists <- FALSE # 1. user-provided h1 model if (!is.null(fit.h1)) { stopifnot(inherits(fit.h1, "lavaan")) user_h1_exists <- TRUE # 2. h1 model in @external slot } else if (!is.null(object@external$h1.model)) { stopifnot(inherits(object@external$h1.model, "lavaan")) fit.h1 <- object@external$h1.model user_h1_exists <- TRUE } if (user_h1_exists) { ## update @test slot TEST <- lav_update_test_custom_h1(lav_obj_h0 = fit.indep, lav_obj_h1 = fit.h1)@test } TEST } lavaan/R/lav_constraints.R0000644000176200001440000002472014627656441015275 0ustar liggesuserslav_constraints_parse <- function(partable = NULL, constraints = NULL, theta = NULL, debug = FALSE) { if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } # just in case we do not have a $free column in partable if (is.null(partable$free)) { partable$free <- seq_len(length(partable$lhs)) } # from the partable: free parameters if (!is.null(theta)) { # nothing to do } else if (!is.null(partable$est)) { theta <- partable$est[partable$free > 0L] } else if (!is.null(partable$start)) { theta <- partable$start[partable$free > 0L] } else { theta <- rep(0, length(partable$lhs)) } # number of free (but possibly constrained) parameters npar <- length(theta) # parse the constraints if (is.null(constraints)) { LIST <- NULL } else if (!is.character(constraints)) { lav_msg_stop(gettext("constraints should be a string")) } else { FLAT <- lavParseModelString(constraints) CON <- attr(FLAT, "constraints") LIST <- list() if (length(CON) > 0L) { lhs <- unlist(lapply(CON, "[[", "lhs")) op <- unlist(lapply(CON, "[[", "op")) rhs <- unlist(lapply(CON, "[[", "rhs")) LIST$lhs <- c(LIST$lhs, lhs) LIST$op <- c(LIST$op, op) LIST$rhs <- c(LIST$rhs, rhs) } else { lav_msg_stop(gettext("no constraints found in constraints argument")) } } # simple equality constraints? ceq.simple <- FALSE if (!is.null(partable$unco)) { ceq.simple <- TRUE } # variable definitions def.function <- lav_partable_constraints_def(partable, con = LIST, debug = debug ) # construct ceq/ciq functions ceq.function <- lav_partable_constraints_ceq(partable, con = LIST, debug = debug ) # linear or nonlinear?
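# note: linearity is detected numerically rather than symbolically:
# the Jacobian of the constraint function is evaluated at two random
# points, and rows that coincide at both points are treated as linear
# (see lav_constraints_linear_idx() below).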
ceq.linear.idx <- lav_constraints_linear_idx( func = ceq.function, npar = npar ) ceq.nonlinear.idx <- lav_constraints_nonlinear_idx( func = ceq.function, npar = npar ) # inequalities cin.function <- lav_partable_constraints_ciq(partable, con = LIST, debug = debug ) # linear or nonlinear? cin.linear.idx <- lav_constraints_linear_idx( func = cin.function, npar = npar ) cin.nonlinear.idx <- lav_constraints_nonlinear_idx( func = cin.function, npar = npar ) # Jacobians if (!is.null(body(ceq.function))) { ceq.JAC <- try(lav_func_jacobian_complex( func = ceq.function, x = theta ), silent = TRUE) if (inherits(ceq.JAC, "try-error")) { # eg. pnorm() ceq.JAC <- lav_func_jacobian_simple(func = ceq.function, x = theta) } # constants # do we have a non-zero 'rhs' elements? FIXME!!! is this reliable?? ceq.rhs <- -1 * ceq.function(numeric(npar)) # evaluate constraints ceq.theta <- ceq.function(theta) } else { ceq.JAC <- matrix(0, nrow = 0L, ncol = npar) ceq.rhs <- numeric(0L) ceq.theta <- numeric(0L) } if (!is.null(body(cin.function))) { cin.JAC <- try(lav_func_jacobian_complex( func = cin.function, x = theta ), silent = TRUE) if (inherits(cin.JAC, "try-error")) { # eg. pnorm() cin.JAC <- lav_func_jacobian_simple(func = cin.function, x = theta) } # constants # do we have a non-zero 'rhs' elements? FIXME!!! is this reliable?? cin.rhs <- -1 * cin.function(numeric(npar)) # evaluate constraints cin.theta <- cin.function(theta) } else { cin.JAC <- matrix(0, nrow = 0L, ncol = npar) cin.rhs <- numeric(0L) cin.theta <- numeric(0L) } # shortcut flags ceq.linear.flag <- length(ceq.linear.idx) > 0L ceq.nonlinear.flag <- length(ceq.nonlinear.idx) > 0L ceq.flag <- ceq.linear.flag || ceq.nonlinear.flag cin.linear.flag <- length(cin.linear.idx) > 0L cin.nonlinear.flag <- length(cin.nonlinear.idx) > 0L cin.flag <- cin.linear.flag || cin.nonlinear.flag ceq.only.flag <- ceq.flag && !cin.flag cin.only.flag <- cin.flag && !ceq.flag ceq.linear.only.flag <- (ceq.linear.flag && !ceq.nonlinear.flag && !cin.flag) ceq.simple.only <- ceq.simple && !ceq.flag && !cin.flag # additional info if ceq.linear.flag if (ceq.linear.flag) { ## NEW: 18 nov 2014: handle general *linear* constraints ## ## see Nocedal & Wright (2006) 15.3 ## - from x to x.red: ## x.red <- MASS::ginv(Q2) %*% (x - Q1 %*% solve(t(R)) %*% b) ## or ## x.red <- as.numeric((x - b %*% qr.coef(QR,diag(npar))) %*% Q2) ## ## - from x.red to x ## x <- as.numeric(Q1 %*% solve(t(R)) %*% b + Q2 %*% x.red) ## or ## x <- as.numeric(b %*% qr.coef(QR, diag(npar))) + ## as.numeric(Q2 %*% x.red) ## ## we write eq.constraints.K = Q2 ## eq.constraints.k0 = b %*% qr.coef(QR, diag(npar))) # compute range+null space of the jacobion (JAC) of the constraint # matrix # JAC <- lav_func_jacobian_complex(func = ceq.function, # x = lavpartable$start[lavpartable$free > 0L] QR <- qr(t(ceq.JAC)) ranK <- QR$rank Q <- qr.Q(QR, complete = TRUE) # Q1 <- Q[,1:ranK, drop = FALSE] # range space # Q2 <- Q[,-seq_len(ranK), drop = FALSE] # null space # R <- qr.R(QR) ceq.JAC.NULL <- Q[, -seq_len(ranK), drop = FALSE] if (all(ceq.rhs == 0)) { ceq.rhs.NULL <- numeric(npar) } else { tmp <- qr.coef(QR, diag(npar)) NA.idx <- which(is.na(rowSums(tmp))) # catch NAs if (length(NA.idx) > 0L) { tmp[NA.idx, ] <- 0 } ceq.rhs.NULL <- as.numeric(ceq.rhs %*% tmp) } } else { ceq.JAC.NULL <- matrix(0, 0L, 0L) ceq.rhs.NULL <- numeric(0L) } # if simple equalities only, create 'K' matrix ceq.simple.K <- matrix(0, 0, 0) if (ceq.simple.only) { n.unco <- max(partable$unco) n.free <- max(partable$free) ceq.simple.K <- matrix(0, nrow = 
n.unco, ncol = n.free) ##### ##### FIXME ! ##### idx.free <- partable$free[partable$free > 0] for (k in 1:n.unco) { c <- idx.free[k] ceq.simple.K[k, c] <- 1 } } # dummy jacobian 'function' ceq.jacobian <- function() NULL cin.jacobian <- function() NULL OUT <- list( def.function = def.function, ceq.function = ceq.function, ceq.JAC = ceq.JAC, ceq.jacobian = ceq.jacobian, ceq.rhs = ceq.rhs, ceq.theta = ceq.theta, ceq.linear.idx = ceq.linear.idx, ceq.nonlinear.idx = ceq.nonlinear.idx, ceq.linear.flag = ceq.linear.flag, ceq.nonlinear.flag = ceq.nonlinear.flag, ceq.flag = ceq.flag, ceq.linear.only.flag = ceq.linear.only.flag, ceq.JAC.NULL = ceq.JAC.NULL, ceq.rhs.NULL = ceq.rhs.NULL, ceq.simple.only = ceq.simple.only, ceq.simple.K = ceq.simple.K, cin.function = cin.function, cin.JAC = cin.JAC, cin.jacobian = cin.jacobian, cin.rhs = cin.rhs, cin.theta = cin.theta, cin.linear.idx = cin.linear.idx, cin.nonlinear.idx = cin.nonlinear.idx, cin.linear.flag = cin.linear.flag, cin.nonlinear.flag = cin.nonlinear.flag, cin.flag = cin.flag, cin.only.flag = cin.only.flag ) OUT } lav_constraints_linear_idx <- function(func = NULL, npar = NULL) { if (is.null(func) || is.null(body(func))) { return(integer(0L)) } # seed 1: rnorm A0 <- lav_func_jacobian_complex(func = func, x = rnorm(npar)) # seed 2: rnorm A1 <- lav_func_jacobian_complex(func = func, x = rnorm(npar)) A0minA1 <- A0 - A1 linear <- apply(A0minA1, 1, function(x) all(x == 0)) which(linear) } lav_constraints_nonlinear_idx <- function(func = NULL, npar = NULL) { if (is.null(func) || is.null(body(func))) { return(integer(0L)) } # seed 1: rnorm A0 <- lav_func_jacobian_complex(func = func, x = rnorm(npar)) # seed 2: rnorm A1 <- lav_func_jacobian_complex(func = func, x = rnorm(npar)) A0minA1 <- A0 - A1 linear <- apply(A0minA1, 1, function(x) all(x == 0)) which(!linear) } # FIXME: is there a more elegant/robust way to do this?? 
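# (the heuristic used throughout: a linear constraint has a constant
# Jacobian, so evaluating it at two random parameter vectors and
# comparing detects nonlinearity with probability one; a minimal sketch,
# with jac() standing in for a numerical Jacobian such as
# lav_func_jacobian_complex():
#   A0 <- jac(func, rnorm(npar)); A1 <- jac(func, rnorm(npar))
#   is.linear <- rowSums(abs(A0 - A1)) == 0
# )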
lav_constraints_check_linear <- function(model) { # seed 1: rnorm A.ceq <- A.cin <- matrix(0, model@nx.free, 0) if (!is.null(body(model@ceq.function))) { A.ceq <- t(lav_func_jacobian_complex(func = model@ceq.function, x = rnorm(model@nx.free))) } if (!is.null(body(model@cin.function))) { A.cin <- t(lav_func_jacobian_complex(func = model@cin.function, x = rnorm(model@nx.free))) } A0 <- cbind(A.ceq, A.cin) # seed 2: rnorm A.ceq <- A.cin <- matrix(0, model@nx.free, 0) if (!is.null(body(model@ceq.function))) { A.ceq <- t(lav_func_jacobian_complex(func = model@ceq.function, x = rnorm(model@nx.free))) } if (!is.null(body(model@cin.function))) { A.cin <- t(lav_func_jacobian_complex(func = model@cin.function, x = rnorm(model@nx.free))) } A1 <- cbind(A.ceq, A.cin) A0minA1 <- all.equal(A0, A1) if (is.logical(A0minA1) && A0minA1 == TRUE) { return(TRUE) } else { return(FALSE) } } # check if the equality constraints are 'simple' (a == b) lav_constraints_check_simple <- function(lavmodel = NULL) { ones <- (lavmodel@ceq.JAC == 1 | lavmodel@ceq.JAC == -1) simple <- all(lavmodel@ceq.rhs == 0) && all(apply(lavmodel@ceq.JAC != 0, 1, sum) == 2) && all(apply(ones, 1, sum) == 2) && length(lavmodel@ceq.nonlinear.idx) == 0 # TRUE or FALSE simple } lav_constraints_R2K <- function(lavmodel = NULL, R = NULL) { # constraint matrix if (!is.null(lavmodel)) { R <- lavmodel@ceq.JAC } stopifnot(!is.null(R)) npar.full <- NCOL(R) npar.red <- npar.full - NROW(R) K <- diag(npar.full) for (i in 1:NROW(R)) { idx1 <- which(R[i, ] == 1) idx2 <- which(R[i, ] == -1) K[idx2, idx1] <- 1 } # remove redundant columns neg.idx <- which(colSums(R) < 0) K <- K[, -neg.idx] K } lav_constraints_lambda_pre <- function(lavobject = NULL, method = "Don") { # compute factor 'pre' so that pre %*% g = lambda method <- tolower(method) R <- lavobject@Model@con.jac[, ] if (is.null(R) || length(R) == 0L) { return(numeric(0L)) } INFO <- lavTech(lavobject, "information.first.order") npar <- nrow(INFO) # Don 1985 if (method == "don") { R.plus <- MASS::ginv(R) # construct augmented matrix Z <- rbind( cbind(INFO, t(R)), cbind(R, matrix(0, nrow = nrow(R), ncol = nrow(R))) ) Z.plus <- MASS::ginv(Z) P.star <- Z.plus[1:npar, 1:npar] PRE <- t(R.plus) %*% (diag(npar) - INFO %*% P.star) # Bentler EQS manual } else if (method == "bentler") { INFO.inv <- solve(INFO) PRE <- solve(R %*% INFO.inv %*% t(R)) %*% R %*% INFO.inv } PRE } lavaan/R/lav_export_estimation.R0000644000176200001440000001537214627656441016502 0ustar liggesusers#' lav_export_estimation #' #' lavaan provides a range of optimization methods with the optim.method argument #' (nlminb, BFGS, L-BFGS-B, GN, and nlminb.constr). `lav_export_estimation` #' allows exporting objects and functions necessary to pass a lavaan model into #' any optimizer that takes a combination of (1) starting values, (2) fit-function, #' (3) gradient-function, and (4) upper and lower bounds. This allows testing new #' optimization frameworks. #' #' @param lavaan_model a fitted lavaan model #' @returns List with: #' \itemize{ #' \item get_coef - When working with equality constraints, lavaan internally #' uses some transformations. get_coef is a function that recreates the coef #' function for the parameters.
#' \item starting_values - starting_values to be used in the optimization #' \item objective_function - objective function, expecting the current parameter #' values and the lavaan model #' \item gradient_function - gradient function, expecting the current parameter #' values and the lavaan model #' \item lower - lower bounds for parameters #' \item upper - upper bound for parameters #' } #' @export #' @examples #' library(lavaan) #' model <- " #' # latent variable definitions #' ind60 =~ x1 + x2 + x3 #' dem60 =~ y1 + y2 + y3 + y4 #' dem65 =~ y5 + a*y6 + y7 + y8 #' #' # regressions #' dem60 ~ ind60 #' dem65 ~ ind60 + dem60 #' " #' #' fit <- sem(model, #' data = PoliticalDemocracy, #' do.fit = FALSE #' ) #' #' est <- lav_export_estimation(lavaan_model = fit) #' #' # The starting values are: #' est$starting_values #' # Note that these do not have labels (and may also differ from coef(fit) #' # in case of equality constraints): #' coef(fit) #' # To get the same parameters, use: #' est$get_coef( #' parameter_values = est$starting_values, #' lavaan_model = fit #' ) #' #' # The objective function can be used to compute the fit at the current estimates: #' est$objective_function( #' parameter_values = est$starting_values, #' lavaan_model = fit #' ) #' #' # The gradient function can be used to compute the gradients at the current estimates: #' est$gradient_function( #' parameter_values = est$starting_values, #' lavaan_model = fit #' ) #' #' # Together, these elements provide the means to estimate the parameters with a large #' # range of optimizers. For simplicity, here is an example using optim: #' est_fit <- optim( #' par = est$starting_values, #' fn = est$objective_function, #' gr = est$gradient_function, #' lavaan_model = fit, #' method = "BFGS" #' ) #' est$get_coef( #' parameter_values = est_fit$par, #' lavaan_model = fit #' ) #' #' # This is identical to #' coef(sem(model, #' data = PoliticalDemocracy #' )) #' #' # Example using ridge regularization for parameter a #' fn_ridge <- function(parameter_values, lavaan_model, est, lambda) { #' return(est$objective_function( #' parameter_values = parameter_values, #' lavaan_model = lavaan_model #' ) + lambda * parameter_values[6]^2) #' } #' ridge_fit <- optim( #' par = est$get_coef(est$starting_values, #' lavaan_model = fit #' ), #' fn = fn_ridge, #' lavaan_model = fit, #' est = est, #' lambda = 10 #' ) #' est$get_coef( #' parameter_values = ridge_fit$par, #' lavaan_model = fit #' ) lav_export_estimation <- function(lavaan_model) { # define objective function objective_function <- function(parameter_values, lavaan_model) { if (lavaan_model@Model@eq.constraints) { parameter_values <- as.numeric(lavaan_model@Model@eq.constraints.K %*% parameter_values) + lavaan_model@Model@eq.constraints.k0 } # create group list GLIST <- lav_model_x2GLIST(lavaan_model@Model, x = parameter_values) # get objective function **value** fx <- lav_model_objective( lavmodel = lavaan_model@Model, GLIST = GLIST, lavsamplestats = lavaan_model@SampleStats, lavdata = lavaan_model@Data, lavcache = list() ) if (lavaan_model@Options$estimator == "PML") { # rescale objective function value fx <- fx / lavaan_model@SampleStats@ntotal } if (!is.finite(fx)) { fx.group <- attr(fx, "fx.group") fx <- 1e+20 attr(fx, "fx.group") <- fx.group } return(fx) } # define gradient function gradient_function <- function(parameter_values, lavaan_model) { if (lavaan_model@Model@eq.constraints) { parameter_values <- as.numeric(lavaan_model@Model@eq.constraints.K %*% parameter_values) + 
lavaan_model@Model@eq.constraints.k0 } GLIST <- lav_model_x2GLIST(lavaan_model@Model, x = parameter_values ) current.verbose <- lav_verbose() if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose), TRUE) dx <- lav_model_gradient( lavmodel = lavaan_model@Model, GLIST = GLIST, lavsamplestats = lavaan_model@SampleStats, lavdata = lavaan_model@Data, lavcache = list(), type = "free", group.weight = !(lavaan_model@SampleStats@missing.flag || lavaan_model@Options$estimator == "PML"), ceq.simple = lavaan_model@Model@ceq.simple.only ) lav_verbose(current.verbose) if (lavaan_model@Model@eq.constraints) { dx <- as.numeric(dx %*% lavaan_model@Model@eq.constraints.K) } if (lavaan_model@Options$estimator == "PML") { dx <- dx / lavaan_model@SampleStats@ntotal } return(dx) } # extract bounds lower <- lavaan_model@ParTable$lower[lavaan_model@ParTable$free > 0L] upper <- lavaan_model@ParTable$upper[lavaan_model@ParTable$free > 0L] # get starting values starting_values <- lav_model_get_parameters(lavaan_model@Model) if (lavaan_model@Model@eq.constraints) { starting_values <- as.numeric((starting_values - lavaan_model@Model@eq.constraints.k0) %*% lavaan_model@Model@eq.constraints.K) } # lavaan internally uses transformations when there are equality constraints. # As a result, the parameters are not necessarily those one would expect when # fitting the model. The parameters can be translated with the following function: get_coef <- function(parameter_values, lavaan_model) { if (lavaan_model@Model@eq.constraints) { parameter_values <- as.numeric(lavaan_model@Model@eq.constraints.K %*% parameter_values) + lavaan_model@Model@eq.constraints.k0 } names(parameter_values) <- lav_partable_labels(lavaan_model@ParTable, type = "free" ) return(parameter_values) } # Now we just return everything so that the user can use their own optimizer return( list( get_coef = get_coef, starting_values = starting_values, objective_function = objective_function, gradient_function = gradient_function, lower = lower, upper = upper ) ) } lavaan/R/lav_cor.R0000644000176200001440000001526114630305220013465 0ustar liggesusers# user-visible routine to # compute polychoric/polyserial/... correlations # # YR 17 Sept 2013 # # - YR 26 Nov 2013: big change - make it a wrapper around lavaan() # estimator = "none" means two.step (starting values) lavCor <- function(object, # lav.data options ordered = NULL, group = NULL, missing = "listwise", ov.names.x = NULL, sampling.weights = NULL, # lavaan options se = "none", test = "none", estimator = "two.step", baseline = FALSE, # other options (for lavaan) ..., cor.smooth = FALSE, cor.smooth.tol = 1e-04, # was 1e-06 in <0.6-14 output = "cor") { # shortcut if object = lavaan object if (inherits(object, "lavaan")) { out <- lav_cor_output(object, output = output) return(out) } # check estimator estimator <- tolower(estimator) if (estimator %in% c("two.step", "two.stage")) { estimator <- "none" } # se? se <- tolower(se) output <- tolower(output) if (se != "none") { if (output %in% c("cor", "cov", "sampstat", "th", "thresholds")) { lav_msg_warn(gettext("argument `se' is ignored since standard errors are not needed for the requested `output'")) se <- "none" } } # extract sampling.weights.normalization from dots (for lavData() call) dots <- list(...) 
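# note: a minimal usage sketch for this function (hypothetical data
# frame 'mydata' with ordered indicators):
#   lavCor(mydata, ordered = c("item1", "item2"), output = "cor")
# with the default estimator = "two.step" (mapped to "none" above), the
# returned matrix holds two-step polychoric/polyserial correlations.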
sampling.weights.normalization <- "total" if (!is.null(dots$sampling.weights.normalization)) { sampling.weights.normalization <- dots$sampling.weights.normalization } # check object class if (inherits(object, "lavData")) { lav.data <- object } else if (inherits(object, "data.frame") || inherits(object, "matrix")) { object <- as.data.frame(object) NAMES <- names(object) if (!is.null(group)) { NAMES <- NAMES[-match(group, NAMES)] } if (!is.null(sampling.weights)) { NAMES <- NAMES[-match(sampling.weights, NAMES)] } if (is.logical(ordered)) { ordered.flag <- ordered if (ordered.flag) { ordered <- NAMES if (length(ov.names.x) > 0L) { ordered <- ordered[-which(ordered %in% ov.names.x)] } } else { ordered <- character(0L) } } else if (is.null(ordered)) { ordered <- character(0L) } else if (!is.character(ordered)) { lav_msg_stop(gettext("ordered argument must be a character vector")) } else if (length(ordered) == 1L && nchar(ordered) == 0L) { ordered <- character(0L) } else { # check if all names in "ordered" occur in the dataset? missing.idx <- which(!ordered %in% NAMES) if (length(missing.idx) > 0L) { lav_msg_warn(gettextf( "ordered variable(s): %s could not be found in the data and will be ignored", lav_msg_view(ordered[missing.idx]))) } } lav.data <- lavData( data = object, group = group, ov.names = NAMES, ordered = ordered, sampling.weights = sampling.weights, ov.names.x = ov.names.x, lavoptions = list( missing = missing, sampling.weights.normalization = sampling.weights.normalization ) ) } else { lav_msg_stop(gettext("lavCor can not handle objects of class"), paste(class(object), collapse = " ") ) } # set default estimator if se != "none" categorical <- any(lav.data@ov$type == "ordered") if (se != "none" && estimator == "none") { if (categorical) { estimator <- "WLSMV" } else { estimator <- "ML" } } # extract more partable options from dots meanstructure <- FALSE fixed.x <- FALSE mimic <- "lavaan" conditional.x <- FALSE if (!is.null(dots$meanstructure)) { meanstructure <- dots$meanstructure } if (lav.data@ngroups > 1L || categorical || tolower(missing) %in% c("ml", "fiml", "direct")) { meanstructure <- TRUE } if (!is.null(dots$fixed.x)) { fixed.x <- dots$fixed.x } if (!is.null(dots$mimic)) { mimic <- dots$mimic } if (!is.null(dots$conditional.x)) { conditional.x <- dots$conditional.x } # override, only for backwards compatibility (eg moments() in JWileymisc) # if(missing %in% c("ml", "fiml")) { # meanstructure = TRUE # } # generate partable for unrestricted model PT.un <- lav_partable_unrestricted( lavobject = NULL, lavdata = lav.data, lavoptions = list( meanstructure = meanstructure, fixed.x = fixed.x, conditional.x = conditional.x, # sampling.weights.normalization = sampling.weights.normalization, group.w.free = FALSE, missing = missing, estimator = estimator, mimic = mimic ), sample.cov = NULL, sample.mean = NULL, sample.th = NULL ) FIT <- lavaan( slotParTable = PT.un, slotData = lav.data, model.type = "unrestricted", missing = missing, baseline = baseline, h1 = TRUE, # must be TRUE! se = se, test = test, estimator = estimator, ... ) out <- lav_cor_output(FIT, output = output) # smooth correlation matrix? 
(only if output = "cor") if (output == "cor" && cor.smooth) { tmp.attr <- attributes(out) out <- cov2cor(lav_matrix_symmetric_force_pd(out, tol = cor.smooth.tol)) # we lost most of the attributes attributes(out) <- tmp.attr } out } lav_cor_output <- function(object, output = "cor") { # check output if (output %in% c("cor", "cov")) { out <- lavInspect(object, "sampstat") if (object@Data@ngroups == 1L) { if (object@Model@conditional.x) { out <- out$res.cov } else { out <- out$cov } if (output == "cor") { out <- cov2cor(out) } } else { if (object@Model@conditional.x) { out <- lapply(out, "[[", "res.cov") } else { out <- lapply(out, "[[", "cov") } if (output == "cor") { out <- lapply(out, cov2cor) } } } else if (output %in% c("th", "thresholds")) { out <- inspect(object, "sampstat") if (object@Data@ngroups == 1L) { if (object@Model@conditional.x) { out <- out$res.th } else { out <- out$th } } else { if (object@Model@conditional.x) { out <- lapply(out, "[[", "res.th") } else { out <- lapply(out, "[[", "th") } } } else if (output %in% c("sampstat")) { out <- inspect(object, "sampstat") } else if (output %in% c( "parameterEstimates", "pe", "parameterestimates", "est" )) { out <- standardizedSolution(object) } else { out <- object } out } lavaan/R/lav_lavaan_step11_optim.R0000644000176200001440000002652614627656441016573 0ustar liggesuserslav_lavaan_step11_estoptim <- function(lavdata = NULL, # nolint lavmodel = NULL, lavcache = NULL, lavsamplestats = NULL, lavoptions = NULL, lavpartable = NULL) { # # # # # # # # # # # # # # # # 11. est + lavoptim # # # # # # # # # # # # # # # # # if lavoptions$do.fit and lavoptions$estimator not "none" and # lavmodel$nx.free > 0 # select case lavoptions$optim.method # case "noniter" # try x <- lav_optim_noniter(...) # case "em" # if nlevels < 2L *** error *** # try x <- lav_mvnorm_cluster_em_h0(...) # case "gn" # try x <- lav_optim_gn(...) # case else # set 1 in lavoptions$optim.attempts if it wasn't specified # try x <- lav_model_estimate(...) # if not successful and optim.attempts > 1L # try x <- lav_model_estimate(...) with # options$optim.parscale = "standardized" # if not successful and optim.attempts > 2L # try x <- lav_model_estimate(...) with start = "simple" # if not successful and optim.attempts > 3L # try x <- lav_model_estimate(...) with # options$optim.parscale = "standardized" and start = "simple" # end select # if x not successfully computed # ** warning ** # set starting values and appropriate attributes in x # in case of non-linear constraints: store final con.jac and # con.lambda in lavmodel # store parameters in lavmodel # store parameters in partable$est # else # initialize x and attributes (iterations, converged, warn.txt, # control, dx) of x # try fx <- lav_model_objective # if not successful # fx = NA_real_ # attribute fx.group of fx = NA_real_ # store fx in attribute "fx" of x # set lavpartable$est to starting values # if lavoptions$optim.force.converged set attribute converged of x to TRUE # store optimization info in lavoptim x <- NULL if (lavoptions$do.fit && lavoptions$estimator != "none" && lavmodel@nx.free > 0L) { if (lav_verbose()) { cat("lavoptim ... start:\n") } # non-iterative methods (fabin, ...)
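# (e.g. FABIN-style estimators that solve for the parameter values in
# closed form instead of iterating; selected via optim.method = "noniter")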
if (lavoptions$optim.method == "noniter") { x <- try( lav_optim_noniter( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavpartable = lavpartable, lavoptions = lavoptions ), silent = TRUE ) # EM for multilevel models } else if (lavoptions$optim.method == "em") { # multilevel only for now stopifnot(lavdata@nlevels > 1L) x <- try( lav_mvnorm_cluster_em_h0( lavsamplestats = lavsamplestats, lavdata = lavdata, lavimplied = NULL, lavpartable = lavpartable, lavmodel = lavmodel, lavoptions = lavoptions, fx.tol = lavoptions$em.fx.tol, dx.tol = lavoptions$em.dx.tol, max.iter = lavoptions$em.iter.max ), silent = TRUE ) # Gauss-Newton } else if (lavoptions$optim.method == "gn") { # only tested for DLS (for now) x <- try( lav_optim_gn( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavpartable = lavpartable, lavoptions = lavoptions ), silent = TRUE ) # Quasi-Newton } else { # for backwards compatibility (<0.6) if (is.null(lavoptions$optim.attempts)) { lavoptions$optim.attempts <- 1L } # try 1 if (lav_verbose()) { cat("attempt 1 -- default options\n") } x <- try( lav_model_estimate( lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavcache = lavcache ), silent = TRUE ) # try 2: optim.parscale = "standardize" (new in 0.6-7) if (lavoptions$optim.attempts > 1L && lavoptions$rstarts == 0L && (inherits(x, "try-error") || !attr(x, "converged"))) { lavoptions2 <- lavoptions lavoptions2$optim.parscale <- "standardized" if (lav_verbose()) { str(x) cat("attempt 2 -- optim.parscale = \"standardized\"\n") } x <- try( lav_model_estimate( lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions2, lavcache = lavcache ), silent = TRUE ) } # try 3: start = "simple" if (lavoptions$optim.attempts > 2L && lavoptions$rstarts == 0L && (inherits(x, "try-error") || !attr(x, "converged"))) { if (lav_verbose()) { str(x) cat("attempt 3 -- start = \"simple\"\n") } x <- try( lav_model_estimate( lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, start = "simple", lavcache = lavcache ), silent = TRUE ) } # try 4: start = "simple" + optim.parscale = "standardize" if (lavoptions$optim.attempts > 3L && lavoptions$rstarts == 0L && (inherits(x, "try-error") || !attr(x, "converged"))) { lavoptions2 <- lavoptions lavoptions2$optim.parscale <- "standardized" if (lav_verbose()) { str(x) cat( "attempt 4 -- optim.parscale = \"standardized\" + ", "start = \"simple\"\n" ) } x <- try( lav_model_estimate( lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions2, start = "simple", lavcache = lavcache ), silent = TRUE ) } # random starts? -- new in 0.6-18 # run this even if we already have a converged solution # perhaps we find a better solution? 
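# (sketch, not run: random starts are requested via the rstarts option --
#  e.g. lavaan::cfa(model, data = dat, rstarts = 10L), an assumption about the
#  public interface -- each run below re-estimates with start = "random" and
#  the converged run with the lowest objective value fx wins.)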
if (lavoptions$rstarts > 0L) { x.rstarts <- vector("list", length = lavoptions$rstarts) if (lav_verbose()) { str(x) cat("trying again with random starts (", lavoptions$rstarts, " in total):\n", sep = "" ) } for (i in seq_len(lavoptions$rstarts)) { if (lav_verbose()) { cat("-- random start run: ", i, "\n") } x.rstarts[[i]] <- try( lav_model_estimate( lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, start = "random", lavcache = lavcache ), silent = TRUE ) } # pick best solution (if any) x.noerror <- x.rstarts[!sapply( x.rstarts, inherits, "try-error" )] x.converged <- vector("list", length = 0L) fx.rstarts <- numeric(0L) if (length(x.noerror) > 0L) { x.converged <- x.noerror[sapply(x.noerror, "attr", "converged")] } if (length(x.converged) > 0L) { fx.rstarts <- sapply(x.converged, "attr", "fx") x.best <- x.converged[[which.min(fx.rstarts)]] fx.best <- attr(x.best, "fx")[1] # if we did not find a converged solution, use x.best if (inherits(x, "try-error") || !attr(x, "converged")) { x <- x.best # if we already had a converged solution, only replace # if fx.best is better than attr(x, "fx")[1] } else { if (fx.best < attr(x, "fx")[1]) { x <- x.best } } } attr(x, "x.rstarts") <- x.rstarts } # random starts } # optimization failed with error if (inherits(x, "try-error")) { warn.txt <- gettext("Model estimation FAILED! Returning starting values.") x <- lav_model_get_parameters( lavmodel = lavmodel, type = "free" ) # starting values attr(x, "iterations") <- 0L attr(x, "converged") <- FALSE attr(x, "warn.txt") <- warn.txt attr(x, "control") <- lavoptions$control attr(x, "dx") <- numeric(0L) fx <- as.numeric(NA) attr(fx, "fx.group") <- as.numeric(NA) attr(x, "fx") <- fx } # if a warning was produced, say it here warn.txt <- attr(x, "warn.txt") if (nchar(warn.txt) > 0L) { lav_msg_warn(warn.txt) } # in case of non-linear constraints: store final con.jac and con.lambda # in lavmodel if (!is.null(attr(x, "con.jac"))) { lavmodel@con.jac <- attr(x, "con.jac") } if (!is.null(attr(x, "con.lambda"))) { lavmodel@con.lambda <- attr(x, "con.lambda") } # store parameters in lavmodel lavmodel <- lav_model_set_parameters(lavmodel, x = as.numeric(x)) # store parameters in @ParTable$est lavpartable$est <- lav_model_get_parameters( lavmodel = lavmodel, type = "user", extra = TRUE ) if (lav_verbose()) { cat("lavoptim ... done.\n") } } else { x <- numeric(0L) attr(x, "iterations") <- 0L attr(x, "converged") <- FALSE attr(x, "warn.txt") <- "" attr(x, "control") <- lavoptions$control attr(x, "dx") <- numeric(0L) fx <- try(lav_model_objective( lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache ), silent = TRUE) if (!inherits(fx, "try-error")) { attr(x, "fx") <- fx } else { fx <- as.numeric(NA) attr(fx, "fx.group") <- as.numeric(NA) attr(x, "fx") <- fx } lavpartable$est <- lavpartable$start } # should we fake/force convergence? (eg.
to enforce the # computation of a test statistic) if (lavoptions$optim.force.converged) { attr(x, "converged") <- TRUE } # store optimization info in lavoptim lavoptim <- list() x2 <- x attributes(x2) <- NULL lavoptim$x <- x2 lavoptim$dx <- attr(x, "dx") lavoptim$npar <- length(x) lavoptim$iterations <- attr(x, "iterations") lavoptim$converged <- attr(x, "converged") lavoptim$warn.txt <- attr(x, "warn.txt") lavoptim$parscale <- attr(x, "parscale") lavoptim$partrace <- attr(x, "partrace") fx.copy <- fx <- attr(x, "fx") attributes(fx) <- NULL lavoptim$fx <- fx lavoptim$fx.group <- attr(fx.copy, "fx.group") if (!is.null(attr(fx.copy, "logl.group"))) { lavoptim$logl.group <- attr(fx.copy, "logl.group") lavoptim$logl <- sum(lavoptim$logl.group) } else { lavoptim$logl.group <- as.numeric(NA) lavoptim$logl <- as.numeric(NA) } lavoptim$control <- attr(x, "control") if (!is.null(attr(x, "x.rstarts"))) { lavoptim$x.rstarts <- attr(x, "x.rstarts") } lavpartable <- lav_partable_set_cache(lavpartable, force = TRUE) list( lavoptim = lavoptim, lavmodel = lavmodel, lavpartable = lavpartable, x = x ) } lavaan/R/lav_partable_check.R0000644000176200001440000000653514627656441015651 0ustar liggesusers# check if the partable is complete/consistent # we may have added intercepts/variances (user = 0), fixed to zero lav_partable_check <- function(partable, categorical = FALSE) { check <- TRUE # check for empty table - or should we WARN? if (length(partable$lhs) == 0) { return(check) } # get observed/latent variables ov.names <- vnames(partable, "ov.nox") # no need to specify exo?? lv.names <- vnames(partable, "lv") all.names <- c(ov.names, lv.names) ov.names.ord <- vnames(partable, "ov.ord") nlevels <- lav_partable_nlevels(partable) # if categorical, we should have some ov.names.ord if (categorical && length(ov.names.ord) == 0L) { check <- FALSE lav_msg_warn(gettext("parameter table does not contain thresholds")) } # we should have a (residual) variance for *each* ov/lv # note: if lavaanify() has been used, this is always TRUE var.idx <- which(partable$op == "~~" & partable$lhs == partable$rhs) missing.idx <- which(is.na(match(all.names, partable$lhs[var.idx]))) if (length(missing.idx) > 0L) { check <- FALSE lav_msg_warn(gettextf( "parameter table does not contain (residual) variances for one or more variables: %s", lav_msg_view(all.names[missing.idx]))) } # meanstructure? meanstructure <- any(partable$op == "~1") # if meanstructure, check for missing intercepts # note if lavaanify() has been used, this is always TRUE if (meanstructure) { # we should have an intercept for *each* ov/lv int.idx <- which(partable$op == "~1") missing.idx <- which(is.na(match(all.names, partable$lhs[int.idx]))) if (length(missing.idx) > 0L) { check <- FALSE lav_msg_warn(gettextf( "parameter table does not contain intercepts for one or more variables: %s", lav_msg_view(all.names[missing.idx]))) } } # ok, now the 'real' checks # do we have added (residual) variances (user = 0) that are fixed to zero? # this is not necessarily problematic! # eg. in latent change score models # therefore, we do NOT give a warning # var.fixed <- which(partable$op == "~~" & # partable$lhs == partable$rhs & # partable$user == 0 & # partable$free == 0) # if(length(var.fixed) > 0L) { # check <- FALSE # if(warn) { # warning("lavaan WARNING: missing (residual) variances are set to", # " zero: [", paste(partable$lhs[var.fixed], collapse = " "), "]") # } # } # do we have added intercepts (user = 0) that are fixed to zero?
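# (descriptive note: user == 0 marks rows that lavaanify() added itself
#  rather than rows written by the user; the int.fixed filter below also
#  demands free == 0L and ustart == 0L, so freely estimated or user-fixed
#  intercepts are never flagged.)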
# this is not necessarily problematic; perhaps only for # exogenous variables? ov.ind <- unique(partable$rhs[partable$op == "=~"]) lv.names <- unique(partable$lhs[partable$op == "=~"]) int.fixed <- which(partable$op == "~1" & partable$user == 0L & partable$free == 0L & partable$ustart == 0L & # ignore block/group 1 -- typically within level exo !(partable$block %% nlevels == 1L) & # do not include factors !partable$lhs %in% lv.names & # do not include ordered variables !partable$lhs %in% ov.names.ord & # do not include indicators !partable$lhs %in% ov.ind) if (length(int.fixed) > 0L) { check <- FALSE lav_msg_warn(gettext("automatically added intercepts are set to zero:"), lav_msg_view(partable$lhs[int.fixed])) } # return check code check } lavaan/R/lav_muthen1984.R0000644000176200001440000003513414627656441014555 0ustar liggesusers# This function was written in January 2012 -- Yves Rosseel # First success: Friday 20 Jan 2012: the standard errors for # thresholds and polychoric correlations (in an # unrestricted/saturated model) are spot on! # Second success: Saturday 9 June 2012: support for mixed (ordinal + metric) # variables; thanks to the delta method to get the ACOV # right (see H matrix) # Third success: Monday 2 July 2012: support for fixed.x covariates # # Friday 13 July 2012: merge exo + non-exo code # Monday 16 July 2012: fixed sign numeric in WLS.W; I think we got it right now # YR 26 Nov 2015: move step1 + step2 to external functions # muthen1984 <- function(Data = NULL, ov.names = NULL, ov.types = NULL, ov.levels = NULL, ov.names.x = character(0L), eXo = NULL, wt = NULL, WLS.W = TRUE, zero.add = c(0.5, 0.0), zero.keep.margins = TRUE, zero.cell.warn = FALSE, zero.cell.tables = TRUE, group = 1L) { # group only for error messages # just in case Data is a vector Data <- as.matrix(Data) nvar <- NCOL(Data) N <- NROW(Data) num.idx <- which(ov.types == "numeric") ord.idx <- which(ov.types == "ordered") nexo <- length(ov.names.x) if (nexo > 0L) stopifnot(NCOL(eXo) == nexo) pstar <- nvar * (nvar - 1) / 2 if (lav_verbose()) { cat("\nPreparing for WLS estimation -- STEP 1 + 2\n") cat("Number of endogenous variables: ", nvar, "\n") cat("Endogenous variable names:\n") print(ov.names) cat("\n") cat("Endogenous ov types:\n") print(ov.types) cat("\n") cat("Endogenous ov levels:\n ") print(ov.levels) cat("\n") cat("Number of exogenous variables: ", nexo, "\n") cat("Exogenous variable names:\n") print(ov.names.x) cat("\n") } step1 <- lav_samplestats_step1( Y = Data, wt = wt, ov.names = ov.names, ov.types = ov.types, ov.levels = ov.levels, ov.names.x = ov.names.x, eXo = eXo, scores.flag = WLS.W, group = group ) FIT <- step1$FIT TH <- step1$TH TH.NOX <- step1$TH.NOX TH.IDX <- step1$TH.IDX TH.NAMES <- step1$TH.NAMES VAR <- step1$VAR SLOPES <- step1$SLOPES SC.TH <- step1$SC.TH SC.SL <- step1$SC.SL SC.VAR <- step1$SC.VAR th.start.idx <- step1$th.start.idx th.end.idx <- step1$th.end.idx # rm SC.VAR columns from ordinal variables if (WLS.W && length(ord.idx) > 0L) { SC.VAR <- SC.VAR[, -ord.idx, drop = FALSE] } if (lav_verbose()) { cat("STEP 1: univariate statistics\n") cat("Threshold + means:\n") TTHH <- unlist(TH) names(TTHH) <- unlist(TH.NAMES) print(TTHH) cat("Slopes (if any):\n") colnames(SLOPES) <- ov.names.x rownames(SLOPES) <- ov.names print(SLOPES) cat("Variances:\n") names(VAR) <- ov.names print(unlist(VAR)) } # stage two -- correlations if (lav_verbose()) cat("\n\nSTEP 2: covariances/correlations:\n") COR <- lav_samplestats_step2( UNI = FIT, wt = wt, ov.names = ov.names, zero.add = zero.add, 
zero.keep.margins = zero.keep.margins, zero.cell.warn = zero.cell.warn, zero.cell.tables = zero.cell.tables ) empty.cell.tables <- attr(COR, "zero.cell.tables") attr(COR, "zero.cell.tables") <- NULL if (lav_verbose()) { colnames(COR) <- rownames(COR) <- ov.names print(COR) } if (!WLS.W) { # we do not need the asymptotic variance matrix if (any("numeric" %in% ov.types)) { COV <- cor2cov(R = COR, sds = sqrt(unlist(VAR))) } else { COV <- COR } out <- list( TH = TH, SLOPES = SLOPES, VAR = VAR, COR = COR, COV = COV, SC = NULL, TH.NOX = TH.NOX, TH.NAMES = TH.NAMES, TH.IDX = TH.IDX, INNER = NULL, A11 = NULL, A12 = NULL, A21 = NULL, A22 = NULL, WLS.W = NULL, H = NULL, zero.cell.tables = matrix("", 0, 2) ) return(out) } # stage three -- WLS.W SC.COR <- matrix(0, N, pstar) PSTAR <- matrix(0, nvar, nvar) PSTAR[lav_matrix_vech_idx(nvar, diagonal = FALSE)] <- 1:pstar A11.size <- NCOL(SC.TH) + NCOL(SC.SL) + NCOL(SC.VAR) # A21 A21 <- matrix(0, pstar, A11.size) H22 <- diag(pstar) # for the delta rule H21 <- matrix(0, pstar, A11.size) # for this one, we need new scores: for each F_ij (cor), the # scores with respect to the TH, VAR, ... for (j in seq_len(nvar - 1L)) { for (i in (j + 1L):nvar) { pstar.idx <- PSTAR[i, j] th.idx_i <- th.start.idx[i]:th.end.idx[i] th.idx_j <- th.start.idx[j]:th.end.idx[j] if (nexo > 0L) { sl.idx_i <- NCOL(SC.TH) + seq(i, by = nvar, length.out = nexo) sl.idx_j <- NCOL(SC.TH) + seq(j, by = nvar, length.out = nexo) var.idx_i <- NCOL(SC.TH) + NCOL(SC.SL) + match(i, num.idx) var.idx_j <- NCOL(SC.TH) + NCOL(SC.SL) + match(j, num.idx) } else { var.idx_i <- NCOL(SC.TH) + match(i, num.idx) var.idx_j <- NCOL(SC.TH) + match(j, num.idx) } if (ov.types[i] == "numeric" && ov.types[j] == "numeric") { SC.COR.UNI <- lav_bvreg_cor_scores( rho = COR[i, j], fit.y1 = FIT[[i]], fit.y2 = FIT[[j]], wt = wt ) # RHO if (is.null(wt)) { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho } else { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho / wt # unweight } # TH A21[pstar.idx, th.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.mu.y1 ) A21[pstar.idx, th.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.mu.y2 ) # SL if (nexo > 0L) { A21[pstar.idx, sl.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y1 ) A21[pstar.idx, sl.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y2 ) } # VAR A21[pstar.idx, var.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.var.y1 ) A21[pstar.idx, var.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.var.y2 ) # H21 only needed for VAR H21[pstar.idx, var.idx_i] <- (sqrt(VAR[j]) * COR[i, j]) / (2 * sqrt(VAR[i])) H21[pstar.idx, var.idx_j] <- (sqrt(VAR[i]) * COR[i, j]) / (2 * sqrt(VAR[j])) H22[pstar.idx, pstar.idx] <- sqrt(VAR[i]) * sqrt(VAR[j]) } else if (ov.types[i] == "numeric" && ov.types[j] == "ordered") { SC.COR.UNI <- lav_bvmix_cor_scores( rho = COR[i, j], fit.y1 = FIT[[i]], fit.y2 = FIT[[j]], wt = wt ) # RHO if (is.null(wt)) { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho } else { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho / wt # unweight } # TH A21[pstar.idx, th.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.mu.y1 ) A21[pstar.idx, th.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.th.y2 ) # SL if (nexo > 0L) { A21[pstar.idx, sl.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y1 ) A21[pstar.idx, sl.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y2 ) } # VAR A21[pstar.idx, var.idx_i] <- 
lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.var.y1 ) # H21 only need for VAR H21[pstar.idx, var.idx_i] <- COR[i, j] / (2 * sqrt(VAR[i])) H22[pstar.idx, pstar.idx] <- sqrt(VAR[i]) } else if (ov.types[j] == "numeric" && ov.types[i] == "ordered") { SC.COR.UNI <- lav_bvmix_cor_scores( rho = COR[i, j], fit.y1 = FIT[[j]], fit.y2 = FIT[[i]], wt = wt ) # RHO if (is.null(wt)) { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho } else { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho / wt # unweight } # TH A21[pstar.idx, th.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.mu.y1 ) A21[pstar.idx, th.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.th.y2 ) # SL if (nexo > 0L) { A21[pstar.idx, sl.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y1 ) A21[pstar.idx, sl.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y2 ) } # VAR A21[pstar.idx, var.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.var.y1 ) # H21 only for VAR H21[pstar.idx, var.idx_j] <- COR[i, j] / (2 * sqrt(VAR[j])) H22[pstar.idx, pstar.idx] <- sqrt(VAR[j]) } else if (ov.types[i] == "ordered" && ov.types[j] == "ordered") { # polychoric correlation SC.COR.UNI <- lav_bvord_cor_scores( rho = COR[i, j], fit.y1 = FIT[[i]], fit.y2 = FIT[[j]], wt = wt ) # RHO if (is.null(wt)) { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho } else { SC.COR[, pstar.idx] <- SC.COR.UNI$dx.rho / wt # unweight } # TH A21[pstar.idx, th.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.th.y1 ) A21[pstar.idx, th.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.th.y2 ) # SL if (nexo > 0L) { A21[pstar.idx, sl.idx_i] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y1 ) A21[pstar.idx, sl.idx_j] <- lav_matrix_crossprod( SC.COR[, pstar.idx], SC.COR.UNI$dx.sl.y2 ) } # NO VAR } } } if (!is.null(wt)) { SC.COR <- SC.COR * wt # reweight } # stage three SC <- cbind(SC.TH, SC.SL, SC.VAR, SC.COR) INNER <- lav_matrix_crossprod(SC) # A11 # new approach (2 June 2012): A11 is just a 'sparse' version of # (the left upper block of) INNER A11 <- matrix(0, A11.size, A11.size) if (!is.null(wt)) { INNER2 <- lav_matrix_crossprod(SC / wt, SC) } else { INNER2 <- INNER } for (i in 1:nvar) { th.idx <- th.start.idx[i]:th.end.idx[i] sl.idx <- integer(0L) var.idx <- integer(0L) if (nexo > 0L) { sl.idx <- NCOL(SC.TH) + seq(i, by = nvar, length.out = nexo) # sl.end.idx <- (i*nexo); sl.start.idx <- (i-1L)*nexo + 1L # sl.idx <- NCOL(SC.TH) + (sl.start.idx:sl.end.idx) } if (ov.types[i] == "numeric") { var.idx <- NCOL(SC.TH) + NCOL(SC.SL) + match(i, num.idx) } a11.idx <- c(th.idx, sl.idx, var.idx) A11[a11.idx, a11.idx] <- INNER2[a11.idx, a11.idx] } ##### DEBUG ###### #### for numeric VAR only, use hessian to get better residual var value #### # for(i in 1:nvar) { # if(ov.types[i] == "numeric") { # tmp.npar <- FIT[[i]]$npar # e.var <- FIT[[i]]$theta[ tmp.npar ] # sq.e.var <- sqrt(e.var) # sq.e.var6 <- sq.e.var*sq.e.var*sq.e.var*sq.e.var*sq.e.var*sq.e.var # dx2.var <- N/(2*e.var*e.var) - 1/sq.e.var6 * (e.var * N) # # var.idx <- NCOL(SC.TH) + NCOL(SC.SL) + match(i, num.idx) # A11[var.idx, var.idx] <- -1 * dx2.var # } # } ################ ################ # A22 (diagonal) A22 <- matrix(0, pstar, pstar) for (i in seq_len(pstar)) { if (is.null(wt)) { A22[i, i] <- sum(SC.COR[, i] * SC.COR[, i], na.rm = TRUE) } else { A22[i, i] <- sum(SC.COR[, i] * SC.COR[, i] / wt, na.rm = TRUE) } } # A12 (zero) A12 <- matrix(0, NROW(A11), NCOL(A22)) # B <- rbind( cbind(A11,A12), # cbind(A21,A22) 
) # we invert B as a block-triangular matrix (0.5-23) # # B.inv = A11^{-1} 0 # -A22^{-1} A21 A11^{-1} A22^{-1} # # invert A A11.inv <- try(solve(A11), silent = TRUE) if (inherits(A11.inv, "try-error")) { # brute force A11.inv <- MASS::ginv(A11) lav_msg_warn(gettext("trouble constructing W matrix; used generalized inverse for A11 submatrix")) } # invert da22 <- diag(A22) if (any(da22 == 0)) { lav_msg_warn(gettext("trouble constructing W matrix; used generalized inverse for A22 submatrix")) A22.inv <- MASS::ginv(A22) } else { A22.inv <- A22 diag(A22.inv) <- 1 / da22 } # lower-left block A21.inv <- -A22.inv %*% A21 %*% A11.inv # upper-left block remains zero A12.inv <- A12 # construct B.inv B.inv <- rbind( cbind(A11.inv, A12.inv), cbind(A21.inv, A22.inv) ) # weight matrix (correlation metric) WLS.W <- B.inv %*% INNER %*% t(B.inv) # COV matrix? if (any("numeric" %in% ov.types)) { COV <- cor2cov(R = COR, sds = sqrt(unlist(VAR))) # construct H matrix to apply delta rule (for the tranformation # of rho_ij to cov_ij) H11 <- diag(NROW(A11)) H12 <- matrix(0, NROW(A11), NCOL(A22)) # H22 and H21 already filled in H <- rbind( cbind(H11, H12), cbind(H21, H22) ) WLS.W <- H %*% WLS.W %*% t(H) } else { COV <- COR H <- diag(NCOL(WLS.W)) } # reverse sign numeric TH (because we provide -mu in WLS.obs) # (WOW, it took me a LOOONGGG time to realize this!) # YR 16 July 2012 # NOTE: prior to 0.5-17, we used num.idx (instead of NUM.idx) # which is WRONG if we have more than one threshold per variable # (thanks to Sacha Epskamp for spotting this!) if (length(num.idx) > 0L) { NUM.idx <- which(unlist(TH.IDX) == 0L) WLS.W[NUM.idx, ] <- -WLS.W[NUM.idx, ] WLS.W[, NUM.idx] <- -WLS.W[, NUM.idx] } out <- list( TH = TH, SLOPES = SLOPES, VAR = VAR, COR = COR, COV = COV, SC = SC, TH.NOX = TH.NOX, TH.NAMES = TH.NAMES, TH.IDX = TH.IDX, INNER = INNER, A11 = A11, A12 = A12, A21 = A21, A22 = A22, WLS.W = WLS.W, H = H, zero.cell.tables = empty.cell.tables ) out } lavaan/R/lav_start.R0000644000176200001440000011361114627656441014061 0ustar liggesusers# lav_start.R: provide starting values for model parameters # # YR 30/11/2010: initial version # YR 08/06/2011: add fabin3 start values for factor loadings # YR 14 Jan 2014: moved to lav_start.R # fill in the 'ustart' column in a User data.frame with reasonable # starting values, using the sample data lav_start <- function(start.method = "default", lavpartable = NULL, lavsamplestats = NULL, lavh1 = NULL, # fixme: only use lavh1? model.type = "sem", mimic = "lavaan", reflect = FALSE, # rotation only samplestats.flag = TRUE, order.lv.by = "none" # rotation only ) { # check arguments stopifnot(is.list(lavpartable)) # categorical? categorical <- any(lavpartable$op == "|") # correlation structure? correlation <- any(lavpartable$op == "~*~") # conditional.x? conditional.x <- any(lavpartable$exo == 1L & lavpartable$op %in% c("~", "<~")) # ord.names <- unique(lavpartable$lhs[ lavpartable$op == "|" ]) # nlevels? nlevels <- lav_partable_nlevels(lavpartable) # reflect/order.lv.by if (is.null(reflect)) { reflect <- FALSE } if (is.null(order.lv.by)) { order.lv.by <- "index" } # check start.method if (mimic == "lavaan") { start.initial <- "lavaan" } else if (mimic == "Mplus") { start.initial <- "mplus" } else { # FIXME: use LISREL/EQS/AMOS/.... 
schemes start.initial <- "lavaan" } # start.method start.user <- NULL if (is.character(start.method)) { start.method.lc <- tolower(start.method) if (start.method.lc != "simple" && !samplestats.flag) { start.method.lc <- start.method <- "simple" } if (start.method.lc == "default") { # nothing to do } else if (start.method == "simple") { start <- numeric(length(lavpartable$ustart)) # if(categorical || correlation) { start[which(lavpartable$op == "=~")] <- 0.7 # } else { # start[ which(lavpartable$op == "=~") ] <- 1.0 # } start[which(lavpartable$op == "~*~")] <- 1.0 ov.names.ord <- vnames(lavpartable, "ov.ord") var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs & !(lavpartable$lhs %in% ov.names.ord)) start[var.idx] <- 1.0 user.idx <- which(!is.na(lavpartable$ustart)) start[user.idx] <- lavpartable$ustart[user.idx] return(start) # assuming fixed.x = FALSE! } else if (start.method == "est") { return(lavpartable$est) } else if (start.method.lc %in% c("simple", "lavaan", "mplus")) { start.initial <- start.method.lc } else { lav_msg_stop(gettext("unknown value for start argument")) } } else if (is.list(start.method)) { start.user <- start.method } else if (is.numeric(start.method)) { nx.free <- sum(lavpartable$free > 0L) if (length(start.method) != nx.free) { lav_msg_stop(gettextf( "start argument contains %1$s elements; but parameter table expects %2$s free parameters.", length(start.method), nx.free)) } lavpartable$ustart[lavpartable$free > 0L] <- start.method } else if (inherits(start.method, "lavaan")) { start.user <- parTable(start.method) } # check model list elements, if provided if (!is.null(start.user)) { if (is.null(start.user$lhs) || is.null(start.user$op) || is.null(start.user$rhs)) { lav_msg_stop(gettext( "problem with start argument: model list does not contain all elements: lhs/op/rhs")) } if (!is.null(start.user$est)) { # excellent, we got an est column; nothing to do } else if (!is.null(start.user$start)) { # no est column, but we use the start column start.user$est <- start.user$start } else if (!is.null(start.user$ustart)) { # not ideal, but better than nothing start.user$est <- start.user$ustart } else { lav_msg_stop(gettext( "problem with start argument: could not find est/start column in model list")) } } # global settings # 0. everything is zero start <- numeric(length(lavpartable$ustart)) # 1. =~ factor loadings: if (categorical || correlation) { # if std.lv=TRUE, 0.8 is too large start[which(lavpartable$op == "=~")] <- 0.7 } else { start[which(lavpartable$op == "=~")] <- 1.0 } # 2. (residual) lv variances for latent variables lv.names <- vnames(lavpartable, "lv") # all groups lv.var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs %in% lv.names & lavpartable$lhs == lavpartable$rhs) start[lv.var.idx] <- 0.05 # start[lv.var.idx] <- 0.5 # new in 0.6-2? (for optim.parscale = "stand") # 3. latent response scales (if any) delta.idx <- which(lavpartable$op == "~*~") start[delta.idx] <- 1.0 # group-specific settings ngroups <- lav_partable_ngroups(lavpartable) # for now, if no group column, add one (again), until we rewrite # this function to handle block/group hybrid settings if (is.null(lavpartable$group) && ngroups == 1L) { lavpartable$group <- rep(1L, length(lavpartable$lhs)) lavpartable$group[lavpartable$block == 0L] <- 0L } for (g in 1:ngroups) { # group values (not necessarily 1,2,...
anymore) group.values <- lav_partable_group_values(lavpartable) # info from user model for this group if (conditional.x) { ov.names <- vnames(lavpartable, "ov.nox", group = group.values[g]) } else { ov.names <- vnames(lavpartable, "ov", group = group.values[g]) } if (categorical) { ov.names.num <- vnames(lavpartable, "ov.num", group = group.values[g]) ov.names.ord <- vnames(lavpartable, "ov.ord", group = group.values[g]) } else { ov.names.num <- ov.names } lv.names <- vnames(lavpartable, "lv", group = group.values[g]) lv.names.efa <- vnames(lavpartable, "lv.efa", group = group.values[g]) ov.names.x <- vnames(lavpartable, "ov.x", group = group.values[g]) # just for the nlevels >1 case ov.names <- unique(unlist(ov.names)) ov.names.num <- unique(unlist(ov.names.num)) lv.names <- unique(unlist(lv.names)) lv.names.efa <- unique(unlist(lv.names.efa)) ov.names.x <- unique(unlist(ov.names.x)) # residual ov variances (including exo/ind, to be overriden) ov.var.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.num & lavpartable$lhs == lavpartable$rhs) sample.var.idx <- match(lavpartable$lhs[ov.var.idx], ov.names) if (model.type == "unrestricted") { if (!is.null(lavsamplestats@missing.h1[[g]])) { start[ov.var.idx] <- diag(lavsamplestats@missing.h1[[g]]$sigma)[sample.var.idx] } else { start[ov.var.idx] <- diag(lavsamplestats@cov[[g]])[sample.var.idx] } } else { if (start.initial == "mplus") { if (conditional.x && nlevels == 1L) { start[ov.var.idx] <- (1.0 - 0.50) * lavsamplestats@res.var[[1L]][sample.var.idx] } else { start[ov.var.idx] <- (1.0 - 0.50) * lavsamplestats@var[[1L]][sample.var.idx] } } else { if (conditional.x && nlevels == 1L) { start[ov.var.idx] <- (1.0 - 0.50) * diag(lavsamplestats@res.cov[[g]])[sample.var.idx] } else { start[ov.var.idx] <- (1.0 - 0.50) * diag(lavsamplestats@cov[[g]])[sample.var.idx] } } } # 1-fac measurement models: loadings, psi, theta if (start.initial %in% c("lavaan", "mplus") && model.type %in% c("sem", "cfa")) { # fabin3 estimator (2sls) of Hagglund (1982) per factor for (f in lv.names) { # not for efa factors if (f %in% lv.names.efa) { next } lambda.idx <- which(lavpartable$lhs == f & lavpartable$op == "=~" & lavpartable$group == group.values[g]) # standardized? 
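# (aside: std.lv = TRUE is recognised from the parameter table itself --
#  the factor variance row is fixed (free == 0) with ustart == 1; a minimal
#  commented sketch for a hypothetical one-factor table 'pt':
#    v.idx  <- which(pt$lhs == "f" & pt$op == "~~" & pt$rhs == "f")
#    std.lv <- all(pt$free[v.idx] == 0) && all(pt$ustart[v.idx] == 1)
#  which mirrors the check that follows.)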
std.lv <- FALSE var.f.idx <- which(lavpartable$lhs == f & lavpartable$op == "~~" & lavpartable$group == group.values[g] & lavpartable$rhs == f) if (length(var.f.idx) > 0L && all(lavpartable$free[var.f.idx] == 0) && all(lavpartable$ustart[var.f.idx] == 1)) { std.lv <- TRUE } # no second order if (any(lavpartable$rhs[lambda.idx] %in% lv.names)) next # get observed indicators for this latent variable ov.idx <- match(lavpartable$rhs[lambda.idx], ov.names) if (length(ov.idx) > 0L && !any(is.na(ov.idx))) { if (lavsamplestats@missing.flag && nlevels == 1L) { COV <- lavsamplestats@missing.h1[[g]]$sigma[ov.idx, ov.idx, drop = FALSE ] } else { if (conditional.x && nlevels == 1L) { COV <- lavsamplestats@res.cov[[g]][ov.idx, ov.idx, drop = FALSE ] } else { COV <- lavsamplestats@cov[[g]][ov.idx, ov.idx, drop = FALSE ] } } # fabin for 1-factor fabin <- lav_cfa_1fac_fabin(COV, std.lv = std.lv, lambda.only = TRUE, method = "fabin3" ) # factor loadings tmp <- fabin$lambda tmp[!is.finite(tmp)] <- 1.0 # just in case (eg 0/0) # check for negative triad if nvar=3L (new in 0.6-8) if (!is.null(fabin$neg.triad) && fabin$neg.triad) { if (std.lv) { tmp <- rep(0.7, length(tmp)) } else { tmp <- rep(1.0, length(tmp)) } } start[lambda.idx] <- tmp # factor variance # if(!std.lv) { # start[var.f.idx] <- fabin$psi # # if residual var, make smaller # y.idx <- which(lavpartable$lhs == f & # lavpartable$group == group.values[g] & # lavpartable$op == "~") # if(length(y.idx) > 0L) { # # how much explained variance do we expect? # # we take 0.50 # start[var.f.idx] <- 0.5 * start[var.f.idx] # } # # no negative variances (we get these if we have an # # inconsistent triad (eg, covariance signs are +,+,-) # if(start[var.f.idx] < 0) { # start[var.f.idx] <- 0.05 # } # } # NOTE: fabin (sometimes) gives residual variances # that are larger than the original variances... # residual variances -- order? # res.idx <- which(lavpartable$lhs %in% ov.names[ov.idx] & # lavpartable$op == "~~" & # lavpartable$group == group.values[g] & # lavpartable$rhs == lavpartable$lhs) # start[res.idx] <- fabin$theta # negative variances? # neg.idx <- which(start[res.idx] < 0) # if(length(neg.idx) > 0L) { # start[res.idx][neg.idx] <- 0.05 # } } } # fabin3 # efa? 
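# (aside: EFA blocks arise from model syntax such as
#    efa("block1")*f1 + efa("block1")*f2 =~ x1 + x2 + x3 + x4 + x5 + x6
#  -- a hypothetical two-factor block; start values for these factors come
#  from the unrotated lav_efa_extraction() solution computed below, not from
#  the per-factor fabin3 step above.)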
nefa <- lav_partable_nefa(lavpartable) if (nefa > 0L) { efa.values <- lav_partable_efa_values(lavpartable) for (set in seq_len(nefa)) { # determine ov idx for this set ov.efa <- unique(lavpartable$rhs[lavpartable$op == "=~" & lavpartable$block == g & lavpartable$efa == efa.values[set]]) lv.efa <- unique(lavpartable$lhs[lavpartable$op == "=~" & lavpartable$block == g & lavpartable$efa == efa.values[set]]) lambda.idx <- which(lavpartable$lhs %in% lv.efa & lavpartable$op == "=~" & lavpartable$group == group.values[g]) theta.idx <- which(lavpartable$lhs %in% ov.efa & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs & lavpartable$group == group.values[g]) # get observed indicators for these EFA lv variables ov.idx <- match( unique(lavpartable$rhs[lambda.idx]), ov.names ) if (length(ov.idx) > 0L && !any(is.na(ov.idx))) { if (lavsamplestats@missing.flag && nlevels == 1L) { COV <- lavsamplestats@missing.h1[[g]]$sigma[ov.idx, ov.idx, drop = FALSE ] } else { if (conditional.x) { COV <- lavsamplestats@res.cov[[g]][ov.idx, ov.idx, drop = FALSE ] } else { COV <- lavsamplestats@cov[[g]][ov.idx, ov.idx, drop = FALSE ] } } # EFA solution with zero upper-right corner EFA <- lav_efa_extraction( S = COV, nfactors = length(lv.efa), method = "ML", order.lv.by = order.lv.by, # order.lv.by = "none", # reflect = reflect, reflect = FALSE, corner = TRUE ) # factor loadings tmp <- as.numeric(EFA$LAMBDA) tmp[!is.finite(tmp)] <- 1.0 # just in case (eg 0/0) start[lambda.idx] <- tmp # residual variances tmp <- diag(EFA$THETA) tmp[!is.finite(tmp)] <- 1.0 # just in case start[theta.idx] <- tmp } } # set } # efa } # factor loadings if (model.type == "unrestricted") { # fill in 'covariances' from lavsamplestats cov.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~~" & lavpartable$lhs != lavpartable$rhs) lhs.idx <- match(lavpartable$lhs[cov.idx], ov.names) rhs.idx <- match(lavpartable$rhs[cov.idx], ov.names) if (!is.null(lavsamplestats@missing.h1[[g]])) { start[cov.idx] <- lavsamplestats@missing.h1[[g]]$sigma[ cbind(lhs.idx, rhs.idx) ] } else { start[cov.idx] <- lavsamplestats@cov[[g]][ cbind(lhs.idx, rhs.idx) ] } } # variances of ordinal variables - set to 1.0 if (categorical) { ov.var.ord.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.ord & lavpartable$lhs == lavpartable$rhs) start[ov.var.ord.idx] <- 1.0 } # 3g) intercepts/means ov.int.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~1" & lavpartable$lhs %in% ov.names) sample.int.idx <- match(lavpartable$lhs[ov.int.idx], ov.names) if (lavsamplestats@missing.flag && nlevels == 1L) { start[ov.int.idx] <- lavsamplestats@missing.h1[[g]]$mu[sample.int.idx] } else { if (conditional.x && nlevels == 1L) { start[ov.int.idx] <- lavsamplestats@res.int[[g]][sample.int.idx] } else { start[ov.int.idx] <- lavsamplestats@mean[[g]][sample.int.idx] } } # TODo: if marker.int.zero = TRUE, set lv means to marker means, # and the non-marker means to # lavsamplestats@mean[[g]] - LAMBDA %*% ALPHA # where ALPHA = means of the markers # 4g) thresholds th.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "|") if (length(th.idx) > 0L) { th.names.lavpartable <- paste(lavpartable$lhs[th.idx], "|", lavpartable$rhs[th.idx], sep = "" ) th.names.sample <- lavsamplestats@th.names[[g]][lavsamplestats@th.idx[[g]] > 0L] # th.names.sample should identical to # vnames(lavpartable, "th", group = group.values[g]) if (conditional.x && nlevels == 1L) { th.values <- 
lavsamplestats@res.th[[g]][lavsamplestats@th.idx[[g]] > 0L] } else { th.values <- lavsamplestats@th[[g]][lavsamplestats@th.idx[[g]] > 0L] } start[th.idx] <- th.values[match( th.names.lavpartable, th.names.sample )] } # 5g) exogenous `fixed.x' covariates if (length(ov.names.x) > 0) { exo.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.x & lavpartable$rhs %in% ov.names.x) if (!conditional.x) { row.idx <- match(lavpartable$lhs[exo.idx], ov.names) col.idx <- match(lavpartable$rhs[exo.idx], ov.names) if (lavsamplestats@missing.flag && nlevels == 1L) { start[exo.idx] <- lavsamplestats@missing.h1[[g]]$sigma[cbind(row.idx, col.idx)] # using slightly smaller starting values for free # variance/covariances (fixed.x = FALSE); # this somehow avoids false convergence in saturated models nobs <- lavsamplestats@nobs[[g]] this.idx <- which(seq_len(length(lavpartable$free)) %in% exo.idx & lavpartable$free > 0L) start[this.idx] <- start[this.idx] * (nobs - 1) / nobs } else { start[exo.idx] <- lavsamplestats@cov[[g]][cbind(row.idx, col.idx)] } } else { # cov.x row.idx <- match(lavpartable$lhs[exo.idx], ov.names.x) col.idx <- match(lavpartable$rhs[exo.idx], ov.names.x) start[exo.idx] <- lavsamplestats@cov.x[[g]][cbind( row.idx, col.idx )] # mean.x exo.int.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~1" & lavpartable$lhs %in% ov.names.x) int.idx <- match(lavpartable$lhs[exo.int.idx], ov.names.x) start[exo.int.idx] <- lavsamplestats@mean.x[[g]][int.idx] } } # 6b. exogenous lv variances if single indicator -- new in 0.5-21 lv.x <- vnames(lavpartable, "lv.x", group = group.values[g]) # FIXME: also for multilevel? lv.x <- unique(unlist(lv.x)) if (length(lv.x) > 0L) { for (ll in lv.x) { ind.idx <- which(lavpartable$op == "=~" & lavpartable$lhs == ll & lavpartable$group == group.values[g]) if (length(ind.idx) == 1L) { single.ind <- lavpartable$rhs[ind.idx] single.fvar.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == ll & lavpartable$rhs == ll & lavpartable$group == group.values[g]) single.var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == single.ind & lavpartable$rhs == single.ind & lavpartable$group == group.values[g]) # user-defined residual variance # fixme: we take the first, in case we have multiple matches # (eg nlevels) single.var <- lavpartable$ustart[single.var.idx[1]] if (is.na(single.var)) { single.var <- 1 } ov.idx <- match(single.ind, ov.names) if (conditional.x && nlevels == 1L) { ov.var <- diag(lavsamplestats@res.cov[[g]])[ov.idx] } else { ov.var <- diag(lavsamplestats@cov[[g]])[ov.idx] } # take (1 - (rvar/ov.var) * ov.var tmp <- (1 - (single.var / ov.var)) * ov.var # just in case if (is.na(tmp) || tmp < 0.05) { tmp <- 0.05 } start[single.fvar.idx] <- tmp } } } # 7g) regressions "~" # new in 0.6-10 if (length(lv.names) == 0L && nlevels == 1L && !conditional.x) { # observed only reg.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~") if (length(reg.idx) > 0L) { eqs.y <- unique(lavpartable$lhs[reg.idx]) ny <- length(eqs.y) for (i in seq_len(ny)) { y.name <- eqs.y[i] start.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~" & lavpartable$lhs == y.name) x.names <- lavpartable$rhs[start.idx] COV <- lavsamplestats@cov[[g]] y.idx <- match(y.name, ov.names) x.idx <- match(x.names, ov.names) S.xx <- COV[x.idx, x.idx, drop = FALSE] S.xy <- COV[x.idx, y.idx, drop = FALSE] # regression coefficient(s) beta.i <- try(solve(S.xx, S.xy), silent = TRUE) if 
(inherits(beta.i, "try-error")) { start[start.idx] <- beta.i <- rep(0, length(start.idx)) } else { start[start.idx] <- drop(beta.i) } # residual variance res.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~~" & lavpartable$lhs == y.name & lavpartable$rhs == y.name) res.val <- COV[y.idx, y.idx] - drop(crossprod(beta.i, S.xy)) if (res.val > 0.001 * COV[y.idx, y.idx] && res.val < 0.999 * COV[y.idx, y.idx]) { start[res.idx] <- res.val } else { # do nothing (keep what we have) } # intercept int.idx <- which(lavpartable$group == group.values[g] & lavpartable$op == "~1" & lavpartable$lhs == y.name) if (length(int.idx) > 0L) { MEAN <- lavsamplestats@mean[[g]] Ybar <- MEAN[y.idx] Xbar <- MEAN[x.idx] int.val <- Ybar - drop(crossprod(beta.i, Xbar)) if (is.finite(int.val)) { start[int.idx] <- int.val } } } } } # # 8 latent variances (new in 0.6-2) # lv.names.y <- vnames(lavpartable, "lv.y", group = group.values[g]) # lv.names.x <- vnames(lavpartable, "lv.x", group = group.values[g]) # # multilevel? take first level only # if(is.list(lv.names.y)) { # lv.names.y <- unlist(lv.names.y) # for now # } # if(is.list(lv.names.x)) { # lv.names.x <- unlist(lv.names.x) # for now # } # lv.names.xy <- unique(c(lv.names.x, lv.names.y)) # if(length(lv.names.xy) > 0L) { # free.var.idx <- which(lavpartable$op == "~~" & # lavpartable$lhs %in% lv.names.xy & # lavpartable$rhs == lavpartable$lhs & # lavpartable$group == group.values[g]) # if(length(free.var.idx) > 0L) { # this.lv.names <- lavpartable$lhs[free.var.idx] # for(v in seq_len(length(free.var.idx))) { # # single marker item? # ind.idx <- which(lavpartable$op == "=~" & # lavpartable$lhs %in% this.lv.names[v] & # #lavpartable$rhs %in% ov.names.num & # lavpartable$free == 0L & # lavpartable$group == group.values[g]) # if(length(ind.idx) == 0) { # next # } else if(length(ind.idx) > 1L) { # # FIXME! perhaps a random effect? do something clever # next # } else if(length(ind.idx) == 1L) { # marker.ind <- lavpartable$rhs[ind.idx] # ov.idx <- match(marker.ind, ov.names) # if(conditional.x) { # ov.var <- diag(lavsamplestats@res.cov[[g]])[ov.idx] # } else { # ov.var <- diag(lavsamplestats@cov[[g]])[ov.idx] # } # # # exogenous? assume rel = 0.50 # lambda <- lavpartable$ustart[ind.idx] # tmp <- (0.50 * ov.var)/lambda^2 # if(this.lv.names[v] %in% lv.names.y) { # # endogenous, assume R2 = 0.2 # tmp <- 0.8 * tmp # } # # within variance? # if(nlevels > 1L && # lavpartable$level[ free.var.idx[v] ] == 1L) { # tmp <- tmp * 0.75 # } # # between variance? # if(nlevels > 1L && # lavpartable$level[ free.var.idx[v] ] > 1L) { # tmp <- tmp * 0.25 # } # # just in case # if(is.na(tmp) || tmp < 0.05) { # tmp <- 0.05 # } # start[ free.var.idx[v] ] <- tmp # } # } # v # } # free.var.idx # } # lv var # nlevels > 1L if (nlevels > 1L) { level.values <- lav_partable_level_values(lavpartable) # Note: ov.names.x contains all levels within a group! 
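# (illustrative, not run: when no h1 solution is available, level-1 start
#  values are read from the pooled-within matrix and level-2 values from the
#  between matrix, conceptually
#    S.w <- lavsamplestats@YLp[[g]][[2]]$S.PW.start  # within
#    S.b <- lavsamplestats@YLp[[g]][[l]]$Sigma.B     # between
#  the same slots accessed in the loop below.)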
if (length(ov.names.x) > 0) { for (l in 1:nlevels) { # block number block <- (g - 1L) * nlevels + l this.block.x <- lav_partable_vnames(lavpartable, "ov.x", block = block ) this.block.ov <- lav_partable_vnames(lavpartable, "ov", block = block ) if (length(this.block.x) == 0L) { next } # var/cov exo.idx <- which(lavpartable$group == group.values[g] & lavpartable$level == level.values[l] & lavpartable$op == "~~" & lavpartable$lhs %in% this.block.x & lavpartable$rhs %in% this.block.x) if (is.null(lavh1$implied$cov[[1]])) { row.idx <- match(lavpartable$lhs[exo.idx], ov.names) col.idx <- match(lavpartable$rhs[exo.idx], ov.names) if (l == 1L) { COV <- lavsamplestats@YLp[[g]][[2]]$S.PW.start } else { COV <- lavsamplestats@YLp[[g]][[l]]$Sigma.B } } else { row.idx <- match(lavpartable$lhs[exo.idx], this.block.ov) col.idx <- match(lavpartable$rhs[exo.idx], this.block.ov) COV <- lavh1$implied$cov[[block]] } # make sure starting values for variances are positive neg.idx <- which(diag(COV) < 0.001) if (length(neg.idx) > 0L) { diag(COV)[neg.idx] <- 0.001 } start[exo.idx] <- COV[cbind(row.idx, col.idx)] # intercepts ov.int.idx <- which(lavpartable$group == group.values[g] & lavpartable$level == level.values[l] & lavpartable$op == "~1" & lavpartable$lhs %in% this.block.x) if (is.null(lavh1$implied$mean[[1]])) { idx <- match(lavpartable$lhs[ov.int.idx], ov.names) if (l == 1L) { INT <- lavsamplestats@YLp[[g]][[2]]$Mu.W } else { INT <- lavsamplestats@YLp[[g]][[l]]$Mu.B.start } } else { idx <- match(lavpartable$lhs[ov.int.idx], this.block.ov) INT <- lavh1$implied$mean[[block]] } start[ov.int.idx] <- INT[idx] # new in 0.6-12 # very special case: conditional.x with a combination of # splitted-x and regular-x # here, we must: # 1) replace var/cov of splitted-x by *residual* varcov # after regressing out regular-x # 2) replace means of splitted-x by intercepts # 3) fill splitted-x ~ regular-x regression coefficients if (conditional.x) { if (is.null(lavh1$implied$cov[[l]])) { lav_msg_stop(gettext( "lavh1 information is needed; please rerun with h1 = TRUE")) } blocks.within.group <- (g - 1L) * nlevels + seq_len(nlevels) OTHER.BLOCK.NAMES <- lav_partable_vnames(lavpartable, "ov", block = blocks.within.group[-block]) ov.names.x.block <- this.block.x idx <- which(ov.names.x.block %in% OTHER.BLOCK.NAMES) if (length(idx) > 0L) { ov.names.x.block <- ov.names.x.block[-idx] } ov.names.x1 <- this.block.x[!this.block.x %in% ov.names.x.block] ov.names.x2 <- ov.names.x.block nx1 <- length(ov.names.x1) # splitted x nx2 <- length(ov.names.x2) # regular x if (nx1 > 0L && nx2 > 0L) { # COV c1.idx <- match(ov.names.x1, this.block.ov) c2.idx <- match(ov.names.x2, this.block.ov) COV.Y <- COV[c1.idx, c1.idx, drop = FALSE] COV.X <- COV[c2.idx, c2.idx, drop = FALSE] COV.YX <- COV[c1.idx, c2.idx, drop = FALSE] COV.XY <- COV[c2.idx, c1.idx, drop = FALSE] COV.XinvYX <- solve(COV.X, COV.XY) RES.COV <- COV.Y - COV.YX %*% COV.XinvYX res.cov.idx <- which(lavpartable$group == group.values[g] & lavpartable$level == level.values[l] & lavpartable$op == "~~" & lavpartable$lhs %in% ov.names.x1 & lavpartable$rhs %in% ov.names.x1) row.idx <- match(lavpartable$lhs[res.cov.idx], ov.names.x1) col.idx <- match(lavpartable$rhs[res.cov.idx], ov.names.x1) start[res.cov.idx] <- RES.COV[cbind(row.idx, col.idx)] # INT INT.Y <- INT[c1.idx] INT.X <- INT[c2.idx] RES.INT <- INT.Y - t(COV.XinvYX) %*% INT.X res.int.idx <- which(lavpartable$group == group.values[g] & lavpartable$level == level.values[l] & lavpartable$op == "~1" & lavpartable$lhs %in% ov.names.x1) idx 
<- match(lavpartable$lhs[res.int.idx], ov.names.x1) start[res.int.idx] <- RES.INT[idx] # REG reg.idx <- which(lavpartable$group == group.values[g] & lavpartable$level == level.values[l] & lavpartable$op == "~" & lavpartable$lhs %in% ov.names.x1 & lavpartable$rhs %in% ov.names.x2) row.idx <- match(lavpartable$lhs[reg.idx], ov.names.x1) col.idx <- match(lavpartable$rhs[reg.idx], ov.names.x2) start[reg.idx] <- t(COV.XinvYX)[cbind(row.idx, col.idx)] } # special case } # conditional.x } # levels } # fixed.x } # nlevels > 1L } # groups # group weights group.idx <- which(lavpartable$lhs == "group" & lavpartable$op == "%") if (length(group.idx) > 0L) { ngroups <- length(group.idx) # prop <- rep(1/ngroups, ngroups) # use last group as reference # start[group.idx] <- log(prop/prop[ngroups]) # poisson version start[group.idx] <- log(rep(lavsamplestats@ntotal / ngroups, ngroups)) } # growth models: # - compute starting values for mean latent variables # - compute starting values for variance latent variables if (start.initial %in% c("lavaan", "mplus") && model.type == "growth") { ### DEBUG ONLY # lv.var.idx <- which(lavpartable$op == "~~" & # lavpartable$lhs %in% lv.names & # lavpartable$lhs == lavpartable$rhs) ### DEBUG ONLY # lv.int.idx <- which(lavpartable$op == "~1" & # lavpartable$lhs %in% lv.names) } # adjust if outside bounds -- new in 0.6-6 if (!is.null(lavpartable$lower)) { bad.idx <- which(start < lavpartable$lower) if (length(bad.idx)) { start[bad.idx] <- lavpartable$lower[bad.idx] } } if (!is.null(lavpartable$upper)) { bad.idx <- which(start > lavpartable$upper) if (length(bad.idx)) { start[bad.idx] <- lavpartable$upper[bad.idx] } } # override if the model syntax contains explicit starting values (free only) # user.idx <- which(!is.na(lavpartable$ustart) & # lavpartable$user != 7L) # new in 0.6-7, if rotation and # # and we change the order of lv's user.idx <- which(!is.na(lavpartable$ustart) & lavpartable$free > 0L) start[user.idx] <- lavpartable$ustart[user.idx] # override if a user list with starting values is provided # we only look at the 'est' column for now if (!is.null(start.user)) { if (is.null(lavpartable$group)) { lavpartable$group <- rep(1L, length(lavpartable$lhs)) } if (is.null(start.user$group)) { start.user$group <- rep(1L, length(start.user$lhs)) } # FIXME: avoid for loop!!! 
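# (hypothetical vectorized alternative, sketched in a comment only --
#  match both tables on a pasted lhs/op/rhs/group key instead of looping:
#    key.pt   <- paste(lavpartable$lhs, lavpartable$op,
#                      lavpartable$rhs, lavpartable$group)
#    key.user <- paste(start.user$lhs, start.user$op,
#                      start.user$rhs, start.user$group)
#    idx <- match(key.pt, key.user)
#    ok  <- !is.na(idx) & is.finite(start.user$est[idx])
#    start[ok] <- start.user$est[idx[ok]]
#  equivalent to the loop below whenever the keys are unique.)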
for (i in seq_along(lavpartable$lhs)) { # find corresponding parameters lhs <- lavpartable$lhs[i] op <- lavpartable$op[i] rhs <- lavpartable$rhs[i] grp <- lavpartable$group[i] start.user.idx <- which(start.user$lhs == lhs & start.user$op == op & start.user$rhs == rhs & start.user$group == grp) if (length(start.user.idx) == 1L && is.finite(start.user$est[start.user.idx])) { start[i] <- start.user$est[start.user.idx] } } } # override fixed values with ustart values user.idx <- which(!is.na(lavpartable$ustart) & lavpartable$free == 0L) start[user.idx] <- lavpartable$ustart[user.idx] # final check: no NaN or other non-finite values bad.idx <- which(!is.finite(start)) if (length(bad.idx) > 0L) { cat("starting values:\n") print(start) lav_msg_warn(gettext( "some starting values are non-finite; replacing them with 0.5; please provide better starting values.")) start[bad.idx] <- 0.5 } if (lav_debug()) { cat("lavaan DEBUG: lavaanStart\n") print(start) } start } # backwards compatibility # StartingValues <- lav_start # sanity check: (user-specified) variances smaller than covariances lav_start_check_cov <- function(lavpartable = NULL, start = lavpartable$start) { nblocks <- lav_partable_nblocks(lavpartable) block.values <- lav_partable_block_values(lavpartable) for (g in 1:nblocks) { # collect all non-zero covariances cov.idx <- which(lavpartable$op == "~~" & lavpartable$block == block.values[g] & lavpartable$lhs != lavpartable$rhs & !lavpartable$exo & start != 0) # for each covariance, use corresponding variances to standardize; # the end result should not exceed abs(1) for (cc in seq_along(cov.idx)) { this.cov.idx <- cov.idx[cc] # find corresponding variances var.lhs <- lavpartable$lhs[this.cov.idx] var.rhs <- lavpartable$rhs[this.cov.idx] var.lhs.idx <- which(lavpartable$op == "~~" & lavpartable$block == block.values[g] & lavpartable$lhs == var.lhs & lavpartable$lhs == lavpartable$rhs) var.rhs.idx <- which(lavpartable$op == "~~" & lavpartable$block == block.values[g] & lavpartable$lhs == var.rhs & lavpartable$lhs == lavpartable$rhs) var.lhs.value <- start[var.lhs.idx] var.rhs.value <- start[var.rhs.idx] block.txt <- "" if (nblocks > 1L) { block.txt <- paste(" [in block ", g, "]", sep = "") } # check for zero variances if (var.lhs.value == 0 || var.rhs.value == 0) { # this can only happen if it is user-specified # cov.idx free? set it to zero if (start[this.cov.idx] == 0) { # nothing to do } else if (lavpartable$free[this.cov.idx] > 0L) { lav_msg_warn(gettextf( "non-zero covariance element set to zero, due to fixed-to-zero variances variables involved are: %s", var.lhs), var.rhs, block.txt ) start[this.cov.idx] <- 0 } else { lav_msg_stop(gettextf( "please provide better fixed values for (co)variances; variables involved are: %s ", var.lhs), var.rhs, block.txt ) } next } # which one is the smallest? abs() in case of negative variances if (abs(var.lhs.value) < abs(var.rhs.value)) { var.min.idx <- var.lhs.idx var.max.idx <- var.rhs.idx } else { var.min.idx <- var.rhs.idx var.max.idx <- var.lhs.idx } # check COR <- abs(start[this.cov.idx] / sqrt(var.lhs.value * var.rhs.value)) # NOTE: we treat this as an unconditional COR! 
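# (worked example: with start values var.lhs = 4, var.rhs = 9 and a
#  covariance of 7, COR = |7 / sqrt(4 * 9)| = 7/6 > 1, so one of the three
#  parameters is rescaled below until the implied correlation is admissible.)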
if (!is.finite(COR)) { # force simple values lav_msg_warn(gettextf( "starting values imply NaN for a correlation value; variables involved are: %s", var.lhs), var.rhs, block.txt ) start[var.lhs.idx] <- 1 start[var.rhs.idx] <- 1 start[this.cov.idx] <- 0 } else if (COR > 1) { txt <- gettextf( "starting values imply a correlation larger than 1; variables involved are: %1$s %2$s %3$s", var.lhs, var.rhs, block.txt) # three ways to fix it: rescale cov12, var1 or var2 # we prefer a free parameter, and not user-specified if (lavpartable$free[this.cov.idx] > 0L && is.na(lavpartable$ustart[this.cov.idx])) { lav_msg_warn(gettext(txt)) start[this.cov.idx] <- start[this.cov.idx] / (COR * 1.1) } else if (lavpartable$free[var.min.idx] > 0L && is.na(lavpartable$ustart[var.min.idx])) { lav_msg_warn(gettext(txt)) start[var.min.idx] <- start[var.min.idx] * (COR * 1.1)^2 } else if (lavpartable$free[var.max.idx] > 0L && is.na(lavpartable$ustart[var.max.idx])) { lav_msg_warn(gettext(txt)) start[var.max.idx] <- start[var.max.idx] * (COR * 1.1)^2 # not found? try just a free parameter } else if (lavpartable$free[this.cov.idx] > 0L) { lav_msg_warn(gettext(txt)) start[this.cov.idx] <- start[this.cov.idx] / (COR * 1.1) } else if (lavpartable$free[var.min.idx] > 0L) { lav_msg_warn(gettext(txt)) start[var.min.idx] <- start[var.min.idx] * (COR * 1.1)^2 } else if (lavpartable$free[var.max.idx] > 0L) { lav_msg_warn(gettext(txt)) start[var.max.idx] <- start[var.max.idx] * (COR * 1.1)^2 # nothing? abort or warn (and fail later...): warn } else { lav_msg_warn(gettext(txt)) # lav_msg_stop(gettextf( # "please provide better fixed values for (co)variances; # variables involved are: %s ", var.lhs), var.rhs, block.txt) } } # COR > 1 } # cov.idx } start } lavaan/R/lav_cfa_bentler1982.R0000644000176200001440000002475614627656441015527 0ustar liggesusers# a partial implementation of the Bentler (1982) non-iterative method for CFA # # Bentler, P. M. (1982). Confirmatory factor-analysis via noniterative # estimation - a fast, inexpensive method. Journal of Marketing Research, # 19(4), 417-424. https://doi.org/10.1177/002224378201900403 # # # YR 03 Feb 2023: - first version in lavaan: simple setting only, # no constraints, no 'fixed' (but nonzero) values, # no correlated residuals (ie diagonal-theta only!) # YR 23 Apr 2023: - quadprog is not needed if we have no (in)equality # constraints lav_cfa_bentler1982 <- function(S, marker.idx = NULL, lambda.nonzero.idx = NULL, GLS = FALSE, bounds = TRUE, min.reliability.marker = 0.1, quadprog = FALSE, nobs = 20L) { # for cutoff # dimensions nvar <- ncol(S) nfac <- length(marker.idx) # lambda structure B <- matrix(0, nvar, nfac) lambda.marker.idx <- (seq_len(nfac) - 1L) * nvar + marker.idx B[lambda.marker.idx] <- 1L B[lambda.nonzero.idx] <- 1L # partition sample covariance matrix: marker vs non-marker S.xx <- S[marker.idx, marker.idx, drop = FALSE] S.yx <- S[-marker.idx, marker.idx, drop = FALSE] S.xy <- S[marker.idx, -marker.idx, drop = FALSE] S.yy <- S[-marker.idx, -marker.idx, drop = FALSE] p <- nvar - nfac B.y <- B[-marker.idx, , drop = FALSE] # check for p = 0? 
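# (aside: p = nvar - nfac counts the non-marker indicators; with one
#  indicator per factor p would be 0 and S.yy/S.yx would be empty, hence the
#  question above. A tiny sketch of the partition, assuming nvar = 4 and
#  markers at positions 1 and 3:
#    marker.idx <- c(1L, 3L)
#    S.xx <- S[ marker.idx,  marker.idx, drop = FALSE]  # 2 x 2 marker block
#    S.yy <- S[-marker.idx, -marker.idx, drop = FALSE]  # 2 x 2 non-marker block
#  exactly as computed above.)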
# phase 1: initial estimate for Sigma.yx Sigma.yx.hat <- S.yx # phase 2: using GLS/ULS to obtain PSI and Theta if (GLS) { W <- try(solve(S.yy), silent = TRUE) if (inherits(W, "try-error")) { lav_msg_warn(gettext("could not invert S.yy; switching to ULS")) W <- diag(p) } WS.yx <- W %*% S.yx xy.SWS.yx <- crossprod(S.yx, WS.yx) G <- WS.yx %*% solve(xy.SWS.yx) %*% t(WS.yx) } else { Ip <- diag(p) xy.SS.yx <- crossprod(S.yx) G <- S.yx %*% solve(xy.SS.yx) %*% t(S.yx) } # only needed if theta.y is not diagonal: # q <- 6 # all free # # dimension P: q x p*p where q is the number of free elements theta.y # theta.fy <- function(x) { # theta.y <- matrix(0, p, p) # # insert 'free' parameters only # diag(theta.y) <- x # lav_matrix_vec(theta.y) # } # P <- t(numDeriv::jacobian(func = theta.fy, x = rep(1, q))) # tmp1 <- P %*% ((W %x% W) - (G %x% G)) %*% t(P) # NOTE: # if only the 'diagonal' elements of Theta are free (as usual), then we # can write tmp1 as if (GLS) { tmp1 <- W * W - G * G } else { tmp1 <- Ip - G * G } # only needed if fixed values # Theta.F <- matrix(0, p, p) # all free # tmp2 <- W %*% (S.yy - Theta.F) %*% W - G %*% (S.yy - Theta.F) %*% G if (GLS) { tmp2 <- W %*% S.yy %*% W - G %*% S.yy %*% G } else { tmp2 <- S.yy - G %*% S.yy %*% G } # Theta.f <- as.numeric(solve(tmp1) %*% P %*% lav_matrix_vec(tmp2)) # Note: # if only the 'diagonal' elements of Theta are free (as usual), then we # can write Theta.f as Theta.f <- solve(tmp1, diag(tmp2)) Theta.f.nobounds <- Theta.f # store unbounded Theta.f values # ALWAYS apply standard bounds to proceed too.small.idx <- which(Theta.f < 0) if (length(too.small.idx) > 0L) { Theta.f[too.small.idx] <- 0 } too.large.idx <- which(Theta.f > diag(S.yy)) if (length(too.large.idx) > 0L) { Theta.f[too.large.idx] <- diag(S.yy)[too.large.idx] * 1 } # create diagonal matrix with Theta.f elements on diagonal Theta.yhat <- diag(Theta.f, p) # force (S.yy - Theta.yhat) to be positive definite lambda <- try(lav_matrix_symmetric_diff_smallest_root(S.yy, Theta.yhat), silent = TRUE ) if (inherits(lambda, "try-error")) { lav_msg_warn(gettext("failed to compute lambda")) SminTheta <- S.yy - Theta.yhat # and hope for the best } else { cutoff <- 1 + 1 / (nobs - 1) if (lambda < cutoff) { lambda.star <- lambda - 1 / (nobs - 1) SminTheta <- S.yy - lambda.star * Theta.yhat } else { SminTheta <- S.yy - Theta.yhat } } # estimate Phi if (GLS) { tmp1 <- xy.SWS.yx tmp2 <- t(WS.yx) %*% SminTheta %*% WS.yx } else { tmp1 <- xy.SS.yx tmp2 <- t(S.yx) %*% SminTheta %*% S.yx } PSI <- tmp1 %*% solve(tmp2, tmp1) PSI.nobounds <- PSI # ALWAYS apply bounds to proceed lower.bounds.psi <- diag(S.xx) - (1 - min.reliability.marker) * diag(S.xx) toolow.idx <- which(diag(PSI) < lower.bounds.psi) if (length(toolow.idx) > 0L) { diag(PSI)[toolow.idx] <- lower.bounds.psi[toolow.idx] } too.large.idx <- which(diag(PSI) > diag(S.xx)) if (length(too.large.idx) > 0L) { diag(PSI)[too.large.idx] <- diag(S.xx)[too.large.idx] * 1 } # in addition, force PSI to be PD PSI <- lav_matrix_symmetric_force_pd(PSI, tol = 1e-04) # residual variances markers Theta.x <- diag(S.xx - PSI) # create theta vector theta.nobounds <- numeric(nvar) theta.nobounds[marker.idx] <- Theta.x theta.nobounds[-marker.idx] <- Theta.f.nobounds # compute LAMBDA for non-marker items if (quadprog) { # only really needed if we need to impose (in)equality constraints # (TODO) Dmat <- lav_matrix_bdiag(rep(list(PSI), p)) dvec <- as.vector(t(S.yx)) eq.idx <- which(t(B.y) != 1) # these must be zero (row-wise!)
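# (illustrative, standalone: quadprog::solve.QP() minimizes
#  (1/2) b'D b - d'b subject to t(A) %*% b >= b0, with the first meq rows
#  treated as equalities; here every fixed-to-zero loading contributes one
#  equality row. A minimal toy example, assuming the quadprog package:
#    library(quadprog)
#    sol <- solve.QP(Dmat = diag(2), dvec = c(1, 2),
#                    Amat = matrix(c(1, 0), 2, 1), bvec = 0, meq = 1)
#    sol$solution  # c(0, 2): the first element is forced to zero
#  mirroring the constrained least-squares step below.)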
Rmat <- diag(nrow(Dmat))[eq.idx, , drop = FALSE] bvec <- rep(0, length(eq.idx)) # optional, 0=default out <- try(quadprog::solve.QP( Dmat = Dmat, dvec = dvec, Amat = t(Rmat), meq = length(eq.idx), bvec = bvec ), silent = TRUE) if (inherits(out, "try-error")) { lav_msg_warn(gettext("solve.QP failed to find a solution")) Lambda <- matrix(0, nvar, nfac) Lambda[marker.idx, ] <- diag(nfac) Lambda[lambda.nonzero.idx] <- as.numeric(NA) Theta <- numeric(nvar) Theta[marker.idx] <- Theta.x Theta[-marker.idx] <- Theta.f Psi <- PSI return(list( lambda = Lambda, theta = theta.nobounds, psi = PSI.nobounds )) } else { LAMBDA.y <- matrix(out$solution, nrow = p, ncol = nfac, byrow = TRUE ) # zap almost zero elements LAMBDA.y[abs(LAMBDA.y) < sqrt(.Machine$double.eps)] <- 0 } } else { # simple version LAMBDA.y <- t(t(S.yx) / diag(PSI)) * B.y } # assemble matrices LAMBDA <- matrix(0, nvar, nfac) LAMBDA[marker.idx, ] <- diag(nfac) LAMBDA[-marker.idx, ] <- LAMBDA.y list(lambda = LAMBDA, theta = theta.nobounds, psi = PSI.nobounds) } # internal function to be used inside lav_optim_noniter # return 'x', the estimated vector of free parameters lav_cfa_bentler1982_internal <- function(lavobject = NULL, # convenience # internal slot lavmodel = NULL, lavsamplestats = NULL, lavpartable = NULL, lavdata = NULL, lavoptions = NULL, GLS = TRUE, min.reliability.marker = 0.1, quadprog = FALSE, nobs = 20L) { lavpta <- NULL if (!is.null(lavobject)) { stopifnot(inherits(lavobject, "lavaan")) # extract slots lavmodel <- lavobject@Model lavsamplestats <- lavobject@SampleStats lavpartable <- lav_partable_set_cache(lavobject@ParTable, lavobject@pta) lavpta <- lavobject@pta lavdata <- lavobject@Data lavoptions <- lavobject@Options } if (is.null(lavpta)) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) } # no structural part! if (any(lavpartable$op == "~")) { lav_msg_stop(gettext("bentler1982 estimator only available for CFA models")) } # no BETA matrix! (i.e., no higher-order factors) if (!is.null(lavmodel@GLIST$beta)) { lav_msg_stop(gettext("bentler1982 estimator not available for models that require a BETA matrix")) } # no std.lv = TRUE for now if (lavoptions$std.lv) { lav_msg_stop(gettext( "bentler1982 estimator not available if std.lv = TRUE")) } nblocks <- lav_partable_nblocks(lavpartable) stopifnot(nblocks == 1L) # for now b <- 1L sample.cov <- lavsamplestats@cov[[b]] nvar <- nrow(sample.cov) lv.names <- lavpta$vnames$lv.regular[[b]] nfac <- length(lv.names) marker.idx <- lavpta$vidx$lv.marker[[b]] lambda.idx <- which(names(lavmodel@GLIST) == "lambda") lambda.nonzero.idx <- lavmodel@m.free.idx[[lambda.idx]] # only diagonal THETA for now... # because if we have correlated residuals, we should remove the # corresponding variables as instruments before we estimate lambda... 
# (see MIIV) theta.idx <- which(names(lavmodel@GLIST) == "theta") # usually '2' m.theta <- lavmodel@m.free.idx[[theta.idx]] nondiag.idx <- m.theta[!m.theta %in% lav_matrix_diag_idx(nvar)] if (length(nondiag.idx) > 0L) { lav_msg_warn(gettext( "this implementation of bentler1982 does not handle correlated residuals yet!")) } if (!missing(GLS)) { GLS.flag <- GLS } else { GLS.flag <- FALSE if (!is.null(lavoptions$estimator.args$GLS) && lavoptions$estimator.args$GLS) { GLS.flag <- TRUE } } if (missing(quadprog) && !is.null(lavoptions$estimator.args$quadprog)) { quadprog <- lavoptions$estimator.args$quadprog } # run bentler1982 non-iterative CFA algorithm out <- lav_cfa_bentler1982( S = sample.cov, marker.idx = marker.idx, lambda.nonzero.idx = lambda.nonzero.idx, GLS = GLS.flag, min.reliability.marker = min.reliability.marker, quadprog = quadprog, nobs = lavsamplestats@ntotal ) LAMBDA <- out$lambda THETA <- diag(out$theta, nvar) PSI <- out$psi # store matrices in lavmodel@GLIST lavmodel@GLIST$lambda <- LAMBDA lavmodel@GLIST$theta <- THETA lavmodel@GLIST$psi <- PSI # extract free parameters only x <- lav_model_get_parameters(lavmodel) # apply bounds (if any) if (!is.null(lavpartable$lower)) { lower.x <- lavpartable$lower[lavpartable$free > 0] too.small.idx <- which(x < lower.x) if (length(too.small.idx) > 0L) { x[too.small.idx] <- lower.x[too.small.idx] } } if (!is.null(lavpartable$upper)) { upper.x <- lavpartable$upper[lavpartable$free > 0] too.large.idx <- which(x > upper.x) if (length(too.large.idx) > 0L) { x[too.large.idx] <- upper.x[too.large.idx] } } x } lavaan/R/lav_model_efa.R0000644000176200001440000003000214627656441014627 0ustar liggesusers# efa related functions # YR - April 2019 # # the lav_model_efa_rotate_x() function was based on a script originally # written by Florian Scharf (Muenster University, Germany) # rotate solution lav_model_efa_rotate <- function(lavmodel = NULL, lavoptions = NULL) { if (lavmodel@nefa == 0L || lavoptions$rotation == "none") { return(lavmodel) } # extract unrotated parameters from lavmodel x.orig <- lav_model_get_parameters(lavmodel, type = "free", extra = FALSE) # rotate, extract information from 'extra' attribute tmp <- lav_model_efa_rotate_x( x = x.orig, lavmodel = lavmodel, lavoptions = lavoptions, extra = TRUE ) extra <- attr(tmp, "extra") attr(tmp, "extra") <- NULL # store full rotation matrix (per group) # lavmodel@H <- extra$H # lavmodel@lv.order <- extra$lv.order # lavmodel@GLIST <- extra$GLIST # return updated lavmodel # lavmodel out <- list(GLIST = extra$GLIST, H = extra$H, lv.order = extra$lv.order) out } # lower-level function, needed for numDeriv lav_model_efa_rotate_x <- function(x, lavmodel = NULL, lavoptions = NULL, init.rot = NULL, extra = FALSE, type = "free") { # extract rotation options from lavoptions method <- lavoptions$rotation if (method == "none") { return(x) } ropts <- lavoptions$rotation.args # place parameters into model matrices lavmodel.orig <- lav_model_set_parameters(lavmodel, x = x) # GLIST GLIST <- lavmodel.orig@GLIST # H per group H <- vector("list", lavmodel@ngroups) ORDER <- vector("list", lavmodel@ngroups) # for now, rotate per group (not per block) for (g in seq_len(lavmodel@ngroups)) { # select model matrices for this group mm.in.group <- seq_len(lavmodel@nmat[g]) + cumsum(c(0, lavmodel@nmat))[g] MLIST <- GLIST[mm.in.group] # general rotation matrix (all latent variables) H[[g]] <- Hg <- diag(ncol(MLIST$lambda)) lv.order <- seq_len(ncol(MLIST$lambda)) # reconstruct full LAMBDA (in case of dummy ov's) LAMBDA.g <- computeLAMBDA.LISREL(
MLIST = MLIST, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], remove.dummy.lv = TRUE ) # reconstruct full THETA (in case of dummy ov's) THETA.g <- computeTHETA.LISREL( MLIST = MLIST, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]] ) # fill in optimal rotation for each set for (set in seq_len(lavmodel@nefa)) { # which ov/lv's are involved in this set? ov.idx <- lavmodel@ov.efa.idx[[g]][[set]] lv.idx <- lavmodel@lv.efa.idx[[g]][[set]] # empty set? if (length(ov.idx) == 0L) { next } # just 1 factor? if (length(lv.idx) < 2L) { # new in 0.6-18: reflect if needed if (lavoptions$rotation.args$reflect) { tmp <- LAMBDA.g[ov.idx, lv.idx, drop = TRUE] if (sum(tmp) < 0) { MLIST$lambda[ov.idx, 1] <- -1 * MLIST$lambda[ov.idx, 1] } } next } # unrotated 'A' for this set A <- LAMBDA.g[ov.idx, lv.idx, drop = FALSE] # std.ov? we use diagonal of Sigma for this set of ov's only if (ropts$std.ov) { THETA <- THETA.g[ov.idx, ov.idx, drop = FALSE] Sigma <- tcrossprod(A) + THETA this.ov.var <- diag(Sigma) } else { this.ov.var <- NULL } # init.rot? if (!is.null(init.rot) && lavoptions$rotation.args$jac.init.rot) { init.ROT <- init.rot[[g]][lv.idx, lv.idx, drop = FALSE] rstarts <- 0 } else { init.ROT <- NULL rstarts <- ropts$rstarts } # set warn and verbose to ropts-values current.warn <- lav_warn() current.verbose <- lav_verbose() if (lav_warn(ropts$warn)) on.exit(lav_warn(current.warn), TRUE) if (lav_verbose(ropts$verbose)) on.exit(lav_verbose(current.verbose), TRUE) # rotate this set res <- lav_matrix_rotate( A = A, orthogonal = ropts$orthogonal, method = method, method.args = list( geomin.epsilon = ropts$geomin.epsilon, orthomax.gamma = ropts$orthomax.gamma, cf.gamma = ropts$orthomax.gamma, oblimin.gamma = ropts$oblimin.gamma, promax.kappa = ropts$promax.kappa, target = ropts$target, target.mask = ropts$target.mask ), init.ROT = init.ROT, init.ROT.check = FALSE, rstarts = rstarts, row.weights = ropts$row.weights, std.ov = ropts$std.ov, ov.var = this.ov.var, algorithm = ropts$algorithm, reflect = ropts$reflect, order.lv.by = ropts$order.lv.by, gpa.tol = ropts$gpa.tol, tol = ropts$tol, max.iter = ropts$max.iter, group = g ) # extract rotation matrix (note, in Asp & Muthen, 2009; this is H') # note: as of 0.6-6, order.idx has already been applied to ROT, # so no need to reorder rows/columns after rotation H.efa <- res$ROT # fill in optimal rotation for this set Hg[lv.idx, lv.idx] <- H.efa # keep track of possible re-orderings lv.order[lv.idx] <- lv.idx[res$order.idx] } # set # rotate all the SEM parameters # 1. lambda MLIST$lambda <- t(solve(Hg, t(MLIST$lambda))) # 2. psi (note: eq 22 Asp & Muthen, 2009: transpose reversed) MLIST$psi <- t(Hg) %*% MLIST$psi %*% Hg # 3. beta if (!is.null(MLIST$beta)) { MLIST$beta <- t(Hg) %*% t(solve(Hg, t(MLIST$beta))) } # 4. alpha if (!is.null(MLIST$alpha)) { MLIST$alpha <- t(Hg) %*% MLIST$alpha } # no need for rotation: nu, theta # store rotated matrices in GLIST GLIST[mm.in.group] <- MLIST # store rotation matrix + lv.order H[[g]] <- Hg ORDER[[g]] <- lv.order } # group # extract all rotated parameter estimates x.rot <- lav_model_get_parameters(lavmodel, GLIST = GLIST, type = type) # extra?
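# NOTE: a quick sanity check (hypothetical matrices, not part of the
# rotation code) on why the rotated solution is observationally equivalent:
# with lambda* = lambda %*% t(solve(Hg)) and psi* = t(Hg) %*% psi %*% Hg,
# the model-implied covariance lambda* %*% psi* %*% t(lambda*) reduces to
# lambda %*% psi %*% t(lambda), e.g.:
#
#   H <- qr.Q(qr(matrix(rnorm(9), 3, 3)))  # any invertible H will do
#   L <- matrix(rnorm(18), 6, 3); P <- diag(3)
#   Ls <- t(solve(H, t(L))); Ps <- t(H) %*% P %*% H
#   all.equal(Ls %*% Ps %*% t(Ls), L %*% P %*% t(L))  # TRUE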
if (extra) { attr(x.rot, "extra") <- list(GLIST = GLIST, H = H, lv.order = ORDER) } # return rotated parameter estimates as a vector x.rot } # lower-level function, needed for numDeriv lav_model_efa_rotate_border_x <- function(x, lavmodel = NULL, lavoptions = NULL, lavpartable = NULL) { # extract rotation options from lavoptions method <- lavoptions$rotation ropts <- lavoptions$rotation.args method.args <- list( geomin.epsilon = ropts$geomin.epsilon, orthomax.gamma = ropts$orthomax.gamma, cf.gamma = ropts$orthomax.gamma, oblimin.gamma = ropts$oblimin.gamma, promax.kappa = ropts$promax.kappa, target = ropts$target, target.mask = ropts$target.mask ) # place parameters into model matrices lavmodel <- lav_model_set_parameters(lavmodel, x = x) # GLIST GLIST <- lavmodel@GLIST # res res <- numeric(0L) # per group (not per block) for (g in seq_len(lavmodel@ngroups)) { # group-specific method.args this.method.args <- method.args # set group-specific target/target.mask (if needed) # if target, check target matrix if (method == "target" || method == "pst") { target <- method.args$target if (is.list(target)) { this.method.args$target <- target[[g]] } } if (method == "pst") { target.mask <- method.args$target.mask if (is.list(target.mask)) { this.method.args$target.mask <- target.mask[[g]] } } # select model matrices for this group mm.in.group <- seq_len(lavmodel@nmat[g]) + cumsum(c(0, lavmodel@nmat))[g] MLIST <- GLIST[mm.in.group] # reconstruct full LAMBDA (in case of dummy ov's) LAMBDA.g <- computeLAMBDA.LISREL( MLIST = MLIST, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]], remove.dummy.lv = TRUE ) # reconstruct full THETA (in case of dummy ov's) THETA.g <- computeTHETA.LISREL( MLIST = MLIST, ov.y.dummy.ov.idx = lavmodel@ov.y.dummy.ov.idx[[g]], ov.x.dummy.ov.idx = lavmodel@ov.x.dummy.ov.idx[[g]], ov.y.dummy.lv.idx = lavmodel@ov.y.dummy.lv.idx[[g]], ov.x.dummy.lv.idx = lavmodel@ov.x.dummy.lv.idx[[g]] ) # setnames set.names <- lav_partable_efa_values(lavpartable) # for each set for (set in seq_len(lavmodel@nefa)) { # check if we have any user=7 elements in this set # if not, skip constraints ind.idx <- which(lavpartable$op == "=~" & lavpartable$group == g & lavpartable$efa == set.names[set]) if (!any(lavpartable$user[ind.idx] == 7L)) { next } # which ov/lv's are involved in this set? ov.idx <- lavmodel@ov.efa.idx[[g]][[set]] lv.idx <- lavmodel@lv.efa.idx[[g]][[set]] # empty set? if (length(ov.idx) == 0L) { next } # just 1 factor? if (length(lv.idx) < 2L) { next } A <- LAMBDA.g[ov.idx, lv.idx, drop = FALSE] P <- nrow(A) M <- ncol(A) # for oblique, we also need PSI if (!ropts$orthogonal) { PSI <- MLIST$psi[lv.idx, lv.idx, drop = FALSE] } # std.ov?
we use diagonal of Sigma for this set of ov's only if (ropts$std.ov) { THETA <- THETA.g[ov.idx, ov.idx, drop = FALSE] Sigma <- tcrossprod(A) + THETA this.ov.var <- diag(Sigma) } else { this.ov.var <- rep(1, P) } # choose method method <- tolower(method) if (method %in% c( "cf-quartimax", "cf-varimax", "cf-equamax", "cf-parsimax", "cf-facparsim" )) { method.fname <- "lav_matrix_rotate_cf" this.method.args$cf.gamma <- switch(method, "cf-quartimax" = 0, "cf-varimax" = 1 / P, "cf-equamax" = M / (2 * P), "cf-parsimax" = (M - 1) / (P + M - 2), "cf-facparsim" = 1 ) } else { method.fname <- paste("lav_matrix_rotate_", method, sep = "") } # check if rotation method exists check <- try(get(method.fname), silent = TRUE) if (inherits(check, "try-error")) { lav_msg_stop(gettextf("unknown rotation method: %s", method.fname)) } # 1. compute row weights # 1.a cov -> cor? if (ropts$std.ov) { A <- A * 1 / sqrt(this.ov.var) } if (ropts$row.weights == "none") { weights <- rep(1.0, P) } else if (ropts$row.weights == "kaiser") { weights <- lav_matrix_rotate_kaiser_weights(A) } else if (ropts$row.weights == "cureton-mulaik") { weights <- lav_matrix_rotate_cm_weights(A) } else { lav_msg_stop(gettextf("row.weights can be %s", lav_msg_view(c("none", "kaiser", "cureton-mulaik"), "or"))) } A <- A * weights # evaluate rotation criterion, extract GRAD Q <- do.call( method.fname, c(list(LAMBDA = A), this.method.args, list(grad = TRUE)) ) Gq <- attr(Q, "grad") attr(Q, "grad") <- NULL # compute 'Z' Z <- crossprod(A, Gq) # compute constraints if (ropts$orthogonal) { # the constraint: Z == diagonal # or in other words, the non-diagonal elements of # Z - t(Z) are all zero tmp <- Z - t(Z) this.res <- lav_matrix_vech(tmp, diagonal = FALSE) } else { PSI.z <- PSI * diag(Z) # rescale rows only tmp <- Z - PSI.z out1 <- lav_matrix_vech(tmp, diagonal = FALSE) out2 <- lav_matrix_vechu(tmp, diagonal = FALSE) this.res <- c(out1, out2) } res <- c(res, this.res) } # set } # group # return constraint vector res } lavaan/R/lav_syntax_mlist.R0000644000176200001440000000713214627656441015462 0ustar liggesusers# generate lavaan model syntax from a list of model matrices # # YR -- 4 Dec 2021 # # - currently for a single group/level only # - continuous setting only; the model matrices are LAMBDA, PSI, THETA and # optionally BETA # # we return a single string lav_syntax_mlist <- function(MLIST, ov.prefix = "y", lv.prefix = "f", include.values = TRUE) { # model matrices LAMBDA <- MLIST$lambda THETA <- MLIST$theta PSI <- MLIST$psi BETA <- MLIST$beta # check prefix if (ov.prefix == lv.prefix) { lav_msg_stop(gettext("ov.prefix cannot be the same as lv.prefix")) } header <- "# syntax generated by lav_syntax_mlist()" # LAMBDA if (!is.null(LAMBDA)) { IDXV <- row(LAMBDA)[(LAMBDA != 0)] IDXF <- col(LAMBDA)[(LAMBDA != 0)] # lambda.txt <- character(nfactors) # for(f in seq_len(nfactors)) { # var.idx <- which(LAMBDA[,f] != 0.0) # lambda.vals <- LAMBDA[var.idx, f] # lambda.txt[f] <- paste( paste0(lv.prefix, f), "=~", # paste(lambda.vals, "*", # paste0(ov.prefix, var.idx), # sep = "", collapse = " + ") ) # } nel <- length(IDXF) lambda.txt <- character(nel) for (i in seq_len(nel)) { if (include.values) { lambda.txt[i] <- paste0( paste0(lv.prefix, IDXF[i]), " =~ ", LAMBDA[IDXV[i], IDXF[i]], "*", paste0(ov.prefix, IDXV[i]) ) } else { lambda.txt[i] <- paste0( paste0(lv.prefix, IDXF[i]), " =~ ", paste0(ov.prefix, IDXV[i]) ) } } } else { lambda.txt <- character(0L) } # THETA if (!is.null(THETA)) { IDX1 <- row(THETA)[(THETA != 0) & upper.tri(THETA, diag = TRUE)] IDX2
<- col(THETA)[(THETA != 0) & upper.tri(THETA, diag = TRUE)] nel <- length(IDX1) theta.txt <- character(nel) for (i in seq_len(nel)) { if (include.values) { theta.txt[i] <- paste0( paste0(ov.prefix, IDX1[i]), " ~~ ", THETA[IDX1[i], IDX2[i]], "*", paste0(ov.prefix, IDX2[i]) ) } else { theta.txt[i] <- paste0( paste0(ov.prefix, IDX1[i]), " ~~ ", paste0(ov.prefix, IDX2[i]) ) } } } else { theta.txt <- character(0L) } # PSI if (!is.null(PSI)) { IDX1 <- row(PSI)[(PSI != 0) & upper.tri(PSI, diag = TRUE)] IDX2 <- col(PSI)[(PSI != 0) & upper.tri(PSI, diag = TRUE)] nel <- length(IDX1) psi.txt <- character(nel) for (i in seq_len(nel)) { if (include.values) { psi.txt[i] <- paste0( paste0(lv.prefix, IDX1[i]), " ~~ ", PSI[IDX1[i], IDX2[i]], "*", paste0(lv.prefix, IDX2[i]) ) } else { psi.txt[i] <- paste0( paste0(lv.prefix, IDX1[i]), " ~~ ", paste0(lv.prefix, IDX2[i]) ) } } } else { psi.txt <- character(0L) } # BETA if (!is.null(BETA)) { IDX1 <- row(BETA)[(BETA != 0)] IDX2 <- col(BETA)[(BETA != 0)] nel <- length(IDX1) beta.txt <- character(nel) for (i in seq_len(nel)) { if (include.values) { beta.txt[i] <- paste0( paste0(lv.prefix, IDX1[i]), " ~ ", BETA[IDX1[i], IDX2[i]], "*", paste0(lv.prefix, IDX2[i]) ) } else { beta.txt[i] <- paste0( paste0(lv.prefix, IDX1[i]), " ~ ", paste0(lv.prefix, IDX2[i]) ) } } } else { beta.txt <- character(0L) } # assemble syntax <- paste(c(header, lambda.txt, theta.txt, psi.txt, beta.txt, ""), collapse = "\n" ) syntax } lavaan/R/lav_partable_bounds.R0000644000176200001440000004256114627656441016075 0ustar liggesusers# add parameter bounds to the parameter table # lavoptions$optim.bounds lav_partable_add_bounds <- function(partable = NULL, lavh1 = NULL, lavdata = NULL, lavsamplestats = NULL, lavoptions = NULL) { # no support (yet) for multilevel if (lav_partable_nlevels(partable) > 1L) { return(partable) } # check optim.bounds if (is.null(lavoptions$optim.bounds)) { # <0.6-6 version return(partable) } else if (!is.null(lavoptions$samplestats) && !lavoptions$samplestats) { # no sample statistics return(partable) } else { if (!is.null(lavoptions$bounds) && lavoptions$bounds == "none") { # no bounds needed return(partable) } # no support from effect.coding (for now) if (!is.null(lavoptions$effect.coding) && nchar(lavoptions$effect.coding[1L]) > 0L) { lav_msg_warn(gettext( "automatic bounds not available (yet) if effect.coding is used" )) return(partable) } optim.bounds <- lavoptions$optim.bounds # check the elements if (is.null(optim.bounds$lower)) { optim.bounds$lower <- character(0L) } else { optim.bounds$lower <- as.character(optim.bounds$lower) } if (is.null(optim.bounds$upper)) { optim.bounds$upper <- character(0L) } else { optim.bounds$upper <- as.character(optim.bounds$upper) } if (is.null(optim.bounds$min.reliability.marker)) { optim.bounds$min.reliability.marker <- 0.0 } else { if (optim.bounds$min.reliability.marker < 0 || optim.bounds$min.reliability.marker > 1.0) { lav_msg_stop(gettextf( "optim.bounds$min.reliability.marker is out of range: %s", optim.bounds$min.reliability.marker )) } } if (is.null(optim.bounds$min.var.ov)) { optim.bounds$min.var.ov <- -Inf } if (is.null(optim.bounds$min.var.lv.exo)) { optim.bounds$min.var.lv.exo <- 0.0 } if (is.null(optim.bounds$min.var.lv.endo)) { optim.bounds$min.var.lv.endo <- 0.0 } if (is.null(optim.bounds$max.r2.lv.endo)) { optim.bounds$max.r2.lv.endo <- 1.0 } if (is.null(optim.bounds$lower.factor)) { optim.bounds$lower.factor <- rep(1.0, length(optim.bounds$lower)) } else { if (length(optim.bounds$lower.factor) == 1L && 
is.numeric(optim.bounds$lower.factor)) { optim.bounds$lower.factor <- rep( optim.bounds$lower.factor, length(optim.bounds$lower) ) } else if (length(optim.bounds$lower.factor) != length(optim.bounds$lower)) { lav_msg_stop( gettext("length(optim.bounds$lower.factor) is not equal to length(optim.bounds$lower)") ) } } lower.factor <- optim.bounds$lower.factor if (is.null(optim.bounds$upper.factor)) { optim.bounds$upper.factor <- rep(1.0, length(optim.bounds$upper)) } else { if (length(optim.bounds$upper.factor) == 1L && is.numeric(optim.bounds$upper.factor)) { optim.bounds$upper.factor <- rep( optim.bounds$upper.factor, length(optim.bounds$upper) ) } else if (length(optim.bounds$upper.factor) != length(optim.bounds$upper)) { lav_msg_stop( gettext("length(optim.bounds$upper.factor) is not equal to length(optim.bounds$upper)") ) } } upper.factor <- optim.bounds$upper.factor # new in 0.6-17: check if we have theta parameterization theta.parameterization.flag <- FALSE if (any(partable$op == "~*~") && lavoptions$parameterization == "theta") { # some fixed-to-1 theta elements? ov.scaled <- partable$lhs[partable$op == "~*~"] ov.var.idx <- which(partable$op == "~~" & partable$lhs %in% ov.scaled & partable$free == 0L & partable$ustart == 1) if (length(ov.var.idx) > 0L) { theta.parameterization.flag <- TRUE theta.parameterization.names <- partable$lhs[ov.var.idx] } } # shortcut REL <- optim.bounds$min.reliability.marker # nothing to do if (length(optim.bounds$lower) == 0L && length(optim.bounds$upper) == 0L) { return(partable) } else { # we compute ALL bounds, then we select what we need # (otherwise, we cannot use the 'factor') if (!is.null(partable$lower)) { lower.user <- partable$lower } else { partable$lower <- lower.user <- rep(-Inf, length(partable$lhs)) } if (!is.null(partable$upper)) { upper.user <- partable$upper } else { partable$upper <- upper.user <- rep(+Inf, length(partable$lhs)) } # the 'automatic' bounds lower.auto <- rep(-Inf, length(partable$lhs)) upper.auto <- rep(+Inf, length(partable$lhs)) } lavpta <- lav_partable_attributes(partable) # check blocks if (is.null(partable$block)) { partable$block <- rep(1L, length(partable$lhs)) } block.values <- lav_partable_block_values(partable) # check groups if (is.null(partable$group)) { partable$group <- rep(1L, length(partable$lhs)) } group.values <- lav_partable_group_values(partable) ngroups <- length(group.values) # compute bounds per group ### TODO: add levels/classes/... b <- 0L for (g in seq_len(ngroups)) { # next block b <- b + 1L # for this block ov.names <- lavpta$vnames$ov[[b]] lv.names <- lavpta$vnames$lv[[b]] lv.names.x <- lavpta$vnames$lv.x[[b]] if (length(lv.names.x) > 0L) { lv.names.endo <- lv.names[!lv.names %in% lv.names.x] } else { lv.names.endo <- lv.names } lv.marker <- lavpta$vnames$lv.marker[[b]] # OV.VAR for this group if (lavsamplestats@missing.flag && lavdata@nlevels == 1L) { OV.VAR <- diag(lavsamplestats@missing.h1[[g]]$sigma) } else { if (lavoptions$conditional.x) { OV.VAR <- diag(lavsamplestats@res.cov[[g]]) } else { OV.VAR <- diag(lavsamplestats@cov[[g]]) } } # new in 0.6-17: increase observed variances for 'scaled' parameters # if theta parameterization if (theta.parameterization.flag) { sc.idx <- match(theta.parameterization.names, ov.names) OV.VAR[sc.idx] <- OV.VAR[sc.idx] / REL } # we 'process' the parameters per 'type', so we can choose # to apply (or not) upper/lower bounds for each type separately ################################ ## 1.
(residual) ov variances ## ################################ par.idx <- which(partable$group == group.values[g] & partable$op == "~~" & partable$lhs %in% ov.names & partable$lhs == partable$rhs) if (length(par.idx) > 0L) { # lower == 0 lower.auto[par.idx] <- 0 # upper == var(ov) var.idx <- match(partable$lhs[par.idx], ov.names) upper.auto[par.idx] <- OV.VAR[var.idx] # if reliability > 0, adapt marker indicators only if (REL > 0) { marker.idx <- which(partable$group == group.values[g] & partable$op == "~~" & partable$lhs %in% lv.marker & partable$lhs == partable$rhs) marker.var.idx <- match(partable$lhs[marker.idx], ov.names) # upper = (1-REL)*OVAR upper.auto[marker.idx] <- (1 - REL) * OV.VAR[marker.var.idx] } # range bound.range <- upper.auto[par.idx] - pmax(lower.auto[par.idx], 0) # enlarge lower? if ("ov.var" %in% optim.bounds$lower) { factor <- lower.factor[which(optim.bounds$lower == "ov.var")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor diff <- abs(new.range - bound.range) lower.auto[par.idx] <- lower.auto[par.idx] - diff } } # enlarge upper? if ("ov.var" %in% optim.bounds$upper) { factor <- upper.factor[which(optim.bounds$upper == "ov.var")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor diff <- abs(new.range - bound.range) upper.auto[par.idx] <- upper.auto[par.idx] + diff } } # min.var.ov? min.idx <- which(lower.auto[par.idx] < optim.bounds$min.var.ov) if (length(min.idx) > 0L) { lower.auto[par.idx[min.idx]] <- optim.bounds$min.var.ov } # requested? if ("ov.var" %in% optim.bounds$lower) { partable$lower[par.idx] <- lower.auto[par.idx] } if ("ov.var" %in% optim.bounds$upper) { partable$upper[par.idx] <- upper.auto[par.idx] } } # (res) ov variances ################################ ## 2. 
(residual) lv variances ## ################################ # first collect lower/upper bounds for TOTAL variances in lv.names LV.VAR.LB <- numeric(length(lv.names)) LV.VAR.UB <- numeric(length(lv.names)) if (lavoptions$std.lv) { LV.VAR.LB <- rep(1.0, length(lv.names)) LV.VAR.UB <- rep(1.0, length(lv.names)) } else { for (i in seq_len(length(lv.names))) { this.lv.name <- lv.names[i] this.lv.marker <- lv.marker[i] if (nchar(this.lv.marker) > 0L && this.lv.marker %in% ov.names) { marker.var <- OV.VAR[match(this.lv.marker, ov.names)] LOWER <- marker.var - (1 - REL) * marker.var LV.VAR.LB[i] <- max(LOWER, optim.bounds$min.var.lv.exo) # LV.VAR.UB[i] <- marker.var - REL*marker.var LV.VAR.UB[i] <- marker.var # new in 0.6-17 if (theta.parameterization.flag) { LV.VAR.LB[i] <- REL } } else { LV.VAR.LB[i] <- optim.bounds$min.var.lv.exo LV.VAR.UB[i] <- max(OV.VAR) } } } # use these bounds for the free parameters par.idx <- which(partable$group == group.values[g] & partable$op == "~~" & partable$lhs %in% lv.names & partable$lhs == partable$rhs) if (length(par.idx) > 0L) { # adjust for endogenous lv LV.VAR.LB2 <- LV.VAR.LB endo.idx <- which(lv.names %in% lv.names.endo) if (length(endo.idx) > 0L) { LV.VAR.LB2[endo.idx] <- optim.bounds$min.var.lv.endo if (optim.bounds$max.r2.lv.endo != 1) { LV.VAR.LB2[endo.idx] <- (1 - optim.bounds$max.r2.lv.endo) * LV.VAR.UB[endo.idx] } } exo.idx <- which(!lv.names %in% lv.names.endo) if (length(exo.idx) > 0L && optim.bounds$min.var.lv.exo != 0) { LV.VAR.LB2[exo.idx] <- optim.bounds$min.var.lv.exo } lower.auto[par.idx] <- LV.VAR.LB2[match( partable$lhs[par.idx], lv.names )] upper.auto[par.idx] <- LV.VAR.UB[match( partable$lhs[par.idx], lv.names )] # range bound.range <- upper.auto[par.idx] - pmax(lower.auto[par.idx], 0) # enlarge lower? if ("lv.var" %in% optim.bounds$lower) { factor <- lower.factor[which(optim.bounds$lower == "lv.var")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor diff <- abs(new.range - bound.range) lower.auto[par.idx] <- lower.auto[par.idx] - diff } } # enlarge upper? if ("lv.var" %in% optim.bounds$upper) { factor <- upper.factor[which(optim.bounds$upper == "lv.var")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor diff <- abs(new.range - bound.range) upper.auto[par.idx] <- upper.auto[par.idx] + diff } } # requested? if ("lv.var" %in% optim.bounds$lower) { partable$lower[par.idx] <- lower.auto[par.idx] } if ("lv.var" %in% optim.bounds$upper) { partable$upper[par.idx] <- upper.auto[par.idx] } } # lv variances ############################################# ## 3. factor loadings (ov indicators only) ## ############################################# # lambda_p^(u) = sqrt( upper(res.var.indicators_p) / # lower(var.factor) ) ov.ind.names <- lavpta$vnames$ov.ind[[b]] par.idx <- which(partable$group == group.values[g] & partable$op == "=~" & partable$lhs %in% lv.names & partable$rhs %in% ov.ind.names) if (length(par.idx) > 0L) { # if negative LV variances are allowed (due to factor > 1) # make them equal to zero LV.VAR.LB[LV.VAR.LB < 0] <- 0.0 var.all <- OV.VAR[match(partable$rhs[par.idx], ov.names)] tmp <- LV.VAR.LB[match(partable$lhs[par.idx], lv.names)] tmp[is.na(tmp)] <- 0 # just in case... lower.auto[par.idx] <- -1 * sqrt(var.all / tmp) # -Inf if tmp==0 upper.auto[par.idx] <- +1 * sqrt(var.all / tmp) # +Inf if tmp==0 # if std.lv = TRUE, force 'first' loading to be positive?
# if(lavoptions$std.lv) { # # get index 'first' indicators # first.idx <- which(!duplicated(partable$lhs[par.idx])) # lower.auto[par.idx][first.idx] <- 0 # } # range bound.range <- upper.auto[par.idx] - lower.auto[par.idx] # enlarge lower? if ("loadings" %in% optim.bounds$lower) { factor <- lower.factor[which(optim.bounds$lower == "loadings")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor ok.idx <- is.finite(new.range) if (length(ok.idx) > 0L) { diff <- abs(new.range[ok.idx] - bound.range[ok.idx]) lower.auto[par.idx][ok.idx] <- lower.auto[par.idx][ok.idx] - diff } } } # enlarge upper? if ("loadings" %in% optim.bounds$upper) { factor <- upper.factor[which(optim.bounds$upper == "loadings")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor ok.idx <- is.finite(new.range) if (length(ok.idx) > 0L) { diff <- abs(new.range[ok.idx] - bound.range[ok.idx]) upper.auto[par.idx][ok.idx] <- upper.auto[par.idx][ok.idx] + diff } } } # requested? if ("loadings" %in% optim.bounds$lower) { partable$lower[par.idx] <- lower.auto[par.idx] } if ("loadings" %in% optim.bounds$upper) { partable$upper[par.idx] <- upper.auto[par.idx] } } # lambda #################### ## 4. covariances ## #################### # |cov(x,y)| <= sqrt(var(x)) * sqrt(var(y)) par.idx <- which(partable$group == group.values[g] & partable$op == "~~" & partable$lhs != partable$rhs) if (length(par.idx) > 0L) { for (i in seq_len(length(par.idx))) { # this lhs/rhs this.lhs <- partable$lhs[par.idx[i]] this.rhs <- partable$rhs[par.idx[i]] # 2 possibilities: # - variances are free parameters # - variances are fixed (eg std.lv = TRUE) # var idx lhs.var.idx <- which(partable$group == group.values[g] & partable$op == "~~" & partable$lhs == this.lhs & partable$lhs == partable$rhs) rhs.var.idx <- which(partable$group == group.values[g] & partable$op == "~~" & partable$lhs == this.rhs & partable$lhs == partable$rhs) # upper bounds lhs.upper <- upper.auto[lhs.var.idx] rhs.upper <- upper.auto[rhs.var.idx] # compute upper bounds for this cov (assuming >0 vars) if (is.finite(lhs.upper) && is.finite(rhs.upper)) { upper.cov <- sqrt(lhs.upper) * sqrt(rhs.upper) upper.auto[par.idx[i]] <- +1 * upper.cov lower.auto[par.idx[i]] <- -1 * upper.cov } } # range bound.range <- upper.auto[par.idx] - lower.auto[par.idx] # enlarge lower? if ("covariances" %in% optim.bounds$lower) { factor <- lower.factor[which(optim.bounds$lower == "covariances")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor ok.idx <- is.finite(new.range) if (length(ok.idx) > 0L) { diff <- new.range[ok.idx] - bound.range[ok.idx] lower.auto[par.idx][ok.idx] <- lower.auto[par.idx][ok.idx] - diff } } } # enlarge upper? if ("covariances" %in% optim.bounds$upper) { factor <- upper.factor[which(optim.bounds$upper == "covariances")] if (is.finite(factor) && factor != 1.0) { new.range <- bound.range * factor ok.idx <- is.finite(new.range) if (length(ok.idx) > 0L) { diff <- new.range[ok.idx] - bound.range[ok.idx] upper.auto[par.idx][ok.idx] <- upper.auto[par.idx][ok.idx] + diff } } } # requested?
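# NOTE: a worked numeric example (hypothetical values) of the bound above:
# if the automatic upper bounds for var(x) and var(y) are 4 and 9, the
# covariance is bounded to +/- sqrt(4) * sqrt(9) = +/- 6. A lower.factor of
# 1.5 widens the range (12) by an extra 6 on the lower side: -6 - 6 = -12;
# an upper.factor of 1.5 likewise yields +12. The checks below only copy
# these automatic bounds into the partable when 'covariances' was requested
# in optim.bounds$lower / optim.bounds$upper.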
if ("covariances" %in% optim.bounds$lower) { partable$lower[par.idx] <- lower.auto[par.idx] } if ("covariances" %in% optim.bounds$upper) { partable$upper[par.idx] <- upper.auto[par.idx] } } # covariances } # g # overwrite with lower.user (except -Inf) not.inf.idx <- which(lower.user > -Inf) if (length(not.inf.idx) > 0L) { partable$lower[not.inf.idx] <- lower.user[not.inf.idx] } # overwrite with upper.user (except +Inf) not.inf.idx <- which(upper.user < +Inf) if (length(not.inf.idx) > 0L) { partable$upper[not.inf.idx] <- upper.user[not.inf.idx] } # non-free non.free.idx <- which(partable$free == 0L) if (length(non.free.idx) > 0L && !is.null(partable$ustart)) { partable$lower[non.free.idx] <- partable$ustart[non.free.idx] partable$upper[non.free.idx] <- partable$ustart[non.free.idx] } partable } lavaan/R/lav_tables.R0000644000176200001440000011506514627656441014203 0ustar liggesusers# construct 1D, 2D or pattern-based frequency tables # YR. 10 April 2013 # Notes: # - we do NOT make a distinction here between unordered and ordered categorical # variables # - object can be a matrix (most likely with integers), a full data frame, # a fitted lavaan object, or a lavData object # - 11 May 2013: added collapse=TRUE, min.std.resid options (suggested # by Myrsini Katsikatsou # - 11 June 2013: added dimension, to get one-way and two-way (three-way?) # tables # - 20 Sept 2013: - allow for sample-based or model-based cell probabilities # re-organize/re-name to provide a more consistent interface # rows in the output can be either: cells, tables or patterns # - dimension=0 equals type="pattern # - collapse=TRUE is replaced by type="table" # - changed names of statistics: std.resid is now GR.average # - added many more statistics; some based on the model, some # on the unrestricted model # - 8 Nov 2013: - skip empty cells for G2, instead of adding 0.5 to obs # - 7 Feb 2016: - take care of conditional.x = TRUE lavTables <- function(object, # what type of table? dimension = 2L, type = "cells", # if raw data, additional attributes categorical = NULL, group = NULL, # which statistics / fit indices? statistic = "default", G2.min = 3.0, # needed for G2.{p/n}large X2.min = 3.0, # needed for X2.{p/n}large # pvalues for statistics? p.value = FALSE, # Bonferonni # alpha.adj = FALSE, # output format output = "data.frame", patternAsString = TRUE) { # check input if (!(dimension == 0L || dimension == 1L || dimension == 2L)) { lav_msg_stop(gettext( "dimension must be 0, 1 or 2 for pattern, one-way or two-way tables")) } stopifnot(type %in% c("cells", "table", "pattern")) if (type == "pattern") { dimension <- 0L } # extract or create lavdata lavdata <- lavData(object, ordered = categorical, group = group) # is 'object' a lavaan object? 
lavobject <- NULL if (inherits(object, "lavaan")) { lavobject <- object } # case 1: response patterns if (dimension == 0L) { out <- lav_tables_pattern( lavobject = lavobject, lavdata = lavdata, statistic = statistic, patternAsString = patternAsString ) # output format if (output == "data.frame") { class(out) <- c("lavaan.data.frame", "data.frame") } else { lav_msg_warn(gettextf("output option `%s' is not available; ignored.", output)) } # case 2: one-way/univariate } else if (dimension == 1L) { out <- lav_tables_oneway( lavobject = lavobject, lavdata = lavdata, statistic = statistic ) # output format if (output == "data.frame") { class(out) <- c("lavaan.data.frame", "data.frame") } else { lav_msg_warn(gettextf("output option `%s' is not available; ignored.", output)) } # case 3a: two-way/pairwise/bivariate + cells } else if (dimension == 2L && type == "cells") { out <- lav_tables_pairwise_cells( lavobject = lavobject, lavdata = lavdata, statistic = statistic ) # output format if (output == "data.frame") { class(out) <- c("lavaan.data.frame", "data.frame") } else if (output == "table") { out <- lav_tables_cells_format(out, lavdata = lavdata) } else { lav_msg_warn(gettextf("output option `%s' is not available; ignored.", output)) } # case 3b: two-way/pairwise/bivariate + collapsed table } else if (dimension == 2L && (type == "table" || type == "tables")) { out <- lav_tables_pairwise_table( lavobject = lavobject, lavdata = lavdata, statistic = statistic, G2.min = G2.min, X2.min = X2.min, p.value = p.value ) # output format if (output == "data.frame") { class(out) <- c("lavaan.data.frame", "data.frame") } else if (output == "table") { out <- lav_tables_table_format(out, lavdata = lavdata, lavobject = lavobject ) } else { lav_msg_warn(gettextf("output option `%s' is not available; ignored.", output)) } } if ((is.data.frame(out) && nrow(out) == 0L) || (is.list(out) && length(out) == 0L)) { # empty table (perhaps, no categorical variables) return(invisible(out)) } out } # shortcut, always dim=2, type="cells" # lavTablesFit <- function(object, # # if raw data, additional attributes # categorical = NULL, # group = NULL, # # which statistics / fit indices? # statistic = "default", # G2.min = 3.0, # X2.min = 3.0, # # pvalues for statistics? # p.value = FALSE, # # output format # output = "data.frame") { # # lavTables(object = object, dimension = 2L, type = "table", # categorical = categorical, group = group, # statistic = statistic, # G2.min = G2.min, X2.min = X2.min, p.value = p.value, # output = output, patternAsString = FALSE) # } # lavTables1D <- function(object, # # if raw data, additional attributes # categorical = NULL, # group = NULL, # # which statistics / fit indices?
# statistic = "default", # # output format # output = "data.frame") { # # lavTables(object = object, dimension = 1L, # categorical = categorical, group = group, # statistic = statistic, p.value = FALSE, # output = output, patternAsString = FALSE) # } lav_tables_pattern <- function(lavobject = NULL, lavdata = NULL, statistic = NULL, patternAsString = TRUE) { # this only works if we have 'categorical' variables cat.idx <- which(lavdata@ov$type %in% c("ordered", "factor")) if (length(cat.idx) == 0L) { lav_msg_warn(gettext("no categorical variables are found")) return(data.frame( pattern = character(0L), nobs = integer(0L), obs.freq = integer(0L), obs.prop = numeric(0L) )) } # no support yet for mixture of endogenous ordered + numeric variables if (!is.null(lavobject) && length(lavNames(lavobject, "ov.nox")) > length(cat.idx)) { lav_msg_warn(gettext("some endogenous variables are not categorical")) return(data.frame( pattern = character(0L), nobs = integer(0L), obs.freq = integer(0L), obs.prop = numeric(0L) )) } # default statistics if (!is.null(lavobject)) { if (length(statistic) == 1L && statistic == "default") { statistic <- c("G2", "X2") } else { stopifnot(statistic %in% c("G2.un", "X2.un", "G2", "X2")) } } else { # only data if (length(statistic) == 1L && statistic == "default") { # if data, none by default statistic <- character(0L) } else { stopifnot(statistic %in% c("G2.un", "X2.un")) } } # first, create basic table with response patterns for (g in 1:lavdata@ngroups) { pat <- lav_data_resp_patterns(lavdata@X[[g]])$pat obs.freq <- as.integer(rownames(pat)) if (patternAsString) { pat <- data.frame( pattern = apply(pat, 1, paste, collapse = ""), stringsAsFactors = FALSE ) } else { pat <- as.data.frame(pat, stringsAsFactors = FALSE) names(pat) <- lavdata@ov.names[[g]] } # pat$id <- 1:nrow(pat) if (lavdata@ngroups > 1L) { pat$group <- rep(g, nrow(pat)) } NOBS <- sum(obs.freq) pat$nobs <- rep(NOBS, nrow(pat)) pat$obs.freq <- obs.freq rownames(pat) <- NULL if (g == 1L) { out <- pat } else { out <- rbind(out, pat) } } out$obs.prop <- out$obs.freq / out$nobs if (any(c("X2.un", "G2.un") %in% statistic)) { # not a good statistic... we only have uni+bivariate information lav_msg_warn(gettext( "limited information used for thresholds and correlations; but X2/G2 assumes full information")) PI <- lav_tables_resp_pi( lavobject = lavobject, lavdata = lavdata, est = "h1" ) out$est.prop.un <- unlist(PI) if ("G2.un" %in% statistic) { out$G2.un <- lav_tables_stat_G2( out$obs.prop, out$est.prop.un, out$nobs ) } if ("X2.un" %in% statistic) { out$X2.un <- lav_tables_stat_X2( out$obs.prop, out$est.prop.un, out$nobs ) } } if (any(c("X2", "G2") %in% statistic)) { if (lavobject@Options$estimator %in% c("FML")) { # ok, nothing to say } else if (lavobject@Options$estimator %in% c("WLS", "DWLS", "PML", "ULS")) { lav_msg_warn(gettextf( "estimator %s is not using full information while est.prop is using full information", lavobject@Options$estimator)) } else { lav_msg_stop(gettextf( "estimator %s is not supported.", lavobject@Options$estimator)) } PI <- lav_tables_resp_pi( lavobject = lavobject, lavdata = lavdata, est = "h0" ) out$est.prop <- unlist(PI) if ("G2" %in% statistic) { out$G2 <- lav_tables_stat_G2( out$obs.prop, out$est.prop, out$nobs ) } if ("X2" %in% statistic) { out$X2 <- lav_tables_stat_X2( out$obs.prop, out$est.prop, out$nobs ) } } # remove nobs? 
# out$nobs <- NULL out } # pairwise tables, rows = table cells lav_tables_pairwise_cells <- function(lavobject = NULL, lavdata = NULL, statistic = character(0L)) { # this only works if we have at least two 'categorical' variables cat.idx <- which(lavdata@ov$type %in% c("ordered", "factor")) if (length(cat.idx) == 0L) { lav_msg_warn(gettext("no categorical variables are found")) return(data.frame( id = integer(0L), lhs = character(0L), rhs = character(0L), nobs = integer(0L), row = integer(0L), col = integer(0L), obs.freq = integer(0L), obs.prop = numeric(0L) )) } if (length(cat.idx) == 1L) { lav_msg_warn(gettext("at least two categorical variables are needed")) return(data.frame( id = integer(0L), lhs = character(0L), rhs = character(0L), nobs = integer(0L), row = integer(0L), col = integer(0L), obs.freq = integer(0L), obs.prop = numeric(0L) )) } # default statistics if (!is.null(lavobject)) { if (length(statistic) == 1L && statistic == "default") { statistic <- c("X2") } else { stopifnot(statistic %in% c( "cor", "th", "X2", "G2", "cor.un", "th.un", "X2.un", "G2.un" )) } } else { if (length(statistic) == 1L && statistic == "default") { # if data, none by default statistic <- character(0L) } else { stopifnot(statistic %in% c("cor.un", "th.un", "X2.un", "G2.un")) } } # initial table, observed cell frequencies out <- lav_tables_pairwise_freq_cell( lavdata = lavdata, as.data.frame. = TRUE ) out$obs.prop <- out$obs.freq / out$nobs if (any(c("cor.un", "th.un", "X2.un", "G2.un") %in% statistic)) { PI <- lav_tables_pairwise_sample_pi( lavobject = lavobject, lavdata = lavdata ) out$est.prop.un <- unlist(PI) if ("G2.un" %in% statistic) { out$G2.un <- lav_tables_stat_G2( out$obs.prop, out$est.prop.un, out$nobs ) } if ("X2.un" %in% statistic) { out$X2.un <- lav_tables_stat_X2( out$obs.prop, out$est.prop.un, out$nobs ) } if ("cor.un" %in% statistic) { COR <- attr(PI, "COR") cor.all <- unlist(lapply(COR, function(x) { x[lower.tri(x, diag = FALSE)] })) out$cor.un <- cor.all[out$id] } } if (any(c("cor", "th", "X2", "G2") %in% statistic)) { PI <- lav_tables_pairwise_model_pi(lavobject = lavobject) out$est.prop <- unlist(PI) if ("G2" %in% statistic) { out$G2 <- lav_tables_stat_G2( out$obs.prop, out$est.prop, out$nobs ) } if ("X2" %in% statistic) { out$X2 <- lav_tables_stat_X2( out$obs.prop, out$est.prop, out$nobs ) } if ("cor" %in% statistic) { COR <- attr(PI, "COR") cor.all <- unlist(lapply(COR, function(x) { x[lower.tri(x, diag = FALSE)] })) out$cor <- cor.all[out$id] } } out } # G2 statistic lav_tables_stat_G2 <- function(obs.prop = NULL, est.prop = NULL, nobs = NULL) { # not defined if out$obs.prop is (close to) zero zero.idx <- which(obs.prop < .Machine$double.eps) if (length(zero.idx)) { obs.prop[zero.idx] <- as.numeric(NA) } # the usual G2 formula G2 <- 2 * nobs * (obs.prop * log(obs.prop / est.prop)) G2 } # X2 (aka X2) statistic lav_tables_stat_X2 <- function(obs.prop = NULL, est.prop = NULL, nobs = NULL) { res.prop <- obs.prop - est.prop X2 <- nobs * (res.prop * res.prop) / est.prop X2 } # pairwise tables, rows = tables lav_tables_pairwise_table <- function(lavobject = NULL, lavdata = NULL, statistic = character(0L), G2.min = 3.0, X2.min = 3.0, p.value = FALSE) { # default statistics if (!is.null(lavobject)) { if (length(statistic) == 1L && statistic == "default") { statistic <- c("X2", "X2.average") } else { stopifnot(statistic %in% c( "X2", "G2", "X2.un", "G2.un", "cor", "cor.un", "RMSEA.un", "RMSEA", "G2.average", "G2.nlarge", "G2.plarge", "X2.average", "X2.nlarge", "X2.plarge" )) } } else { if 
(length(statistic) == 1L && statistic == "default") { # if data, none by default statistic <- character(0L) } else { stopifnot(statistic %in% c( "cor.un", "X2.un", "G2.un", "RMSEA.un" )) } } # identify 'categorical' variables # cat.idx <- which(lavdata@ov$type %in% c("ordered","factor")) # pairwise tables # pairwise.tables <- utils::combn(vartable$name[cat.idx], m=2L) # pairwise.tables <- rbind(seq_len(ncol(pairwise.tables)), # pairwise.tables) # ntables <- ncol(pairwise.tables) # initial table, observed cell frequencies # out <- as.data.frame(t(pairwise.tables)) # names(out) <- c("id", "lhs", "rhs") # collapse approach stat.cell <- character(0) if (any(c("G2", "G2.average", "G2.plarge", "G2.nlarge") %in% statistic)) { stat.cell <- c(stat.cell, "G2") } if (any(c("X2", "X2.average", "X2.plarge", "X2.nlarge") %in% statistic)) { stat.cell <- c(stat.cell, "X2") } if ("G2" %in% statistic || "RMSEA" %in% statistic) { stat.cell <- c(stat.cell, "G2") } if ("X2.un" %in% statistic) { stat.cell <- c(stat.cell, "X2.un") } if ("G2.un" %in% statistic || "RMSEA.un" %in% statistic) { stat.cell <- c(stat.cell, "G2.un") } if ("cor.un" %in% statistic) { stat.cell <- c(stat.cell, "cor.un") } if ("cor" %in% statistic) { stat.cell <- c(stat.cell, "cor") } # get table with table cells out.cell <- lav_tables_pairwise_cells( lavobject = lavobject, lavdata = lavdata, statistic = stat.cell ) # only 1 row per table row.idx <- which(!duplicated(out.cell$id)) if (is.null(out.cell$group)) { out <- out.cell[row.idx, c("lhs", "rhs", "nobs"), drop = FALSE] } else { out <- out.cell[row.idx, c("lhs", "rhs", "group", "nobs"), drop = FALSE] } # df if (length(statistic) > 0L) { nrow <- tapply(out.cell$row, INDEX = out.cell$id, FUN = max) ncol <- tapply(out.cell$col, INDEX = out.cell$id, FUN = max) out$df <- nrow * ncol - nrow - ncol } # cor if ("cor" %in% statistic) { out$cor <- out.cell[row.idx, "cor"] } # cor.un if ("cor.un" %in% statistic) { out$cor.un <- out.cell[row.idx, "cor.un"] } # X2 if ("X2" %in% statistic) { out$X2 <- tapply(out.cell$X2, INDEX = out.cell$id, FUN = sum, na.rm = TRUE ) if (p.value) { out$X2.pval <- pchisq(out$X2, df = out$df, lower.tail = FALSE) } } if ("X2.un" %in% statistic) { out$X2.un <- tapply(out.cell$X2.un, INDEX = out.cell$id, FUN = sum, na.rm = TRUE ) if (p.value) { out$X2.un.pval <- pchisq(out$X2.un, df = out$df, lower.tail = FALSE) } } # G2 if ("G2" %in% statistic) { out$G2 <- tapply(out.cell$G2, INDEX = out.cell$id, FUN = sum, na.rm = TRUE ) if (p.value) { out$G2.pval <- pchisq(out$G2, df = out$df, lower.tail = FALSE) } } if ("G2.un" %in% statistic) { out$G2.un <- tapply(out.cell$G2.un, INDEX = out.cell$id, FUN = sum, na.rm = TRUE ) if (p.value) { out$G2.un.pval <- pchisq(out$G2.un, df = out$df, lower.tail = FALSE) } } if ("RMSEA" %in% statistic) { G2 <- tapply(out.cell$G2, INDEX = out.cell$id, FUN = sum, na.rm = TRUE) # note: there seems to be a mistake in Appendix 1 eqs 43/44 of Joreskog # SSI paper (2005) 'SEM with ordinal variables using LISREL' # 2*N*d should N*d out$RMSEA <- sqrt(pmax(0, (G2 - out$df) / (out$nobs * out$df))) if (p.value) { # note: MUST use 1 - pchisq (instead of lower.tail = FALSE) # because for ncp > 80, routine only computes lower tail out$RMSEA.pval <- 1.0 - pchisq(G2, ncp = 0.1 * 0.1 * out$nobs * out$df, df = out$df, lower.tail = TRUE ) } } if ("RMSEA.un" %in% statistic) { G2 <- tapply(out.cell$G2.un, INDEX = out.cell$id, FUN = sum, na.rm = TRUE ) # note: there seems to be a mistake in Appendix 1 eqs 43/44 of Joreskog # SSI paper (2005) 'SEM with ordinal variables 
using LISREL' # 2*N*d should N*d out$RMSEA.un <- sqrt(pmax(0, (G2 - out$df) / (out$nobs * out$df))) if (p.value) { # note: MUST use 1 - pchisq (instead of lower.tail = FALSE) # because for ncp > 80, routine only computes lower tail out$RMSEA.un.pval <- 1.0 - pchisq(G2, ncp = 0.1 * 0.1 * out$nobs * out$df, df = out$df, lower.tail = TRUE ) } } if ("G2.average" %in% statistic) { out$G2.average <- tapply(out.cell$G2, INDEX = out.cell$id, FUN = mean, na.rm = TRUE ) } if ("G2.nlarge" %in% statistic) { out$G2.min <- rep(G2.min, length(out$lhs)) out$G2.nlarge <- tapply(out.cell$G2, INDEX = out.cell$id, FUN = function(x) sum(x > G2.min, na.rm = TRUE) ) } if ("G2.plarge" %in% statistic) { out$G2.min <- rep(G2.min, length(out$lhs)) out$G2.plarge <- tapply(out.cell$G2, INDEX = out.cell$id, FUN = function(x) sum(x > G2.min, na.rm = TRUE) / length(x) ) } if ("X2.average" %in% statistic) { out$X2.average <- tapply(out.cell$X2, INDEX = out.cell$id, FUN = mean, na.rm = TRUE ) } if ("X2.nlarge" %in% statistic) { out$X2.min <- rep(X2.min, length(out$lhs)) out$X2.nlarge <- tapply(out.cell$X2, INDEX = out.cell$id, FUN = function(x) sum(x > X2.min, na.rm = TRUE) ) } if ("X2.plarge" %in% statistic) { out$X2.min <- rep(X2.min, length(out$lhs)) out$X2.plarge <- tapply(out.cell$X2, INDEX = out.cell$id, FUN = function(x) sum(x > X2.min, na.rm = TRUE) / length(x) ) } out } lav_tables_oneway <- function(lavobject = NULL, lavdata = NULL, statistic = NULL) { # shortcuts vartable <- lavdata@ov X <- lavdata@X # identify 'categorical' variables cat.idx <- which(vartable$type %in% c("ordered", "factor")) ncat <- length(cat.idx) # do we have any categorical variables? if (length(cat.idx) == 0L) { lav_msg_warn(gettext("no categorical variables are found")) return(data.frame( id = integer(0L), lhs = character(0L), rhs = character(0L), nobs = integer(0L), obs.freq = integer(0L), obs.prop = numeric(0L), est.prop = numeric(0L), X2 = numeric(0L) )) } else { labels <- strsplit(vartable$lnam[cat.idx], "\\|") } # ok, we have an overview of all categorical variables in the data ngroups <- length(X) # for each group, for each categorical variable, collect information TABLES <- vector("list", length = ngroups) for (g in 1:ngroups) { TABLES[[g]] <- lapply(seq_len(ncat), FUN = function(x) { idx <- cat.idx[x] nrow <- vartable$nlev[idx] ncell <- nrow nvar <- length(lavdata@ov.names[[g]]) id <- (g - 1) * nvar + x # compute observed frequencies FREQ <- tabulate(X[[g]][, idx], nbins = ncell) list( id = rep.int(id, ncell), lhs = rep.int(vartable$name[idx], ncell), # op = rep.int("freq", ncell), rhs = labels[[x]], group = rep.int(g, ncell), nobs = rep.int(sum(FREQ), ncell), obs.freq = FREQ, obs.prop = FREQ / sum(FREQ) ) } ) } for (g in 1:ngroups) { TABLE <- TABLES[[g]] TABLE <- lapply(TABLE, as.data.frame, stringsAsFactors = FALSE) if (g == 1L) { out <- do.call(rbind, TABLE) } else { out <- rbind(out, do.call(rbind, TABLE)) } } if (g == 1) { # remove group column out$group <- NULL } # default statistics if (!is.null(lavobject)) { if (length(statistic) == 1L && statistic == "default") { statistic <- c("X2") } else { stopifnot(statistic %in% c( "th.un", "th", "G2", "X2" )) } # sample based # note, there is no G2.un or X2.un: always saturated! 
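# NOTE: a worked example (hypothetical numbers) of the threshold ->
# proportion mapping used below: for one ordinal variable with thresholds
# th = c(-1, 0.5), the implied category proportions are
#   pnorm(c(-1, 0.5, Inf)) - pnorm(c(-Inf, -1, 0.5))
# i.e. roughly 0.159, 0.533 and 0.309, which sum to 1.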
if ("th.un" %in% statistic) { # sample based th <- unlist(lapply(1:lavdata@ngroups, function(x) { if (lavobject@Model@conditional.x) { TH <- lavobject@SampleStats@res.th[[x]][ lavobject@SampleStats@th.idx[[x]] > 0 ] } else { TH <- lavobject@SampleStats@th[[x]][ lavobject@SampleStats@th.idx[[x]] > 0 ] } TH.IDX <- lavobject@SampleStats@th.idx[[x]][ lavobject@SampleStats@th.idx[[x]] > 0 ] unname(unlist(tapply(TH, INDEX = TH.IDX, function(y) c(y, Inf) ))) })) # overwrite obs.prop # NOTE: if we have exogenous variables, obs.prop will NOT # correspond with qnorm(th) out$obs.prop <- unname(unlist(tapply(th, INDEX = out$id, FUN = function(x) { (pnorm(c(x, Inf)) - pnorm(c(-Inf, x)))[-(length(x) + 1)] } ))) out$th.un <- th } # model based if (any(c("th", "G2", "X2") %in% statistic)) { # model based th.h0 <- unlist(lapply(1:lavdata@ngroups, function(x) { if (lavobject@Model@conditional.x) { TH <- lavobject@implied$res.th[[x]][ lavobject@SampleStats@th.idx[[x]] > 0 ] } else { TH <- lavobject@implied$th[[x]][ lavobject@SampleStats@th.idx[[x]] > 0 ] } TH.IDX <- lavobject@SampleStats@th.idx[[x]][ lavobject@SampleStats@th.idx[[x]] > 0 ] unname(unlist(tapply(TH, INDEX = TH.IDX, function(x) c(x, Inf) ))) })) est.prop <- unname(unlist(tapply(th.h0, INDEX = out$id, FUN = function(x) { (pnorm(c(x, Inf)) - pnorm(c(-Inf, x)))[-(length(x) + 1)] } ))) out$est.prop <- est.prop if ("th" %in% statistic) { out$th <- th.h0 } if ("G2" %in% statistic) { out$G2 <- lav_tables_stat_G2( out$obs.prop, out$est.prop, out$nobs ) } if ("X2" %in% statistic) { out$X2 <- lav_tables_stat_X2( out$obs.prop, out$est.prop, out$nobs ) } } } else { if (length(statistic) == 1L && statistic == "default") { # if data, none by default statistic <- character(0L) } else { stopifnot(statistic %in% c("th.un")) } if ("th.un" %in% statistic) { out$th.un <- unlist(tapply(out$obs.prop, INDEX = out$id, FUN = function(x) qnorm(cumsum(x)) )) } } out } # HJ 15/1/2023 MODIFIED to add sampling weights # compute pairwise (two-way) frequency tables lav_tables_pairwise_freq_cell <- function(lavdata = NULL, as.data.frame. = TRUE) { # shortcuts vartable <- as.data.frame(lavdata@ov, stringsAsFactors = FALSE) X <- lavdata@X ov.names <- lavdata@ov.names ngroups <- lavdata@ngroups wt <- lavdata@weights # identify 'categorical' variables cat.idx <- which(vartable$type %in% c("ordered", "factor")) # do we have any categorical variables? if (length(cat.idx) == 0L) { lav_msg_stop(gettext("no categorical variables are found")) } else if (length(cat.idx) == 1L) { lav_msg_stop(gettext("at least two categorical variables are needed")) } # pairwise tables pairwise.tables <- utils::combn(vartable$name[cat.idx], m = 2L) pairwise.tables <- rbind(pairwise.tables, seq_len(ncol(pairwise.tables))) ntables <- ncol(pairwise.tables) # for each group, for each pairwise table, collect information TABLES <- vector("list", length = ngroups) for (g in 1:ngroups) { TABLES[[g]] <- apply(pairwise.tables, MARGIN = 2, FUN = function(x) { idx1 <- which(vartable$name == x[1]) idx2 <- which(vartable$name == x[2]) id <- (g - 1) * ntables + as.numeric(x[3]) nrow <- vartable$nlev[idx1] ncol <- vartable$nlev[idx2] ncell <- nrow * ncol # compute two-way observed frequencies Y1 <- X[[g]][, idx1] Y2 <- X[[g]][, idx2] # FREQ <- table(Y1, Y2) # we loose missings; useNA is ugly FREQ <- lav_bvord_freq(Y1, Y2) # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> # If we want to use weighted frequencies we can use the code # below. 
However, it will probably make sense only when the # weights are normalised. If they're not, we may get quite ugly # and nonsensical numbers here. So for now, just keep the # lavtables as is (using non-weighted frequencies). # # FREQ <- lav_bvord_freq(Y1, Y2, wt[[g]]) # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> list( id = rep.int(id, ncell), lhs = rep.int(x[1], ncell), # op = rep.int("table", ncell), rhs = rep.int(x[2], ncell), group = rep.int(g, ncell), nobs = rep.int(sum(FREQ), ncell), row = rep.int(seq_len(nrow), times = ncol), col = rep(seq_len(ncol), each = nrow), obs.freq = lav_matrix_vec(FREQ) # col by col! ) } ) } if (as.data.frame.) { for (g in 1:ngroups) { TABLE <- TABLES[[g]] TABLE <- lapply(TABLE, as.data.frame, stringsAsFactors = FALSE) if (g == 1) { out <- do.call(rbind, TABLE) } else { out <- rbind(out, do.call(rbind, TABLE)) } } if (g == 1) { # remove group column out$group <- NULL } } else { if (ngroups == 1L) { out <- TABLES[[1]] } else { out <- TABLES } } out } # low-level function to compute expected proportions per cell # object lav_tables_pairwise_model_pi <- function(lavobject = NULL) { stopifnot(lavobject@Model@categorical) # shortcuts lavmodel <- lavobject@Model implied <- lavobject@implied ngroups <- lavobject@Data@ngroups ov.types <- lavobject@Data@ov$type th.idx <- lavobject@Model@th.idx Sigma.hat <- if (lavmodel@conditional.x) implied$res.cov else implied$cov TH <- if (lavmodel@conditional.x) implied$res.th else implied$th PI <- vector("list", length = ngroups) for (g in 1:ngroups) { Sigmahat <- Sigma.hat[[g]] cors <- Sigmahat[lower.tri(Sigmahat)] if (any(abs(cors) > 1)) { lav_msg_warn(gettext( "some model-implied correlations are larger than 1.0")) } nvar <- nrow(Sigmahat) # shortcut for all ordered - tablewise if (all(ov.types == "ordered") && !is.null(lavobject@Cache[[g]]$long)) { # FREQ.OBS <- c(FREQ.OBS, lavobject@Cache[[g]]$bifreq) long2 <- LongVecTH.Rho( no.x = nvar, all.thres = TH[[g]], index.var.of.thres = th.idx[[g]], rho.xixj = cors ) # get expected probability per table, per pair PI[[g]] <- pairwiseExpProbVec( ind.vec = lavobject@Cache[[g]]$long, th.rho.vec = long2 ) } else { PI.group <- integer(0) # order! first i, then j, lav_matrix_vec(table)! 
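# NOTE: a worked special case (assuming standard bivariate normality) of the
# cell probabilities computed in the loop below: for two binary items with
# th.y1 = th.y2 = 0 and polychoric rho = 0.5, the (1,1) cell probability is
#   Phi2(0, 0; rho) = 1/4 + asin(rho) / (2 * pi) = 1/4 + (pi/6) / (2 * pi) = 1/3
# and the four cells of PI.table sum to 1.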
for (i in seq_len(nvar - 1L)) { for (j in (i + 1L):nvar) { if (ov.types[i] == "ordered" && ov.types[j] == "ordered") { PI.table <- lav_bvord_noexo_pi( rho = Sigmahat[i, j], th.y1 = TH[[g]][th.idx[[g]] == i], th.y2 = TH[[g]][th.idx[[g]] == j] ) PI.group <- c(PI.group, lav_matrix_vec(PI.table)) } } } PI[[g]] <- PI.group } } # g # add COR/TH/TH.IDX attr(PI, "COR") <- Sigma.hat attr(PI, "TH") <- TH attr(PI, "TH.IDX") <- th.idx PI } # low-level function to compute expected proportions per cell # using sample-based correlations + thresholds # # object can be either lavData or lavaan class lav_tables_pairwise_sample_pi <- function(lavobject = NULL, lavdata = NULL) { # get COR, TH and th.idx if (!is.null(lavobject)) { if (lavobject@Model@conditional.x) { COR <- lavobject@SampleStats@res.cov TH <- lavobject@SampleStats@res.th } else { COR <- lavobject@SampleStats@cov TH <- lavobject@SampleStats@th } TH.IDX <- lavobject@SampleStats@th.idx } else if (!is.null(lavdata)) { fit.un <- lavCor(object = lavdata, se = "none", output = "fit") if (fit.un@Model@conditional.x) { COR <- fit.un@SampleStats@res.cov TH <- fit.un@SampleStats@res.th } else { COR <- fit.un@SampleStats@cov TH <- fit.un@SampleStats@th } TH.IDX <- fit.un@SampleStats@th.idx } else { lav_msg_stop(gettext("both lavobject and lavdata are NULL")) } lav_tables_pairwise_sample_pi_cor( COR = COR, TH = TH, TH.IDX = TH.IDX ) } # low-level function to compute expected proportions per cell lav_tables_pairwise_sample_pi_cor <- function(COR = NULL, TH = NULL, TH.IDX = NULL) { ngroups <- length(COR) PI <- vector("list", length = ngroups) for (g in 1:ngroups) { Sigmahat <- COR[[g]] cors <- Sigmahat[lower.tri(Sigmahat)] if (any(abs(cors) > 1)) { lav_msg_warn(gettext( "some model-implied correlations are larger than 1.0")) } nvar <- nrow(Sigmahat) th.idx <- TH.IDX[[g]] # reconstruct ov.types ov.types <- rep("numeric", nvar) ord.idx <- unique(th.idx[th.idx > 0]) ov.types[ord.idx] <- "ordered" PI.group <- integer(0) # order! first i, then j, lav_matrix_vec(table)! for (i in seq_len(nvar - 1L)) { for (j in (i + 1L):nvar) { if (ov.types[i] == "ordered" && ov.types[j] == "ordered") { PI.table <- lav_bvord_noexo_pi( rho = Sigmahat[i, j], th.y1 = TH[[g]][th.idx == i], th.y2 = TH[[g]][th.idx == j] ) PI.group <- c(PI.group, lav_matrix_vec(PI.table)) } } } PI[[g]] <- PI.group } # g # add COR/TH/TH.IDX attr(PI, "COR") <- COR attr(PI, "TH") <- TH attr(PI, "TH.IDX") <- TH.IDX PI } # low-level function to compute expected proportions per PATTERN # using sample-based correlations + thresholds # # object can be either lavData or lavaan class # # only valid if estimator = FML, POM or NOR # lav_tables_resp_pi <- function(lavobject = NULL, lavdata = NULL, est = "h0") { # shortcuts if (!is.null(lavobject)) { lavmodel <- lavobject@Model implied <- lavobject@implied } ngroups <- lavdata@ngroups # h0 or unrestricted? 
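# NOTE: for a full response pattern, the probability computed further below
# is a multivariate normal rectangle probability,
#   P(pattern) = int_{lower}^{upper} phi(z; 0, Sigmahat) dz,
# where 'lower'/'upper' collect, per variable, the thresholds just below and
# just above the observed category; sadmvn() (from the mnormt package) is
# used to evaluate this integral. First, select h0 (model-implied) or
# unrestricted moments: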
if (est == "h0") { Sigma.hat <- if (lavmodel@conditional.x) implied$res.cov else implied$cov TH <- if (lavmodel@conditional.x) implied$res.th else implied$th TH.IDX <- lavobject@SampleStats@th.idx } else { if (is.null(lavobject)) { fit.un <- lavCor(object = lavdata, se = "none", output = "fit") Sigma.hat <- if (fit.un@Model@conditional.x) fit.un@implied$res.cov else fit.un@implied$cov TH <- if (fit.un@Model@conditional.x) fit.un@implied$res.th else fit.un@implied$th TH.IDX <- fit.un@SampleStats@th.idx } else { if (lavobject@Model@conditional.x) { Sigma.hat <- lavobject@SampleStats@res.cov TH <- lavobject@SampleStats@res.th } else { Sigma.hat <- lavobject@SampleStats@cov TH <- lavobject@SampleStats@th } TH.IDX <- lavobject@SampleStats@th.idx } } PI <- vector("list", length = ngroups) for (g in 1:ngroups) { Sigmahat <- Sigma.hat[[g]] cors <- Sigmahat[lower.tri(Sigmahat)] if (any(abs(cors) > 1)) { lav_msg_warn(gettext( "some model-implied correlations are larger than 1.0")) } nvar <- nrow(Sigmahat) th.idx <- TH.IDX[[g]] MEAN <- rep(0, nvar) # reconstruct ov.types ov.types <- rep("numeric", nvar) ord.idx <- unique(th.idx[th.idx > 0]) ov.types[ord.idx] <- "ordered" if (all(ov.types == "ordered")) { # get patterns ## FIXME GET it if (!is.null(lavdata@Rp[[g]]$pat)) { PAT <- lavdata@Rp[[g]]$pat } else { PAT <- lav_data_resp_patterns(lavdata@X[[g]])$pat } npatterns <- nrow(PAT) freq <- as.numeric(rownames(PAT)) PI.group <- numeric(npatterns) TH.VAR <- lapply( 1:nvar, function(x) c(-Inf, TH[[g]][th.idx == x], +Inf) ) # FIXME!!! ok to set diagonal to 1.0? diag(Sigmahat) <- 1.0 for (r in 1:npatterns) { # compute probability for each pattern lower <- sapply(1:nvar, function(x) TH.VAR[[x]][PAT[r, x]]) upper <- sapply(1:nvar, function(x) TH.VAR[[x]][PAT[r, x] + 1L]) # handle missing values na.idx <- which(is.na(PAT[r, ])) if (length(na.idx) > 0L) { lower <- lower[-na.idx] upper <- upper[-na.idx] MEAN.r <- MEAN[-na.idx] Sigmahat.r <- Sigmahat[-na.idx, -na.idx, drop = FALSE] } else { MEAN.r <- MEAN Sigmahat.r <- Sigmahat } PI.group[r] <- sadmvn(lower, upper, mean = MEAN.r, varcov = Sigmahat.r ) } } else { # case-wise PI.group <- rep(as.numeric(NA), lavdata@nobs[[g]]) lav_msg_warn(gettext("casewise PI not implemented")) } PI[[g]] <- PI.group } # g PI } lav_tables_table_format <- function(out, lavdata = lavdata, lavobject = lavobject) { # determine column we need NAMES <- names(out) stat.idx <- which(NAMES %in% c( "cor", "cor.un", "G2", "G2.un", "X2", "X2.un", "RMSEA", "RMSEA.un", "G2.average", "G2.plarge", "G2.nlarge", "X2.average", "X2.plarge", "X2.nlarge" )) if (length(stat.idx) == 0) { if (!is.null(out$obs.freq)) { stat.idx <- which(NAMES == "obs.freq") } else if (!is.null(out$nobs)) { stat.idx <- which(NAMES == "nobs") } UNI <- NULL } else if (length(stat.idx) > 1) { lav_msg_stop(gettext( "more than one statistic for table output:"), paste(NAMES[stat.idx], collapse = " ") ) } else { # univariate version of same statistic if (NAMES[stat.idx] == "G2.average") { UNI <- lavTables(lavobject, dimension = 1L, statistic = "G2") } else if (NAMES[stat.idx] == "X2.average") { UNI <- lavTables(lavobject, dimension = 1L, statistic = "X2") } else { UNI <- NULL } } OUT <- vector("list", length = lavdata@ngroups) for (g in 1:lavdata@ngroups) { if (lavdata@ngroups == 1L) { # no group column STAT <- out[[stat.idx]] } else { STAT <- out[[stat.idx]][out$group == g] } RN <- lavdata@ov.names[[g]] OUT[[g]] <- getCov(STAT, diagonal = FALSE, lower = FALSE, names = RN) # change diagonal elements: replace by univariate stat # if 
possible diag(OUT[[g]]) <- as.numeric(NA) if (!is.null(UNI)) { if (!is.null(UNI$group)) { idx <- which(UNI$group == g) } else { idx <- 1:length(UNI$lhs) } if (NAMES[stat.idx] == "G2.average") { diag(OUT[[g]]) <- tapply(UNI$G2[idx], INDEX = UNI$id[idx], FUN = mean ) } else if (NAMES[stat.idx] == "X2.average") { diag(OUT[[g]]) <- tapply(UNI$X2[idx], INDEX = UNI$id[idx], FUN = mean ) } } else if (NAMES[stat.idx] %in% c("cor", "cor.un")) { diag(OUT[[g]]) <- 1 } class(OUT[[g]]) <- c("lavaan.matrix.symmetric", "matrix") } if (lavdata@ngroups > 1L) { names(OUT) <- lavdata@group.label out <- OUT } else { out <- OUT[[1]] } out } lav_tables_cells_format <- function(out, lavdata = lavdata, drop.list.single.group = FALSE) { OUT <- vector("list", length = lavdata@ngroups) if (is.null(out$group)) { out$group <- rep(1L, length(out$lhs)) } # do we have a statistic? # determine column we need NAMES <- names(out) stat.idx <- which(NAMES %in% c( "cor", "cor.un", "G2", "G2.un", "X2", "X2.un", "RMSEA", "RMSEA.un", "G2.average", "G2.plarge", "G2.nlarge", "X2.average", "X2.plarge", "X2.nlarge" )) if (length(stat.idx) == 0) { statistic <- "obs.freq" } else if (length(stat.idx) > 1) { lav_msg_stop(gettext( "more than one statistic for table output:"), paste(NAMES[stat.idx], collapse = " ") ) } else { statistic <- NAMES[stat.idx] } for (g in 1:lavdata@ngroups) { case.idx <- which(out$group == g) ID.group <- unique(out$id[out$group == g]) TMP <- lapply(ID.group, function(x) { Tx <- out[out$id == x, ] M <- matrix( Tx[, statistic], max(Tx$row), max(Tx$col) ) rownames(M) <- unique(Tx$row) colnames(M) <- unique(Tx$col) class(M) <- c("lavaan.matrix", "matrix") M }) names(TMP) <- unique(paste(out$lhs[case.idx], out$rhs[case.idx], sep = "_" )) OUT[[g]] <- TMP } if (lavdata@ngroups == 1L && drop.list.single.group) { OUT <- OUT[[1]] } else { if (length(lavdata@group.label) > 0L) { names(OUT) <- unlist(lavdata@group.label) } } OUT } lavaan/R/ctr_pairwise_fit.R0000644000176200001440000001252714627656441015423 0ustar liggesusers# This code is written by YR (using lavaan components), but based on # research code written by Mariska Barendse (Groningen/Amsterdam, NL) # # September 2013 # # Three fit indices for the PML estimator (if all categorical, no exo) # - Cp(max) # - CF # - CM # FIXME: how to handle multiple groups?? 
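# Hedged, commented-out usage sketch for the three fit indices defined
# below (model/data names are hypothetical):
#   fit <- sem(model, data = mydata, ordered = names(mydata),
#              estimator = "PML")
#   lavTablesFitCp(fit)  # Cp: flags bivariate tables with large G2
#   lavTablesFitCf(fit)  # CF: based on the full response patterns
#   lavTablesFitCm(fit)  # CM: CF(h0) minus CF(h1)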
# Mariska Barendse Cp statistic
# lav_tables_fit_Cp <- function(object, alpha = 0.05) {
#
#   out <- lavTablesFit(object, statistic = "G2", p.value = TRUE)
#
#   # Bonferroni adjusted p-value
#   ntests <- length(out$lhs)
#   out$alpha.adj <- alpha / ntests
#   #out$pval <- pchisq(out$G2, df=out$df, lower.tail = FALSE)
#
#   # remove G2.h0.pval
#   #out$G2.h0.pval <- NULL
#
#   out
# }
lavTablesFitCp <- function(object, alpha = 0.05) {
  lavdata <- object@Data

  if (!any(lavdata@ov$type == "ordered")) {
    return(list(
      G2 = as.numeric(NA), df = as.numeric(NA),
      p.value = as.numeric(NA), p.value.Bonferroni = as.numeric(NA)
    ))
  }

  TF <- lavTables(object,
    dimension = 2L, type = "table",
    statistic = "G2", p.value = TRUE
  )

  # Bonferroni adjusted p-value
  ntests <- length(TF$lhs)
  TF$alpha.adj <- alpha / ntests
  out <- subset(TF, TF$G2.pval < TF$alpha.adj)

  # find largest G2
  max.idx <- which(TF$G2 == max(TF$G2))

  extra <- list(
    G2 = unname(TF$G2[max.idx]),
    df = unname(TF$df[max.idx]),
    lhs = TF$lhs[max.idx],
    rhs = TF$rhs[max.idx],
    group = TF$group[max.idx],
    p.value = unname(TF$G2.pval[max.idx]),
    ntests = ntests,
    p.value.Bonferroni = unname(TF$G2.pval[max.idx] * length(TF$lhs))
  )
  attr(out, "CpMax") <- extra
  class(out) <- c("lavaan.tables.fit.Cp", "lavaan.data.frame", "data.frame")
  out
}

print.lavaan.tables.fit.Cp <- function(x, ...) {
  cat("CP-values that are significant at a Bonferroni adjusted level of significance\n")
  tmp <- x
  class(tmp) <- c("lavaan.data.frame", "data.frame")
  print(tmp)
}

# Mariska Barendse CF statistic
lavTablesFitCf <- function(object) {
  # check object class
  if (!inherits(object, "lavaan")) {
    lav_msg_stop(gettext("object must be an object of class lavaan"))
  }
  lavdata <- object@Data
  lavpta <- object@pta
  lavmodel <- object@Model
  lavcache <- object@Cache
  implied <- object@implied

  CF.group <- rep(as.numeric(NA), lavdata@ngroups)
  DF.group <- rep(as.numeric(NA), lavdata@ngroups)

  # check if all ordered
  if (!any(lavdata@ov$type == "ordered")) {
    CF <- as.numeric(NA)
    attr(CF, "CF.group") <- CF.group
    attr(CF, "DF.group") <- DF.group
    return(CF)
  }

  # ord var in this group
  ov.ord <- unique(unlist(lavpta$vnames$ov.ord))
  ov.idx <- which(ov.ord %in% lavdata@ov$name)
  ov.nlev <- lavdata@ov$nlev[ov.idx]

  Sigma.hat <- if (lavmodel@conditional.x) implied$res.cov else implied$cov
  TH <- if (lavmodel@conditional.x) implied$res.th else implied$th

  DF <- prod(ov.nlev) - object@optim$npar - 1L

  for (g in seq_len(lavdata@ngroups)) {
    F.group <- estimator.FML(
      Sigma.hat = Sigma.hat[[g]],
      TH = TH[[g]],
      th.idx = lavmodel@th.idx[[g]],
      num.idx = lavmodel@num.idx[[g]],
      X = lavdata@X[[g]],
      lavcache = lavcache[[g]]
    )
    CF.group[g] <- 2 * lavdata@nobs[[g]] * F.group
  }

  # check for negative values
  CF.group[CF.group < 0] <- 0.0

  # global test statistic
  CF <- sum(CF.group)

  attr(CF, "CF.group") <- CF.group
  attr(CF, "DF") <- DF
  attr(CF, "rpat.observed") <- sapply(lavdata@Rp, "[[", "npatterns")
  attr(CF, "rpat.total") <- sapply(lavdata@Rp, "[[", "total.patterns")
  attr(CF, "rpat.empty") <- sapply(lavdata@Rp, "[[", "empty.patterns")
  class(CF) <- c("lavaan.tables.fit.Cf", "numeric")
  CF
}

print.lavaan.tables.fit.Cf <- function(x, ...) {
  cat("Total response patterns: ", attr(x, "rpat.total"), "\n")
  cat("Observed response patterns: ", attr(x, "rpat.observed"), "\n")
  cat("Empty response patterns: ", attr(x, "rpat.empty"), "\n")
  cat("Cf results may be biased because of large numbers of empty cells in the multivariate contingency table\n")
  cat("Cf-value, overall:\n")
  CF <- unclass(x)
  attributes(CF) <- NULL
  print(CF)
  CF.group <- attr(x, "CF.group")
  if (length(CF.group) > 1L) {
    cat("Cf-value, per group:\n")
    print(CF.group)
  }
  cat("Degrees of freedom:\n")
  print(attr(x, "DF"))
}

lavTablesFitCm <- function(object) {
  lavdata <- object@Data
  lavoptions <- object@Options

  CF.h0 <- lavTablesFitCf(object)

  # fit unrestricted model
  h1 <- lavCor(lavdata,
    estimator = lavoptions$estimator,
    se = "none", test = "none", output = "lavaan"
  )
  CF.h1 <- lavTablesFitCf(h1)

  CF.h0.group <- attr(CF.h0, "CF.group")
  CF.h1.group <- attr(CF.h1, "CF.group")
  DF.h0 <- attr(CF.h0, "DF")
  DF.h1 <- attr(CF.h1, "DF")
  attributes(CF.h0) <- NULL
  attributes(CF.h1) <- NULL

  CM <- CF.h0 - CF.h1
  attr(CM, "CM.group") <- CF.h0.group - CF.h1.group
  attr(CM, "DF") <- DF.h0 - DF.h1
  class(CM) <- c("lavaan.tables.fit.Cm", "numeric")
  CM
}

print.lavaan.tables.fit.Cm <- function(x, ...) {
  # cat("The percentage of empty cells\n") # not quite sure, since FML does not work
  # cat("CM results may be a little biased because of large numbers of empty cells in the multivariate contingency table\n")
  cat("Cm-value, overall:\n")
  CM <- unclass(x)
  attributes(CM) <- NULL
  print(CM)
  CM.group <- attr(x, "CM.group")
  if (length(CM.group) > 1L) {
    cat("Cm-value, per group:\n")
    print(CM.group)
  }
  cat("Degrees of freedom:\n")
  print(attr(x, "DF"))
}
lavaan/R/lav_options_se.R0000644000176200001440000000523014627656441015103 0ustar liggesusers# in separate file LDW 06/04/2024

# rename names of se values, and check for invalid values
lav_options_check_se <- function(opt = NULL) {
  # unlike test=, se= should be a single character string
  if (length(opt$se) > 1L) opt$se <- opt$se[1]

  # backwards compatibility (0.4 -> 0.5)
  if (opt$se == "first.order") {
    opt$se <- "standard"
    opt$information[1] <- "first.order"
    if (length(opt$information) > 1L &&
      opt$information[2] == "default") {
      opt$information[2] <- "first.order"
    }
  } else if (opt$se == "observed") {
    opt$se <- "standard"
    opt$information[1] <- "observed"
    if (length(opt$information) > 1L &&
      opt$information[2] == "default") {
      opt$information[2] <- "observed"
    }
  } else if (opt$se == "expected") {
    opt$se <- "standard"
    opt$information[1] <- "expected"
    if (length(opt$information) > 1L &&
      opt$information[2] == "default") {
      opt$information[2] <- "expected"
    }
  }

  # handle generic 'robust' (except clustered/multilevel)
  # else if(opt$se == "robust" && !opt$.clustered && !opt$.multilevel) {
  #   if(opt$missing %in% c("ml", "ml.x")) {
  #     opt$se <- "robust.huber.white"
  #   } else if(opt$missing == "two.stage") {
  #     opt$se <- "two.stage"
  #   } else if(opt$missing == "robust.two.stage") {
  #     opt$se <- "robust.two.stage"
  #   } else {
  #     # depends on estimator!
# opt$se <- "robust.sem" # } # } # GLS, NTRLS, FML, UMN ok.flag <- TRUE if (any(opt$estimator == c("gls", "ntrls", "fml"))) { ok.flag <- any(opt$se == c( "default", "none", "standard", "bootstrap", "external" )) } # WLS, DLS, DWLS, WLSM, WLSMV, WLSMVS, ULS, ULSM, ULSMV, ULSMVS else if (any(opt$estimator == c( "wls", "dls", "dwls", "wlsm", "wlsmv", "wlsmvs", "uls", "ulsm", "ulsmv", "ulsmvs" ))) { ok.flag <- any(opt$se == c( "default", "none", "standard", "bootstrap", "external", "robust", "robust.sem" )) } # PML else if (opt$estimator == "pml") { ok.flag <- any(opt$se == c( "default", "none", "standard", "bootstrap", "external", "robust.huber.white" )) } # FABIN, GUTTMAN1952, BENTLER1982, ... else if (any(opt$estimator == c( "fabin2", "fabin3", "mgm"))) { ok.flag <- any(opt$se == c("default", "none", "bootstrap", "external")) } # OTHERS else if (any(opt$estimator == c("fml", "mml", "reml"))) { ok.flag <- any(opt$se == c("default", "none", "standard", "external")) } if (!ok.flag) { lav_msg_stop(gettextf( "invalid value (%1$s) in se= argument for estimator %2$s.", opt$se, toupper(opt$estimator))) } opt } lavaan/R/lav_model_loglik.R0000644000176200001440000001623114627656441015365 0ustar liggesusers# compute the loglikelihood of the data, given the current values of the # model parameters lav_model_loglik <- function(lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, lavmodel = NULL, lavoptions = NULL) { ngroups <- lavdata@ngroups logl.group <- rep(as.numeric(NA), ngroups) # should compute logl, or return NA? logl.ok <- FALSE if (lavoptions$estimator %in% c("ML", "MML")) { # check if everything is numeric, OR if we have exogenous # factor with 2 levels only # if(all(lavdata@ov$type == "numeric")) { logl.ok <- TRUE # } else { if (lavoptions$fixed.x == FALSE) { exo.idx <- which(lavdata@ov$exo == 1L) for (i in exo.idx) { if (lavdata@ov$nlev[i] > 1L) { logl.ok <- FALSE } } } # nlevels + fiml # if(lavdata@nlevels > 1L && lavsamplestats@missing.flag) { # logl.ok <- FALSE # } } # lavsamplestats filled in? 
(not if no data, or samplestats = FALSE) if (length(lavsamplestats@ntotal) == 0L || (!is.null(lavoptions$samplestats) && !lavoptions$samplestats)) { logl.ok <- FALSE } if (logl.ok) { for (g in seq_len(ngroups)) { if (lavdata@nlevels > 1L) { # here, we assume only 2 levels, at [[1]] and [[2]] if (lavmodel@conditional.x) { Res.Sigma.W <- lavimplied$res.cov[[(g - 1) * 2 + 1]] Res.Int.W <- lavimplied$res.int[[(g - 1) * 2 + 1]] Res.Pi.W <- lavimplied$res.slopes[[(g - 1) * 2 + 1]] Res.Sigma.B <- lavimplied$res.cov[[(g - 1) * 2 + 2]] Res.Int.B <- lavimplied$res.int[[(g - 1) * 2 + 2]] Res.Pi.B <- lavimplied$res.slopes[[(g - 1) * 2 + 2]] } else { Sigma.W <- lavimplied$cov[[(g - 1) * 2 + 1]] Mu.W <- lavimplied$mean[[(g - 1) * 2 + 1]] Sigma.B <- lavimplied$cov[[(g - 1) * 2 + 2]] Mu.B <- lavimplied$mean[[(g - 1) * 2 + 2]] } if (lavsamplestats@missing.flag) { if (lavmodel@conditional.x) { # TODO logl.group[g] <- as.numeric(NA) } else { logl.group[g] <- lav_mvnorm_cluster_missing_loglik_samplestats_2l( Y1 = lavdata@X[[g]], Y2 = lavsamplestats@YLp[[g]][[2]]$Y2, Lp = lavdata@Lp[[g]], Mp = lavdata@Mp[[g]], Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, loglik.x = lavsamplestats@YLp[[g]][[2]]$loglik.x, log2pi = TRUE, minus.two = FALSE ) } } else { # complete case if (lavmodel@conditional.x) { logl.group[g] <- lav_mvreg_cluster_loglik_samplestats_2l( YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], Res.Sigma.W = Res.Sigma.W, Res.Int.W = Res.Int.W, Res.Pi.W = Res.Pi.W, Res.Sigma.B = Res.Sigma.B, Res.Int.B = Res.Int.B, Res.Pi.B = Res.Pi.B, Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE ) } else { logl.group[g] <- lav_mvnorm_cluster_loglik_samplestats_2l( YLp = lavsamplestats@YLp[[g]], Lp = lavdata@Lp[[g]], Mu.W = Mu.W, Sigma.W = Sigma.W, Mu.B = Mu.B, Sigma.B = Sigma.B, Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE ) } } # complete # end multilevel } else if (lavsamplestats@missing.flag) { x.idx <- lavsamplestats@x.idx[[g]] X.MEAN <- X.COV <- NULL if (length(x.idx) > 0L) { X.MEAN <- lavsamplestats@missing.h1[[g]]$mu[x.idx] X.COV <- lavsamplestats@missing.h1[[g]]$sigma[x.idx, x.idx, drop = FALSE ] } logl.group[g] <- lav_mvnorm_missing_loglik_samplestats( Yp = lavsamplestats@missing[[g]], Mu = lavimplied$mean[[g]], Sigma = lavimplied$cov[[g]], x.idx = lavsamplestats@x.idx[[g]], x.mean = X.MEAN, # not needed? should be part of Sigma x.cov = X.COV ) # not needed at all! 
        # x.mean = lavsamplestats@mean.x[[g]],
        # x.cov = lavsamplestats@cov.x[[g]])
      } else {
        # single-level, complete data
        if (lavoptions$conditional.x) {
          logl.group[g] <- lav_mvreg_loglik_samplestats(
            sample.res.int = lavsamplestats@res.int[[g]],
            sample.res.slopes = lavsamplestats@res.slopes[[g]],
            sample.res.cov = lavsamplestats@res.cov[[g]],
            sample.mean.x = lavsamplestats@mean.x[[g]],
            sample.cov.x = lavsamplestats@cov.x[[g]],
            sample.nobs = lavsamplestats@nobs[[g]],
            res.int = lavimplied$res.int[[g]],
            res.slopes = lavimplied$res.slopes[[g]],
            res.cov = lavimplied$res.cov[[g]],
            Sinv.method = "eigen"
          )
        } else {
          if (lavoptions$meanstructure) {
            Mu <- lavimplied$mean[[g]]
          } else {
            Mu <- lavsamplestats@mean[[g]]
          }
          logl.group[g] <- lav_mvnorm_loglik_samplestats(
            sample.mean = lavsamplestats@mean[[g]],
            sample.cov = lavsamplestats@cov[[g]],
            sample.nobs = lavsamplestats@nobs[[g]],
            Mu = Mu,
            Sigma = lavimplied$cov[[g]],
            x.idx = lavsamplestats@x.idx[[g]],
            x.mean = lavsamplestats@mean.x[[g]],
            x.cov = lavsamplestats@cov.x[[g]],
            Sinv.method = "eigen",
            Sigma.inv = NULL
          )
        }
      } # complete
    } # g
  } # logl.ok is TRUE

  # logl
  logl <- sum(logl.group)

  # number of parameters, taking into account any equality constraints
  npar <- lavmodel@nx.free
  if (nrow(lavmodel@con.jac) > 0L) {
    ceq.idx <- attr(lavmodel@con.jac, "ceq.idx")
    if (length(ceq.idx) > 0L) {
      neq <- qr(lavmodel@con.jac[ceq.idx, , drop = FALSE])$rank
      npar <- npar - neq
    }
  } else if (.hasSlot(lavmodel, "ceq.simple.only") &&
    lavmodel@ceq.simple.only) {
    npar <- lavmodel@nx.free
  }

  # logl
  logl <- sum(logl.group)

  if (logl.ok) {
    # AIC
    AIC <- lav_fit_aic(logl = logl, npar = npar)
    # BIC
    BIC <- lav_fit_bic(logl = logl, npar = npar, N = lavsamplestats@ntotal)
    # BIC2
    BIC2 <- lav_fit_sabic(
      logl = logl, npar = npar,
      N = lavsamplestats@ntotal
    )
  } else {
    AIC <- BIC <- BIC2 <- as.numeric(NA)
  }

  out <- list(
    loglik = logl, loglik.group = logl.group,
    npar = npar, ntotal = lavsamplestats@ntotal,
    AIC = AIC, BIC = BIC, BIC2 = BIC2,
    estimator = lavoptions$estimator,
    conditional.x = lavoptions$conditional.x,
    fixed.x = lavoptions$fixed.x
  )

  out
}
lavaan/R/lav_lavaanList_multipleImputation.R0000644000176200001440000000161214627656441021004 0ustar liggesusers# lavMultipleImputation: fit the *same* model, on a set of imputed datasets
# YR - 11 July 2016
lavMultipleImputation <- function(model = NULL, dataList = NULL,
                                  ndat = length(dataList), cmd = "sem",
                                  ...,
                                  store.slots = c("partable"),
                                  FUN = NULL,
                                  show.progress = FALSE,
                                  parallel = c("no", "multicore", "snow"),
                                  ncpus = max(1L, parallel::detectCores() - 1L),
                                  cl = NULL) {
  # dotdotdot
  dotdotdot <- list()

  # fit multiple times
  fit <- do.call("lavaanList", args = c(list(
    model = model,
    dataList = dataList, ndat = ndat, cmd = cmd,
    store.slots = store.slots, FUN = FUN,
    show.progress = show.progress,
    parallel = parallel, ncpus = ncpus, cl = cl
  ), dotdotdot))

  # flag multiple imputation
  fit@meta$lavMultipleImputation <- TRUE

  fit
}
lavaan/R/xxx_sam.R0000644000176200001440000002272114627656441013552 0ustar liggesusers# SAM: a Structural After Measurement approach
#
# Yves Rosseel & Wen-Wei Loh, Feb-May 2019

# local vs global sam
# local sam = alternative for FSR+Croon
#   - but no need to compute factor scores or corrections
# global sam = (old) twostep
#   - but we can also take a 'local' perspective

# restrictions:
#
# local and global:
#   - all (measured) latent variables must have indicators that are observed
# local:
#   - only if LAMBDA is of full column rank (eg no SRM, no bi-factor, no MTMM)
#   - if multiple groups: each group has the same set of latent
variables! # - global approach is used to compute corrected two-step standard errors # YR 12 May 2019 - first version # YR 22 May 2019 - merge sam/twostep (call it 'local' vs 'global' sam) # YR 27 June 2021 - prepare for `public' release # - add Fuller (1987) 'lambda' correction if (MSM - MTM) is not # positive definite # - se = "none" now works # - store 'local' information in @internal slot (for printing) # YR 16 Oct 2021 - if an indicator is also a predictor/outcome in the # structural part, treat it as an observed predictor # without measurement error in the second step # (ie, set THETA element to zero) # YR 03 Dec 2022 - allow for sam.method = "fsr" and se = "naive" # - add alpha.correction= argument (for small sample correction) # YR 21 May 2023 - allow for latent quadratic/interaction terms in the # structural part (assuming the errors are normal, for now) # YR 25 May 2023 - restructure code into multiple files # - rename veta.force.pd -> lambda.correction # - move alpha.correction= argument to local.options # twostep = wrapper for global sam twostep <- function(model = NULL, data = NULL, cmd = "sem", mm.list = NULL, mm.args = list(), struc.args = list(), ..., # global options output = "lavaan") { sam( model = model, data = data, cmd = cmd, mm.list = mm.list, mm.args = mm.args, struc.args = struc.args, sam.method = "global", # or global ..., # global options output = output ) } # fsr = wrapper for local sam # TODO sam <- function(model = NULL, data = NULL, cmd = "sem", se = "twostep", mm.list = NULL, mm.args = list(bounds = "wide.zerovar"), struc.args = list(estimator = "ML"), sam.method = "local", # or "global", or "fsr" ..., # common options local.options = list( M.method = "ML", # mapping matrix lambda.correction = TRUE, alpha.correction = 0L, # 0 -> (N-1) twolevel.method = "h1" ), # h1, anova, mean global.options = list(), # not used for now output = "lavaan") { # ------------- handling of warn/debug/verbose switches ---------- dotdotdot <- list(...) 
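# Hedged, commented-out usage sketch (model/data names are hypothetical):
#   fit  <- sam(model = myModel, data = myData, sam.method = "local")
#   fit2 <- twostep(model = myModel, data = myData)  # global SAM
# both calls return a regular lavaan object, since output = "lavaan"
# is the default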
if (!is.null(dotdotdot$debug)) { current.debug <- lav_debug() if (lav_debug(dotdotdot$debug)) on.exit(lav_debug(current.debug), TRUE) dotdotdot$debug <- NULL if (lav_debug()) { dotdotdot$warn <- TRUE # force warnings if debug dotdotdot$verbose <- TRUE # force verbose if debug } } if (!is.null(dotdotdot$warn)) { current.warn <- lav_warn() if (lav_warn(dotdotdot$warn)) on.exit(lav_warn(current.warn), TRUE) dotdotdot$warn <- NULL } if (!is.null(dotdotdot$verbose)) { current.verbose <- lav_verbose() if (lav_verbose(dotdotdot$verbose)) on.exit(lav_verbose(current.verbose), TRUE) dotdotdot$verbose <- NULL } # output output <- tolower(output) if (output == "list" || output == "lavaan") { # nothing to do } else { lav_msg_stop(gettext("output should be \"list\" or \"lavaan.\"")) } # check se= argument if (!se %in% c("standard", "naive", "twostep", "twostep2", "bootstrap", "none")) { lav_msg_stop(gettext( "se= argument must be twostep, bootstrap, naive, standard or none.")) } if (!is.null(dotdotdot$conditional.x)) { lav_msg_warn(gettext( "sam() does not support conditional.x = TRUE (yet) -> switching to conditional.x = FALSE")) dotdotdot$conditional.x <- FALSE } ############################################### # STEP 0: process full model, without fitting # ############################################### FIT <- lav_sam_step0( cmd = cmd, model = model, data = data, se = se, sam.method = sam.method, dotdotdot = dotdotdot ) # check for data.type == "none" if (FIT@Data@data.type == "none") { lav_msg_stop(gettext("no data or sample statistics are provided.")) } lavoptions <- lavInspect(FIT, "options") if (lav_verbose()) { cat("This is sam using sam.method = ", sam.method, ".\n", sep = "") } ############################################## # STEP 1: fit each measurement model (block) # ############################################## if (lav_verbose()) { cat("Fitting the measurement part:\n") } STEP1 <- lav_sam_step1( cmd = cmd, mm.list = mm.list, mm.args = mm.args, FIT = FIT, data = data, sam.method = sam.method ) ################################################## # STEP 1b: compute Var(eta) and E(eta) per block # # only needed for local approach! # ################################################## if (sam.method %in% c("local", "fsr")) { # default local.options local.opt <- list( M.method = "ML", lambda.correction = TRUE, alpha.correction = 0L, twolevel.method = "h1" ) local.options <- modifyList(local.opt, local.options, keep.null = FALSE ) STEP1 <- lav_sam_step1_local( STEP1 = STEP1, FIT = FIT, sam.method = sam.method, local.options = local.options ) } #################################### # STEP 2: estimate structural part # #################################### STEP2 <- lav_sam_step2( STEP1 = STEP1, FIT = FIT, sam.method = sam.method, struc.args = struc.args ) # make sure step1.free.idx and step2.free.idx are disjoint both.idx <- which(STEP1$step1.free.idx %in% STEP2$step2.free.idx) if (length(both.idx) > 0L) { STEP1$step1.free.idx <- STEP1$step1.free.idx[-both.idx] # STEP1$Sigma.11[both.idx,] <- 0 # STEP1$Sigma.11[,both.idx] <- 0 STEP1$Sigma.11 <- STEP1$Sigma.11[-both.idx, -both.idx] } if (output == "list" && lavoptions$se == "none") { return(c(STEP1, STEP2)) } ################################################################ # Step 3: assemble results in a 'dummy' JOINT model for output # ################################################################ if (lav_verbose()) { cat("Assembling results for output ... 
") } JOINT <- lav_sam_step3_joint( FIT = FIT, PT = STEP2$PT, sam.method = sam.method ) # fill information from FIT.PA JOINT@Options$optim.method <- STEP2$FIT.PA@Options$optim.method JOINT@Model@estimator <- FIT@Options$estimator # could be DWLS! if (sam.method %in% c("local", "fsr")) { JOINT@optim <- STEP2$FIT.PA@optim JOINT@test <- STEP2$FIT.PA@test } # fill in vcov/se information from step 1 if (lavoptions$se != "none") { JOINT@Options$se <- lavoptions$se # naive/twostep/none if (JOINT@Model@ceq.simple.only) { VCOV.ALL <- matrix( 0, JOINT@Model@nx.unco, JOINT@Model@nx.unco ) } else { VCOV.ALL <- matrix( 0, JOINT@Model@nx.free, JOINT@Model@nx.free ) } VCOV.ALL[STEP1$step1.free.idx, STEP1$step1.free.idx] <- STEP1$Sigma.11 JOINT@vcov <- list( se = lavoptions$se, information = lavoptions$information[1], vcov = VCOV.ALL ) # no need to fill @ParTable$se, as step1 SE values should already # be in place } if (lav_verbose()) { cat("done.\n") } ############################################## # Step 4: compute standard errors for step 2 # ############################################## if (lavoptions$se == "bootstrap") { # # TODO # #VCOV <- lav_sam_boot_se(FIT = FIT, JOINT = JOINT, STEP1 = STEP1, # STEP2 = STEP2, local.options = local.options #) # } else { VCOV <- lav_sam_step2_se( FIT = FIT, JOINT = JOINT, STEP1 = STEP1, STEP2 = STEP2, local.options = local.options ) # fill in twostep standard errors if (lavoptions$se != "none") { PT <- JOINT@ParTable JOINT@Options$se <- lavoptions$se JOINT@vcov$se <- lavoptions$se JOINT@vcov$vcov[STEP2$step2.free.idx, STEP2$step2.free.idx] <- VCOV$VCOV PT$se <- lav_model_vcov_se( lavmodel = JOINT@Model, lavpartable = PT, VCOV = JOINT@vcov$vcov ) JOINT@ParTable <- PT } } ################## # Step 5: Output # ################## # assemble pieces to assemble final lavaan object if (output == "lavaan") { if (lav_verbose()) { cat("Assembling results for output ... ") } SAM <- lav_sam_table( JOINT = JOINT, STEP1 = STEP1, FIT.PA = STEP2$FIT.PA, mm.args = mm.args, struc.args = struc.args, sam.method = sam.method, local.options = local.options, global.options = global.options ) res <- JOINT res@internal <- SAM if (lav_verbose()) { cat("done.\n") } } else { res <- c(STEP1, STEP2, VCOV) } if (lav_verbose()) { cat("End of sam.\n") } res } lavaan/R/lav_print.R0000644000176200001440000013460514627656441014066 0ustar liggesusers## NOTE: ## round(1.2355, 3) = 1.236 ## but ## round(1.2345, 3) = 1.234 ## ## perhaps we should add 0.0005 or something to avoid this? print.lavaan.data.frame <- function(x, ..., nd = 3L) { ROW.NAMES <- rownames(x) y <- as.data.frame(lapply(x, function(x) { if (is.numeric(x)) round(x, nd) else x })) rownames(y) <- ROW.NAMES if (!is.null(attr(x, "header"))) { cat("\n", attr(x, "header"), "\n\n", sep = "") } print(y, ...) if (!is.null(attr(x, "footer"))) { cat("\n", attr(x, "footer"), "\n\n", sep = "") } invisible(x) } print.lavaan.list <- function(x, ...) { y <- unclass(x) attr(y, "header") <- NULL header <- attr(x, "header") if (!is.null(header)) { if (is.character(header)) { cat("\n", header, "\n\n", sep = "") } else { print(header) cat("\n") } } print(y, ...) 
invisible(x) } # prints only lower triangle of a symmetric matrix print.lavaan.matrix.symmetric <- function(x, ..., nd = 3L, shift = 0L, diag.na.dot = TRUE) { # print only lower triangle of a symmetric matrix # this function was inspired by the `print.correlation' function # in package nlme x <- as.matrix(x) # just in case y <- x y <- unclass(y) attributes(y)[c("header", "footer")] <- NULL ll <- lower.tri(x, diag = TRUE) y[ll] <- format(round(x[ll], digits = nd)) y[!ll] <- "" if (diag.na.dot) { # print a "." instead of NA on the main diagonal (eg summary.efaList) diag.idx <- lav_matrix_diag_idx(ncol(x)) tmp <- x[diag.idx] if (all(is.na(tmp))) { y[diag.idx] <- paste(strrep(" ", nd + 2L), ".", sep = "") } } if (!is.null(colnames(x))) { colnames(y) <- abbreviate(colnames(x), minlength = nd + 3L) } if (shift > 0L) { empty.string <- rep(strrep(x = " ", times = shift), times = nrow(x)) if (!is.null(rownames(x))) { rownames(y) <- paste(empty.string, rownames(x), sep = "") } else { rownames(y) <- empty.string } } if (!is.null(attr(x, "header"))) { cat("\n", attr(x, "header"), "\n\n", sep = "") } print(y, ..., quote = FALSE, right = TRUE) if (!is.null(attr(x, "footer"))) { cat("\n", attr(x, "footer"), "\n\n", sep = "") } invisible(x) } print.lavaan.matrix <- function(x, ..., nd = 3L, shift = 0L) { x <- as.matrix(x) # just in case y <- unclass(x) attributes(y)[c("header", "footer")] <- NULL if (!is.null(colnames(x))) { colnames(y) <- abbreviate(colnames(x), minlength = nd + 3L) } if (shift > 0L) { empty.string <- rep(strrep(x = " ", times = shift), times = nrow(x)) if (!is.null(rownames(x))) { rownames(y) <- paste(empty.string, rownames(x), sep = "") } else { rownames(y) <- empty.string } } if (!is.null(attr(x, "header"))) { cat("\n", attr(x, "header"), "\n\n", sep = "") } print(round(y, nd), right = TRUE, ...) if (!is.null(attr(x, "footer"))) { cat("\n", attr(x, "footer"), "\n\n", sep = "") } invisible(x) } print.lavaan.vector <- function(x, ..., nd = 3L, shift = 0L) { y <- unclass(x) attributes(y)[c("header", "footer")] <- NULL # if(!is.null(names(x))) { # names(y) <- abbreviate(names(x), minlength = nd + 3) # } if (!is.null(attr(x, "header"))) { cat("\n", attr(x, "header"), "\n\n", sep = "") } if (shift > 0L) { empty.string <- strrep(x = " ", times = shift) tmp <- format(y, digits = nd, width = 2L + nd) tmp[1] <- paste(empty.string, tmp[1], sep = "") print(tmp, quote = FALSE, ...) } else { print(round(y, nd), right = TRUE, ...) } if (!is.null(attr(x, "footer"))) { cat("\n", attr(x, "footer"), "\n\n", sep = "") } invisible(x) } print.lavaan.character <- function(x, ...) { cat(x) invisible(x) } print.lavaan.parameterEstimates <- function(x, ..., nd = 3L) { # format for numeric values num.format <- paste("%", max(8L, nd + 5L), ".", nd, "f", sep = "") int.format <- paste("%", max(8L, nd + 5L), "d", sep = "") char.format <- paste("%", max(8L, nd + 5L), "s", sep = "") # output sections GSECTIONS <- c( "Latent Variables", "Composites", "Regressions", "Covariances", "Intercepts", "Thresholds", "Variances", "Scales y*", "Group Weight", "R-Square" ) ASECTIONS <- c( "Defined Parameters", "Constraints" ) # header? header <- attr(x, "header") if (is.null(header)) { header <- FALSE } if (header) { cat("\nParameter Estimates:\n\n") # info about parameterization (if categorical only) categorical.flag <- attr(x, "categorical") if (categorical.flag) { # container c1 <- c2 <- character(0L) # which parameterization? 
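# (throughout this printer, c1 collects the left-hand labels and c2 the
# right-hand values of a two-column block; both are formatted and then
# printed via write.table() below)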
c1 <- c(c1, "Parameterization") tmp.txt <- attr(x, "parameterization") c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) # format c1/c2 c1 <- format(c1, width = 38L) c2 <- format(c2, width = 13L + max(0, (nd - 3L)) * 4L, justify = "right" ) # create character matrix M <- cbind(c1, c2, deparse.level = 0) colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # info about standard errors (if we have x$se only) # 1. information # 2. se # 3. bootstrap requested/successful draws if (!is.null(x$se)) { # container c1 <- c2 <- character(0L) # which type of standard errors? c1 <- c(c1, "Standard errors") if (attr(x, "se") == "robust.huber.white") { tmp.txt <- "sandwich" # since 0.6-6 } else { tmp.txt <- attr(x, "se") } c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) # information if (attr(x, "se") != "bootstrap") { # type for information if (attr(x, "se") == "robust.huber.white") { c1 <- c(c1, "Information bread") } else { c1 <- c(c1, "Information") } tmp.txt <- attr(x, "information") c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) # if observed, which type? (hessian of h1) if (attr(x, "information") == "observed") { c1 <- c(c1, "Observed information based on") tmp.txt <- attr(x, "observed.information") c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) } # if h1 is involved, structured or unstructured? if (attr(x, "information") %in% c("expected", "first.order") || attr(x, "observed.information") == "h1") { if (attr(x, "se") == "robust.huber.white" && attr(x, "h1.information") != attr(x, "h1.information.meat")) { c1 <- c(c1, "Information bread saturated (h1) model") } else { c1 <- c(c1, "Information saturated (h1) model") } tmp.txt <- attr(x, "h1.information") c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) } # if sandwich, which information for the meat? (first.order) # only print if it is NOT first.order if (attr(x, "se") == "robust.huber.white" && attr(x, "information.meat") != "first.order") { c1 <- c(c1, "Information meat") tmp.txt <- attr(x, "information.meat") c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) } # if sandwich, structured or unstructured for the meat? # only print if it is not the same as h1.information if (attr(x, "se") == "robust.huber.white" && attr(x, "h1.information.meat") != attr(x, "h1.information")) { c1 <- c(c1, "Information meat saturated (h1) model") tmp.txt <- attr(x, "h1.information.meat") c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) } } # no bootstrap #TDJ: Pooling options for lavaan.mi-class objects (which NEVER bootstrap) if (isTRUE(attr(x, "pooled"))) { ## add an empty element for a space before pooling section c1 <- c(c1, "", "Pooled across imputations") c2 <- c(c2, "", "Rubin's (1987) rules") } if (!is.null(attr(x, "scale.W"))) { c1 <- c(c1, "Augment within-imputation variance") if (attr(x, "scale.W")) { c2 <- c(c2, "Scale by average RIV") } else { c2 <- c(c2, "Add between component") } } if (!is.null(attr(x, "asymptotic"))) { c1 <- c(c1, "Wald test for pooled parameters") if (attr(x, "asymptotic")) { c2 <- c(c2, "Normal (z) distribution") } else { c2 <- c(c2, "t(df) distribution") } } # 4. 
if (attr(x, "se") == "bootstrap" && !is.null(attr(x, "bootstrap"))) { c1 <- c(c1, "Number of requested bootstrap draws") c2 <- c(c2, attr(x, "bootstrap")) c1 <- c(c1, "Number of successful bootstrap draws") c2 <- c(c2, attr(x, "bootstrap.successful")) } # format c1/c2 c1 <- format(c1, width = 38L) c2 <- format(c2, width = 13L + max(0, (nd - 3L)) * 4L, justify = "right" ) # create character matrix M <- cbind(c1, c2, deparse.level = 0) colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) #TDJ: Message for lavaan.mi-class objects when df > 1000 for t test if (isTRUE(attr(x, "infDF"))) { cat(c("\n Pooled t statistics with df >= 1000 are displayed with", "\n df = Inf(inity) to save space. Although the t distribution", "\n with large df closely approximates a standard normal", "\n distribution, exact df for reporting these t tests can be", "\n obtained from parameterEstimates.mi() \n\n"), sep = "") } } } # number of groups if (is.null(x$group)) { ngroups <- 1L x$group <- rep(1L, length(x$lhs)) } else { ngroups <- lav_partable_ngroups(x) } # number of levels if (is.null(x$level)) { nlevels <- 1L x$level <- rep(1L, length(x$lhs)) } else { nlevels <- lav_partable_nlevels(x) } # block column if (is.null(x$block)) { x$block <- rep(1L, length(x$lhs)) } # step column (SAM) # if(!is.null(x$step)) { # tmp.LABEL <- rep("", length(x$lhs)) # p1.idx <- which(x$step == 1L) # p2.idx <- which(x$step == 2L) # tmp.LABEL[p1.idx] <- "1" # tmp.LABEL[p2.idx] <- "2" # # if(is.null(x$label)) { # x$label <- tmp.LABEL # } else { # x$label <- paste(x$label, tmp.LABEL, sep = "") # } # # x$step <- NULL # } # round to 3 digits after the decimal point y <- as.data.frame( lapply(x, function(x) { if (is.integer(x)) { sprintf(int.format, x) } else if (is.numeric(x)) { sprintf(num.format, x) } else { x } }), stringsAsFactors = FALSE ) # always remove /block/level/group/op/rhs/label/exo columns y$op <- y$group <- y$rhs <- y$label <- y$exo <- NULL y$block <- y$level <- NULL y$efa <- NULL # if standardized, remove std.nox column (space reasons only) # unless, std.all is already removed if (!is.null(y$std.all)) { y$std.nox <- NULL } # convert to character matrix m <- as.matrix(format.data.frame(y, na.encode = FALSE, justify = "right" )) # use empty row names rownames(m) <- rep("", nrow(m)) # handle se == 0.0 if (!is.null(x$se)) { se.idx <- which(x$se == 0) if (length(se.idx) > 0L) { m[se.idx, "se"] <- "" if (!is.null(x$z)) { m[se.idx, "z"] <- "" } if (!is.null(x$pvalue)) { m[se.idx, "pvalue"] <- "" } ## for lavaan.mi-class objects (semTools) if (!is.null(x$t)) { m[se.idx, "t"] <- "" } if (!is.null(x$df)) { m[se.idx, "df"] <- "" } } # handle se == NA se.idx <- which(is.na(x$se)) if (length(se.idx) > 0L) { if (!is.null(x$z)) { m[se.idx, "z"] <- "" } if (!is.null(x$pvalue)) { m[se.idx, "pvalue"] <- "" } ## for lavaan.mi-class objects (semTools) if (!is.null(x$t)) { m[se.idx, "t"] <- "" } if (!is.null(x$df)) { m[se.idx, "df"] <- "" } } } # handle lower/upper boundary points if (!is.null(x$lower)) { b.idx <- which(abs(x$lower - x$est) < sqrt(.Machine$double.eps) & (is.na(x$se) | (is.finite(x$se) & x$se != 0.0))) if (length(b.idx) > 0L && !is.null(x$pvalue)) { m[b.idx, "pvalue"] <- "" if (is.null(x$label)) { x$label <- rep("", length(x$lhs)) } x$label[b.idx] <- ifelse(nchar(x$label[b.idx]) > 0L, paste(x$label[b.idx], "+lb", sep = ""), "lb" ) } # remove lower column m <- m[, colnames(m) != "lower"] } if (!is.null(x$upper)) { b.idx <- which(abs(x$upper - 
x$est) < sqrt(.Machine$double.eps) & is.finite(x$se) & x$se != 0.0) if (length(b.idx) > 0L && !is.null(x$pvalue)) { m[b.idx, "pvalue"] <- "" if (is.null(x$label)) { x$label <- rep("", length(x$lhs)) } x$label[b.idx] <- ifelse(nchar(x$label[b.idx]) > 0L, paste(x$label[b.idx], "+ub", sep = ""), "ub" ) } # remove upper column m <- m[, colnames(m) != "upper"] } # handle fmi if (!is.null(x$fmi)) { se.idx <- which(x$se == 0) if (length(se.idx) > 0L) { m[se.idx, "fmi"] <- "" ## for lavaan.mi-class objects (semTools) if (!is.null(x$riv)) m[se.idx, "riv"] <- "" } not.idx <- which(x$op %in% c(":=", "<", ">", "==")) if (length(not.idx) > 0L) { if (!is.null(x$fmi)) { m[not.idx, "fmi"] <- "" ## for lavaan.mi-class objects (semTools) if (!is.null(x$riv)) m[not.idx, "riv"] <- "" } } } # for blavaan, handle Post.SD and PSRF if (!is.null(x$Post.SD)) { se.idx <- which(x$Post.SD == 0) if (length(se.idx) > 0L) { m[se.idx, "Post.SD"] <- "" if (!is.null(x$psrf)) { m[se.idx, "psrf"] <- "" } if (!is.null(x$PSRF)) { m[se.idx, "PSRF"] <- "" } } # handle psrf for defined parameters not.idx <- which(x$op %in% c(":=", "<", ">", "==")) if (length(not.idx) > 0L) { if (!is.null(x$psrf)) { m[not.idx, "psrf"] <- "" } if (!is.null(x$PSRF)) { m[not.idx, "PSRF"] <- "" } } } # rename some column names colnames(m)[colnames(m) == "lhs"] <- "" colnames(m)[colnames(m) == "op"] <- "" colnames(m)[colnames(m) == "rhs"] <- "" colnames(m)[colnames(m) == "step"] <- "Step" colnames(m)[colnames(m) == "est"] <- "Estimate" colnames(m)[colnames(m) == "se"] <- "Std.Err" colnames(m)[colnames(m) == "z"] <- "z-value" colnames(m)[colnames(m) == "pvalue"] <- "P(>|z|)" colnames(m)[colnames(m) == "std.lv"] <- "Std.lv" colnames(m)[colnames(m) == "std.all"] <- "Std.all" colnames(m)[colnames(m) == "std.nox"] <- "Std.nox" colnames(m)[colnames(m) == "prior"] <- "Prior" colnames(m)[colnames(m) == "fmi"] <- "FMI" ## for lavaan.mi-class objects (semTools) if ("t" %in% colnames(m)) { colnames(m)[colnames(m) == "t"] <- "t-value" colnames(m)[colnames(m) == "P(>|z|)"] <- "P(>|t|)" colnames(m)[colnames(m) == "riv"] <- "RIV" } # format column names colnames(m) <- sprintf(char.format, colnames(m)) # exceptions for blavaan: Post.Mean (width = 9), Prior (width = 14) if (!is.null(x$Post.Mean)) { tmp <- gsub("[ \t]+", "", colnames(m), perl = TRUE) # reformat "Post.Mean" column col.idx <- which(tmp == "Post.Mean") if (length(col.idx) > 0L) { tmp.format <- paste("%", max(9, nd + 5), "s", sep = "") colnames(m)[col.idx] <- sprintf(tmp.format, colnames(m)[col.idx]) m[, col.idx] <- sprintf(tmp.format, m[, col.idx]) } # reformat "Prior" column col.idx <- which(tmp == "Prior") if (length(col.idx) > 0L) { MAX <- max(nchar(m[, col.idx])) + 1L tmp.format <- paste("%", max(MAX, nd + 5), "s", sep = "") colnames(m)[col.idx] <- sprintf(tmp.format, colnames(m)[col.idx]) m[, col.idx] <- sprintf(tmp.format, m[, col.idx]) } } b <- 0L # group-specific sections for (g in 1:ngroups) { # group header if (ngroups > 1L) { group.label <- attr(x, "group.label") cat("\n\n") cat("Group ", g, " [", group.label[g], "]:\n", sep = "") } for (l in 1:nlevels) { # block number b <- b + 1L # ov/lv names ov.names <- lavNames(x, "ov", block = b) lv.names <- lavNames(x, "lv", block = b) # level header if (nlevels > 1L) { level.label <- attr(x, "level.label") cat("\n\n") cat("Level ", l, " [", level.label[l], "]:\n", sep = "") } # group-specific sections for (s in GSECTIONS) { if (s == "Latent Variables") { row.idx <- which(x$op == "=~" & !x$lhs %in% ov.names & x$block == b) if (length(row.idx) == 0L) next 
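# .makeNames() (defined further below) abbreviates each name and
# appends any parameter label, so the result fits the fixed-width
# first column of the output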
m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else if (s == "Composites") { row.idx <- which(x$op == "<~" & x$block == b) if (length(row.idx) == 0L) next m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else if (s == "Regressions") { row.idx <- which(x$op == "~" & x$block == b) if (length(row.idx) == 0L) next m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else if (s == "Covariances") { row.idx <- which(x$op == "~~" & x$lhs != x$rhs & !x$exo & x$block == b) if (length(row.idx) == 0L) next # make distinction between residual and plain y.names <- unique(c( lavNames(x, "eqs.y"), lavNames(x, "ov.ind"), lavNames(x, "lv.ind") )) PREFIX <- rep("", length(row.idx)) PREFIX[x$rhs[row.idx] %in% y.names] <- " ." m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx], PREFIX = PREFIX ) # m[row.idx,1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else if (s == "Intercepts") { row.idx <- which(x$op == "~1" & !x$exo & x$block == b) if (length(row.idx) == 0L) next # make distinction between intercepts and means y.names <- unique(c( lavNames(x, "eqs.y"), lavNames(x, "ov.ind"), lavNames(x, "lv.ind") )) PREFIX <- rep("", length(row.idx)) PREFIX[x$lhs[row.idx] %in% y.names] <- " ." m[row.idx, 1] <- .makeNames(x$lhs[row.idx], x$label[row.idx], PREFIX = PREFIX ) # m[row.idx,1] <- .makeNames(x$lhs[row.idx], x$label[row.idx]) } else if (s == "Thresholds") { row.idx <- which(x$op == "|" & x$block == b) if (length(row.idx) == 0L) next m[row.idx, 1] <- .makeNames(paste(x$lhs[row.idx], "|", x$rhs[row.idx], sep = "" ), x$label[row.idx]) } else if (s == "Variances") { row.idx <- which(x$op == "~~" & x$lhs == x$rhs & !x$exo & x$block == b) if (length(row.idx) == 0L) next # make distinction between residual and plain y.names <- unique(c( lavNames(x, "eqs.y"), lavNames(x, "ov.ind"), lavNames(x, "lv.ind") )) PREFIX <- rep("", length(row.idx)) PREFIX[x$rhs[row.idx] %in% y.names] <- " ." m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx], PREFIX = PREFIX ) } else if (s == "Scales y*") { row.idx <- which(x$op == "~*~" & x$block == b) if (length(row.idx) == 0L) next m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else if (s == "Group Weight") { row.idx <- which(x$lhs == "group" & x$op == "%" & x$block == b) if (length(row.idx) == 0L) next m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else if (s == "R-Square") { row.idx <- which(x$op == "r2" & x$block == b) if (length(row.idx) == 0L) next m[row.idx, 1] <- .makeNames(x$rhs[row.idx], x$label[row.idx]) } else { row.idx <- integer(0L) } # do we need special formatting for this section? 
# three types: # - regular (nothing to do, except row/colnames) # - R-square # - Latent Variables (and Composites), Regressions and Covariances # 'bundle' the output per lhs element # bundling if (s %in% c( "Latent Variables", "Composites", "Regressions", "Covariances" )) { nel <- length(row.idx) M <- matrix("", nrow = nel * 2, ncol = ncol(m)) colnames(M) <- colnames(m) rownames(M) <- rep("", NROW(M)) # colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) if (is.null(x$efa)) { LHS <- paste(x$lhs[row.idx], x$op[row.idx]) } else { LHS <- paste( x$lhs[row.idx], x$op[row.idx], x$efa[row.idx] ) } lhs.idx <- seq(1, nel * 2L, 2L) rhs.idx <- seq(1, nel * 2L, 2L) + 1L if (s == "Covariances") { # make distinction between residual and plain y.names <- unique(c( lavNames(x, "eqs.y"), lavNames(x, "ov.ind"), lavNames(x, "lv.ind") )) PREFIX <- rep("", length(row.idx)) PREFIX[x$lhs[row.idx] %in% y.names] <- "." } else { PREFIX <- rep("", length(LHS)) } M[lhs.idx, 1] <- sprintf("%1s%-15s", PREFIX, LHS) M[rhs.idx, ] <- m[row.idx, ] # avoid duplicated LHS labels if (nel > 1L) { del.idx <- integer(0) old.lhs <- "" for (i in 1:nel) { if (LHS[i] == old.lhs) { del.idx <- c(del.idx, lhs.idx[i]) } old.lhs <- LHS[i] } if (length(del.idx) > 0L) { M <- M[-del.idx, , drop = FALSE] } } cat("\n", s, ":\n", sep = "") # cat("\n") print(M, quote = FALSE) # R-square } else if (s == "R-Square") { M <- m[row.idx, 1:2, drop = FALSE] colnames(M) <- colnames(m)[1:2] rownames(M) <- rep("", NROW(M)) # colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) cat("\n", s, ":\n", sep = "") # cat("\n") print(M, quote = FALSE) # Regular } else { # M <- rbind(matrix("", nrow = 1L, ncol = ncol(m)), # m[row.idx,]) M <- m[row.idx, , drop = FALSE] colnames(M) <- colnames(m) rownames(M) <- rep("", NROW(M)) # colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) cat("\n", s, ":\n", sep = "") # cat("\n") print(M, quote = FALSE) } } # GSECTIONS } # levels } # groups # asections for (s in ASECTIONS) { if (s == "Defined Parameters") { row.idx <- which(x$op == ":=") m[row.idx, 1] <- .makeNames(x$lhs[row.idx], "") M <- m[row.idx, , drop = FALSE] colnames(M) <- colnames(m) } else if (s == "Constraints") { row.idx <- which(x$op %in% c("==", "<", ">")) if (length(row.idx) == 0) next m[row.idx, 1] <- .makeConNames(x$lhs[row.idx], x$op[row.idx], x$rhs[row.idx], nd = nd ) m[row.idx, 2] <- sprintf(num.format, abs(x$est[row.idx])) M <- m[row.idx, 1:2, drop = FALSE] colnames(M) <- c("", sprintf(char.format, "|Slack|")) } else { row.idx <- integer(0L) } if (length(row.idx) == 0L) { next } rownames(M) <- rep("", NROW(M)) # colnames(M)[1] <- sprintf("%-17s", paste(s, ":", sep = "")) # cat("\n") cat("\n", s, ":\n", sep = "") print(M, quote = FALSE) } cat("\n") invisible(m) } .makeNames <- function(NAMES, LABELS, PREFIX = NULL) { W <- 14 if (is.null(PREFIX)) { PREFIX <- rep("", length(NAMES)) } multiB <- FALSE if (any(nchar(NAMES) != nchar(NAMES, "bytes"))) { multiB <- TRUE } if (any(nchar(LABELS) != nchar(LABELS, "bytes"))) { multiB <- TRUE } # labels? 
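# when parameter labels are present (and we are not in a multi-byte
# locale), abbreviate the labels to 4 characters and shrink the names
# so that "name (labl)" still fits within the column width W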
l.idx <- which(nchar(LABELS) > 0L) if (length(l.idx) > 0L) { if (!multiB) { LABELS <- abbreviate(LABELS, 4) LABELS[l.idx] <- paste(" (", LABELS[l.idx], ")", sep = "") MAX.L <- max(nchar(LABELS)) NAMES <- abbreviate(NAMES, minlength = (W - MAX.L), strict = TRUE ) } else { # do not abbreviate anything (eg in multi-byte locales) MAX.L <- 4L } NAMES <- sprintf(paste("%-", (W - MAX.L), "s%", MAX.L, "s", sep = "" ), NAMES, LABELS) } else { if (!multiB) { NAMES <- abbreviate(NAMES, minlength = W, strict = TRUE) } else { NAMES <- sprintf(paste("%-", W, "s", sep = ""), NAMES) } } char.format <- paste("%3s%-", W, "s", sep = "") sprintf(char.format, PREFIX, NAMES) } .makeConNames <- function(lhs, op, rhs, nd) { nd <- max(nd, 3) W <- 41 + (nd - 3) * 3 nel <- length(lhs) if (length(nel) == 0L) { return(character(0)) } NAMES <- character(nel) for (i in 1:nel) { if (rhs[i] == "0" && op[i] == ">") { con.string <- paste(lhs[i], " - 0", sep = "") } else if (rhs[i] == "0" && op[i] == "<") { con.string <- paste(rhs[i], " - (", lhs[i], ")", sep = "") } else if (rhs[i] != "0" && op[i] == ">") { con.string <- paste(lhs[i], " - (", rhs[i], ")", sep = "") } else if (rhs[i] != "0" && op[i] == "<") { con.string <- paste(rhs[i], " - (", lhs[i], ")", sep = "") } else if (rhs[i] == "0" && op[i] == "==") { con.string <- paste(lhs[i], " - 0", sep = "") } else if (rhs[i] != "0" && op[i] == "==") { con.string <- paste(lhs[i], " - (", rhs[i], ")", sep = "") } con.string <- abbreviate(con.string, W, strict = TRUE) char.format <- paste(" %-", W, "s", sep = "") NAMES[i] <- sprintf(char.format, con.string) } NAMES } summary.lavaan.fsr <- function(object, ...) { dotdotdot <- list(...) if (!is.null(dotdotdot$nd)) { nd <- dotdotdot$nd } else { nd <- 3L } print.lavaan.fsr(x = object, nd = nd, mm = TRUE, struc = TRUE) } print.lavaan.fsr <- function(x, ..., nd = 3L, mm = FALSE, struc = FALSE) { y <- unclass(x) # print header if (!is.null(y$header)) { cat(y$header) cat("\n") } if (mm && !is.null(y$MM.FIT)) { cat("\n") nblocks <- length(y$MM.FIT) for (b in seq_len(nblocks)) { cat( "Measurement block for latent variable(s):", paste(lavNames(y$MM.FIT[[b]], "lv")), "\n" ) # fit measures? 
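# show chisq/df/pvalue/cfi/rmsea/srmr for this measurement block,
# unless the block was fitted with test = "none"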
b.options <- lavInspect(y$MM.FIT[[b]], "options") if (!(length(b.options$test) == 1L && b.options$test == "none")) { cat("\n") print(fitMeasures(y$MM.FIT[[b]], c("chisq", "df", "pvalue", "cfi", "rmsea", "srmr"))) } # parameter estimates PE <- parameterEstimates(y$MM.FIT[[b]], ci = FALSE, output = "text", header = TRUE ) print.lavaan.parameterEstimates(PE, ..., nd = nd) cat("\n") } } # print PE if (struc) { cat("Structural Part\n") cat("\n") print(summary(y$STRUC.FIT, fit.measures = FALSE, estimates = FALSE, modindices = FALSE )) FIT <- fitMeasures(y$STRUC.FIT, fit.measures = "default") if (FIT["df"] > 0) { print.lavaan.fitMeasures(FIT, add.h0 = FALSE) } } PE <- parameterEstimates(y$STRUC.FIT, ci = FALSE, remove.eq = FALSE, remove.system.eq = TRUE, remove.ineq = FALSE, remove.def = FALSE, remove.nonfree = FALSE, remove.unused = TRUE, output = "text", header = TRUE ) print.lavaan.parameterEstimates(PE, ..., nd = nd) invisible(y) } # new in 0.6-12 print.lavaan.summary <- function(x, ..., nd = 3L) { y <- unclass(x) # change to ordinary list # get nd, if it is stored as an attribute ND <- attr(y, "nd") if (!is.null(ND) && is.numeric(ND)) { nd <- as.integer(ND) } # header if (!is.null(y$header)) { lavaan.version <- y$header$lavaan.version sam.approach <- y$header$sam.approach optim.method <- y$header$optim.method optim.iterations <- y$header$optim.iterations optim.converged <- y$header$optim.converged # sam or sem? if (sam.approach) { cat("This is ", sprintf("lavaan %s", lavaan.version), " -- using the SAM approach to SEM\n", sep = "" ) } else { cat(sprintf("lavaan %s ", lavaan.version)) # Convergence or not? if (optim.method == "none") { cat("-- DRY RUN with 0 iterations --\n") } else if (optim.iterations > 0) { if (optim.converged) { if (optim.iterations == 1L) { cat("ended normally after 1 iteration\n") } else { cat(sprintf( "ended normally after %i iterations\n", optim.iterations )) } } else { if (optim.iterations == 1L) { cat("did NOT end normally after 1 iteration\n") } else { cat(sprintf( "did NOT end normally after %i iterations\n", optim.iterations )) } cat("** WARNING ** Estimates below are most likely unreliable\n") } } else { cat("did not run (perhaps do.fit = FALSE)?\n") cat("** WARNING ** Estimates below are simply the starting values\n") } } } #TDJ: print header for lavaan.mi object (or nothing when NULL) cat(y$top_of_lavaanmi) # optim if (!is.null(y$optim)) { estimator <- y$optim$estimator estimator.args <- y$optim$estimator.args optim.method <- y$optim$optim.method npar <- y$optim$npar eq.constraints <- y$optim$eq.constraints nrow.ceq.jac <- y$optim$nrow.ceq.jac nrow.cin.jac <- y$optim$nrow.cin.jac nrow.con.jac <- y$optim$nrow.con.jac con.jac.rank <- y$optim$con.jac.rank cat("\n") # cat("Optimization information:\n\n") c1 <- c("Estimator") # second column tmp.est <- toupper(estimator) if (tmp.est == "DLS") { dls.first.letter <- substr( estimator.args$dls.GammaNT, 1L, 1L ) tmp.est <- paste("DLS-", toupper(dls.first.letter), sep = "") } c2 <- tmp.est # additional estimator args if (!is.null(estimator.args) && length(estimator.args) > 0L) { if (estimator == "DLS") { c1 <- c(c1, "Estimator DLS value for a") c2 <- c(c2, estimator.args$dls.a) } } # optimization method + npar c1 <- c(c1, "Optimization method", "Number of model parameters") c2 <- c(c2, toupper(optim.method), npar) # optional output if (eq.constraints) { c1 <- c(c1, "Number of equality constraints") c2 <- c(c2, nrow.ceq.jac) } if (nrow.cin.jac > 0L) { c1 <- c(c1, "Number of inequality constraints") c2 <- c(c2, 
nrow.cin.jac) } if (nrow.con.jac > 0L) { if (con.jac.rank == (nrow.ceq.jac + nrow.cin.jac)) { # nothing to do (don't print, as this is redundant information) } else { c1 <- c(c1, "Row rank of the constraints matrix") c2 <- c(c2, con.jac.rank) } } # format c1 <- format(c1, width = 40L) c2 <- format(c2, width = 11L + max(0, (nd - 3L)) * 4L, justify = "right" ) # character matrix M <- cbind(c1, c2, deparse.level = 0) colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # sam header if (!is.null(y$sam.header)) { cat("\n") sam.method <- y$sam.header$sam.method sam.local.options <- y$sam.header$sam.local.options sam.mm.list <- y$sam.header$sam.mm.list sam.mm.estimator <- y$sam.header$sam.mm.estimator sam.struc.estimator <- y$sam.header$sam.struc.estimator # sam method c1 <- c("SAM method") c2 <- toupper(sam.method) # options if (sam.method == "local") { c1 <- c(c1, "Mapping matrix M method") c2 <- c(c2, sam.local.options$M.method) # TODo: more! } # number of measurement blocks c1 <- c(c1, "Number of measurement blocks") c2 <- c(c2, length(sam.mm.list)) # estimator measurement blocks c1 <- c(c1, "Estimator measurement part") c2 <- c(c2, sam.mm.estimator) # estimator structural part c1 <- c(c1, "Estimator structural part") c2 <- c(c2, sam.struc.estimator) # format c1 <- format(c1, width = 40L) c2 <- format(c2, width = 11L + max(0, (nd - 3L)) * 4L, justify = "right" ) # character matrix M <- cbind(c1, c2, deparse.level = 0) colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # efa/rotation if (!is.null(y$rotation)) { cat("\n") rotation <- y$rotation rotation.args <- y$rotation.args # cat("Rotation information:\n\n") # container c1 <- c2 <- character(0L) # rotation method c1 <- c(c1, "Rotation method") if (rotation$rotation == "none") { MM <- toupper(rotation$rotation) } else if (rotation$rotation.args$orthogonal) { MM <- paste(toupper(rotation$rotation), " ", "ORTHOGONAL", sep = "" ) } else { MM <- paste(toupper(rotation$rotation), " ", "OBLIQUE", sep = "" ) } c2 <- c(c2, MM) if (rotation$rotation != "none") { # method options if (rotation$rotation == "geomin") { c1 <- c(c1, "Geomin epsilon") c2 <- c(c2, rotation$rotation.args$geomin.epsilon) } else if (rotation$rotation == "orthomax") { c1 <- c(c1, "Orthomax gamma") c2 <- c(c2, rotation$rotation.args$orthomax.gamma) } else if (rotation$rotation == "cf") { c1 <- c(c1, "Crawford-Ferguson gamma") c2 <- c(c2, rotation$rotation.args$cf.gamma) } else if (rotation$rotation == "oblimin") { c1 <- c(c1, "Oblimin gamma") c2 <- c(c2, rotation$rotation.args$oblimin.gamma) } else if (rotation$rotation == "promax") { c1 <- c(c1, "Promax kappa") c2 <- c(c2, rotation$rotation.args$promax.kappa) } # rotation algorithm c1 <- c(c1, "Rotation algorithm (rstarts)") tmp <- paste(toupper(rotation$rotation.args$algorithm), " (", rotation$rotation.args$rstarts, ")", sep = "" ) c2 <- c(c2, tmp) # Standardized metric (or not) c1 <- c(c1, "Standardized metric") if (rotation$rotation.args$std.ov) { c2 <- c(c2, "TRUE") } else { c2 <- c(c2, "FALSE") } # Row weights c1 <- c(c1, "Row weights") tmp.txt <- rotation$rotation.args$row.weights c2 <- c(c2, paste(toupper(substring(tmp.txt, 1, 1)), substring(tmp.txt, 2), sep = "" )) } # format c1/c2 c1 <- format(c1, width = 33L) c2 <- format(c2, width = 18L + max(0, (nd - 3L)) * 4L, justify = "right" ) # create character matrix M <- cbind(c1, c2, deparse.level = 
0) colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # data object if (!is.null(y$data)) { cat("\n") lav_data_print_short(y$data, nd = nd) } # sam local stats: measurement blocks + structural part if (!is.null(y$sam)) { cat("\n") sam.method <- y$sam$sam.method sam.mm.table <- y$sam$sam.mm.table sam.mm.rel <- y$sam$sam.mm.rel sam.struc.fit <- y$sam$sam.struc.fit ngroups <- y$sam$ngroups nlevels <- y$sam$nlevels group.label <- y$sam$group.label level.label <- y$sam$level.label block.label <- y$sam$block.label # measurement tmp <- sam.mm.table if (sam.method == "global") { cat("Summary Information Measurement Part:\n\n") } else { cat("Summary Information Measurement + Structural:\n\n") } print(tmp, row.names = rep(" ", nrow(tmp)), nd = nd) if (sam.method == "local") { # reliability information c1 <- c2 <- character(0L) if (ngroups == 1L && nlevels == 1L) { cat("\n") cat(" Model-based reliability latent variables:\n\n") tmp <- data.frame(as.list(sam.mm.rel[[1]])) class(tmp) <- c("lavaan.data.frame", "data.frame") print(tmp, row.names = rep(" ", nrow(tmp)), nd = nd) } else if (ngroups > 1L && nlevels == 1L) { cat("\n") cat(" Model-based reliability latent variables (per group):\n") for (g in 1:ngroups) { cat("\n") cat(" Group ", g, " [", group.label[g], "]:\n\n", sep = "" ) tmp <- data.frame(as.list(sam.mm.rel[[g]])) class(tmp) <- c("lavaan.data.frame", "data.frame") print(tmp, row.names = rep(" ", nrow(tmp)), nd = nd) } } else if (ngroups == 1L && nlevels > 1L) { cat("\n") cat(" Model-based reliability latent variables (per level):\n") for (g in 1:nlevels) { cat("\n") cat(" Level ", g, " [", level.label[g], "]:\n\n", sep = "" ) tmp <- data.frame(as.list(sam.mm.rel[[g]])) class(tmp) <- c("lavaan.data.frame", "data.frame") print(tmp, row.names = rep(" ", nrow(tmp)), nd = nd) } } else if (ngroups > 1L && nlevels > 1L) { cat("\n") cat(" Model-based reliability latent variables (per group/level):\n") for (g in 1:length(block.label)) { cat("\n") cat(" Group/Level ", g, " [", block.label[g], "]:\n\n", sep = "" ) tmp <- data.frame(as.list(sam.mm.rel[[g]])) class(tmp) <- c("lavaan.data.frame", "data.frame") print(tmp, row.names = rep(" ", nrow(tmp)), nd = nd) } } cat("\n") cat(" Summary Information Structural part:\n\n") tmp <- data.frame(as.list(sam.struc.fit)) class(tmp) <- c("lavaan.data.frame", "data.frame") print(tmp, row.names = rep(" ", nrow(tmp)), nd = nd) } } # test statistics if (!is.null(y$test)) { cat("\n") lav_test_print(y$test, nd = nd) } # extra fit measures (if present) if (!is.null(y$fit)) { add.h0 <- FALSE if (!is.null(attr(y$fit, "add.h0"))) { add.h0 <- isTRUE(attr(y$fit, "add.h0")) } print.lavaan.fitMeasures(y$fit, nd = nd, add.h0 = add.h0) } # efa output if (!is.null(y$efa)) { # get cutoff, if it is stored as an attribute CT <- attr(y, "cutoff") if (!is.null(CT) && is.numeric(CT)) { cutoff <- CT } else { cutoff <- 0.3 } # get dot.cutoff, if it is stored as an attribute DC <- attr(y, "dot.cutoff") if (!is.null(DC) && is.numeric(DC)) { dot.cutoff <- DC } else { dot.cutoff <- 0.1 } # get alpha.level, if it is stored as an attribute AL <- attr(y, "alpha.level") if (!is.null(AL) && is.numeric(AL)) { alpha.level <- AL } else { alpha.level <- 0.01 } for (b in seq_len(y$efa$nblocks)) { if (length(y$efa$block.label) > 0L) { cat("\n\n") cat(y$efa$block.label[[b]], ":\n\n", sep = "") } if (!is.null(y$efa$lambda[[b]])) { cat("\n") if (!is.null(y$efa$lambda.se[[b]]) && alpha.level > 0) { 
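          # standard errors are available: the header announces the '*'
          # significance flag that lav_print_loadings() adds below
          # (two-sided z-test at the given alpha.level)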
cat("Standardized loadings: (* = significant at ", round(alpha.level * 100), "% level)\n\n", sep = "" ) } else { cat("Standardized loadings:\n\n") } LAMBDA <- unclass(y$efa$lambda[[b]]) THETA <- unname(unclass(y$efa$theta[[b]])) lav_print_loadings(LAMBDA, nd = nd, cutoff = cutoff, dot.cutoff = dot.cutoff, alpha.level = alpha.level, resvar = THETA, # diag elements only x.se = y$efa$lambda.se[[b]] ) } if (!is.null(y$efa$sumsq.table[[b]])) { cat("\n") print(y$efa$sumsq.table[[b]], nd = nd) } # factor correlations: if (!y$efa$orthogonal && !is.null(y$efa$psi[[b]]) && ncol(y$efa$psi[[b]]) > 1L) { cat("\n") if (!is.null(y$efa$psi.se[[b]]) && alpha.level > 0) { cat("Factor correlations: (* = significant at ", round(alpha.level * 100), "% level)\n\n", sep = "" ) } else { cat("Factor correlations:\n\n") } lav_print_psi(y$efa$psi[[b]], nd = nd, alpha.level = alpha.level, x.se = y$efa$psi.se[[b]] ) } # factor score determinacy (for regression scores only!) if (!is.null(y$efa$fs.determinacy[[b]])) { cat("\n") cat("Correlation regression factor scores and factors (determinacy):\n\n") print(y$efa$fs.determinacy[[b]], nd = nd) cat("\n") cat("R2 regression factor scores (= squared correlations):\n\n") tmp <- y$efa$fs.determinacy[[b]] tmp2 <- tmp * tmp class(tmp2) <- c("lavaan.vector", "numeric") print(tmp2, nd = nd) } # lambda.structure if (!is.null(y$efa$lambda.structure[[b]])) { cat("\n") cat("Standardized structure (= LAMBDA %*% PSI):\n\n") print(y$efa$lambda.structure[[b]], nd = nd) } # standard errors lambda if (!is.null(y$efa$theta.se[[b]])) { # we check for theta.se # as lambda.se is needed for '*' cat("\n") cat("Standard errors standardized loadings:\n\n") print(y$efa$lambda.se[[b]], nd = nd) } # z-statistics lambda if (!is.null(y$efa$lambda.zstat[[b]])) { cat("\n") cat("Z-statistics standardized loadings:\n\n") print(y$efa$lambda.zstat[[b]], nd = nd) } # pvalues lambda if (!is.null(y$efa$lambda.pvalue[[b]])) { cat("\n") cat("P-values standardized loadings:\n\n") print(y$efa$lambda.pvalue[[b]], nd = nd) } # standard errors theta if (!is.null(y$efa$theta.se[[b]])) { cat("\n") cat("Standard errors unique variances:\n\n") print(y$efa$theta.se[[b]], nd = nd) } # z-statistics theta if (!is.null(y$efa$theta.zstat[[b]])) { cat("\n") cat("Z-statistics unique variances:\n\n") print(y$efa$theta.zstat[[b]], nd = nd) } # pvalues theta if (!is.null(y$efa$theta.pvalue[[b]])) { cat("\n") cat("P-values unique variances:\n\n") print(y$efa$theta.pvalue[[b]], nd = nd) } # standard errors psi if (!is.null(y$efa$theta.se[[b]])) { # we check for theta.se # as psi.se is needed for '*' cat("\n") cat("Standard errors factor correlations:\n\n") print(y$efa$psi.se[[b]], nd = nd) } # z-statistics psi if (!is.null(y$efa$psi.zstat[[b]])) { cat("\n") cat("Z-statistics factor correlations:\n\n") print(y$efa$psi.zstat[[b]], nd = nd) } # pvalues psi if (!is.null(y$efa$psi.pvalue[[b]])) { cat("\n") cat("P-values factor correlations:\n\n") print(y$efa$psi.pvalue[[b]], nd = nd) } } # blocks cat("\n") } # efa # parameter table if (!is.null(y$pe) && is.null(y$efa)) { PE <- y$pe class(PE) <- c( "lavaan.parameterEstimates", "lavaan.data.frame", "data.frame" ) print(PE, nd = nd) } # modification indices if (!is.null(y$mi)) { cat("Modification Indices:\n\n") MI <- y$mi rownames(MI) <- NULL print(MI, nd = nd) } invisible(y) } # helper function to print the loading matrix, masking small loadings lav_print_loadings <- function(x, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1, alpha.level = 0.01, resvar = NULL, x.se = NULL) { # unclass y <- 
unclass(x)
  # round, and create a character matrix
  y <- format(round(y, nd), width = 3L + nd, justify = "right")
  # right-align column names
  colnames(y) <- format(colnames(y), width = 3L + nd, justify = "right")
  # create dot/empty string
  dot.string <- format(".", width = 3L + nd, justify = "right")
  empty.string <- format(" ", width = 3L + nd)
  # print a 'dot' if dot.cutoff < |loading| < cutoff
  if (dot.cutoff < cutoff) {
    y[abs(x) < cutoff & abs(x) > dot.cutoff] <- dot.string
  }
  # print nothing if |loading| < dot.cutoff
  y[abs(x) < min(dot.cutoff, cutoff)] <- empty.string
  # add 'star' for significant loadings (if provided), using alpha.level
  if (!is.null(x.se) && !any(is.na(x.se))) {
    colNAMES <- colnames(y)
    rowNAMES <- rownames(y)
    x.se[x.se < sqrt(.Machine$double.eps)] <- 1 # to avoid NA
    zstat <- x / x.se
    z.cutoff <- qnorm(1 - (alpha.level / 2))
    zstat.string <- ifelse(abs(zstat) > z.cutoff, "*", " ")
    y <- matrix(paste(y, zstat.string, sep = ""), nrow(y), ncol(y))
    colnames(y) <- colNAMES
    rownames(y) <- rowNAMES
  }
  # add resvar
  if (!is.null(resvar)) {
    NAMES <- colnames(y)
    y <- cbind(y, format(round(cbind(resvar, 1 - resvar), nd),
      width = 12L + nd, justify = "right"
    ))
    resvar.names <- format(c("unique.var", "communalities"),
      width = 12L + nd, justify = "right"
    )
    colnames(y) <- c(NAMES, resvar.names)
  }
  # print
  print(y, quote = FALSE)
}

# helper function to print the psi matrix, showing signif stars
lav_print_psi <- function(x, nd = 3L, alpha.level = 0.01, x.se = NULL) {
  # unclass
  y <- unclass(x)
  # round, and create a character matrix
  y <- format(round(y, nd), width = 3L + nd, justify = "right")
  # right-align column names
  colnames(y) <- format(colnames(y), width = 3L + nd, justify = "right")
  # add 'star' for significant correlations (if provided), using alpha.level
  if (!is.null(x.se) && !any(is.na(x.se))) {
    colNAMES <- colnames(y)
    rowNAMES <- rownames(y)
    x.se[x.se < sqrt(.Machine$double.eps)] <- 1 # to avoid NA
    zstat <- x / x.se
    z.cutoff <- qnorm(1 - (alpha.level / 2))
    zstat.string <- ifelse(abs(zstat) > z.cutoff, "*", " ")
    y <- matrix(paste(y, zstat.string, sep = ""), nrow(y), ncol(y))
    colnames(y) <- colNAMES
    rownames(y) <- rowNAMES
  }
  # remove upper part
  ll <- upper.tri(x, diag = FALSE)
  y[ll] <- ""
  # print
  print(y, quote = FALSE)
}
lavaan/R/xxx_lavaan.R0000644000176200001440000003643114627656441014237 0ustar liggesusers
# # # # # # # # # # # # # # # # #
# lavaan main function
# # # # # # # # # # # # # # # # #
#
# main user-visible cfa/sem/growth functions
#
# initial version: YR 25/03/2009
# added lavoptions YR 02/08/2010
# major revision: YR 9/12/2010: - new workflow (since 0.4-5)
#                               - merge cfa/sem/growth functions
# YR 25/02/2012: changed data slot (from list() to S4); data@X contains data
# YR 26 Jan 2017: use '...' to capture the never-ending list of options
# YR 07 Feb 2023: add ov.order= argument
# HJ 18 Oct 2023: extend PML to allow sampling weights
# LDW 26 Feb 2024: split lavaan into smaller steps
#
lavaan <- function(
    # user specified model: can be syntax, parameter Table, ...
    model = NULL,
    # data (second argument, most used)
    data = NULL,
    # variable information
    ordered = NULL,
    # sampling weights
    sampling.weights = NULL,
    # summary data
    sample.cov = NULL,
    sample.mean = NULL,
    sample.th = NULL,
    sample.nobs = NULL,
    # multiple groups?
    group = NULL,
    # multiple levels?
cluster = NULL, # constraints constraints = "", # user-specified variance matrices WLS.V = NULL, # nolint NACOV = NULL, # nolint # internal order of ov.names ov.order = "model", # full slots from previous fits slotOptions = NULL, # nolint slotParTable = NULL, # nolint slotSampleStats = NULL, # nolint slotData = NULL, # nolint slotModel = NULL, # nolint slotCache = NULL, # nolint sloth1 = NULL, # options (dotdotdot) ...) { # start timer start.time0 <- proc.time()[3] timing <- list() timing$start.time <- start.time0 # ------------- adapt parameters ----------------- mc <- match.call(expand.dots = TRUE) temp <- lav_lavaan_step00_parameters( matchcall = mc, syscall = sys.call(), # to get main arguments without partial matching dotdotdot = list(...) ) lavmc <- temp$mc dotdotdot <- temp$dotdotdot cluster <- lavmc$cluster rm(mc) # ------------- handling of warn/debug/verbose switches ---------- if (!is.null(dotdotdot$debug)) { current.debug <- lav_debug() if (lav_debug(dotdotdot$debug)) on.exit(lav_debug(current.debug), TRUE) dotdotdot$debug <- NULL if (lav_debug()) { dotdotdot$warn <- TRUE # force warnings if debug dotdotdot$verbose <- TRUE # force verbose if debug } } if (!is.null(dotdotdot$warn)) { current.warn <- lav_warn() if (lav_warn(dotdotdot$warn)) on.exit(lav_warn(current.warn), TRUE) dotdotdot$warn <- NULL } if (!is.null(dotdotdot$verbose)) { current.verbose <- lav_verbose() if (lav_verbose(dotdotdot$verbose)) on.exit(lav_verbose(current.verbose), TRUE) dotdotdot$verbose <- NULL } # ------------ check data ------------------------ temp <- lav_lavaan_step00_checkdata( data = data, dotdotdot = dotdotdot, sample.cov = sample.cov, sample.nobs = sample.nobs, sample.mean = sample.mean, sample.th = sample.th, NACOV = NACOV, WLS.V = WLS.V, ov.order = ov.order ) data <- temp$data dotdotdot <- temp$dotdotdot sample.cov <- temp$sample.cov sample.nobs <- temp$sample.nobs sample.mean <- temp$sample.mean sample.th <- temp$sample.th NACOV <- temp$NACOV # nolint WLS.V <- temp$WLS.V # nolint ov.order <- temp$ov.order timing <- ldw_add_timing(timing, "init") # ------------ ov.names 1 ----- initial flat model -------------------- flat.model <- lav_lavaan_step01_ovnames_initflat( slotParTable = slotParTable, model = model, dotdotdot.parser = dotdotdot$parser ) # ------------ ov.names 2 ------ handle ov.order ----------------------- flat.model <- lav_lavaan_step01_ovnames_ovorder( flat.model = flat.model, ov.order = ov.order, data = data, sample.cov = sample.cov, slotData = slotData ) # ------------ ov.names 3 ------- group blocks ------------------ ngroups <- 1L # default value temp <- lav_lavaan_step01_ovnames_group( flat.model = flat.model, ngroups = ngroups ) flat.model <- temp$flat.model ov.names <- temp$ov.names ov.names.x <- temp$ov.names.x ov.names.y <- temp$ov.names.y lv.names <- temp$lv.names group.values <- temp$group.values ngroups <- temp$ngroups # ------------ ov.names 4 ------ sanity checks ------------------ lav_lavaan_step01_ovnames_checklv( lv.names = lv.names, data = data, sample.cov = sample.cov, dotdotdot = dotdotdot, slotOptions = slotOptions ) # ------------ ov.names 5 ------ handle ov.names.l -------------- temp <- lav_lavaan_step01_ovnames_namesl( data = data, cluster = cluster, flat.model = flat.model, group.values = group.values, ngroups = ngroups ) flat.model <- temp$flat.model ov.names.l <- temp$ov.names.l # ------------ ov.names 6 ------ sanity check ordered -------------- ordered <- lav_lavaan_step01_ovnames_ordered( ordered = ordered, flat.model = flat.model, data = data ) 
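  # at this point the model syntax has been flattened (flat.model) and the
  # observed-variable bookkeeping (ov.names, ov.names.x, ov.names.y,
  # ov.names.l, per group/level) is ready for the option/data/partable steps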
timing <- ldw_add_timing(timing, "ov.names") # ------------ lavoptions -------------------- lavoptions <- lav_lavaan_step02_options( slotOptions = slotOptions, slotData = slotData, flat.model = flat.model, ordered = ordered, sample.cov = sample.cov, sample.mean = sample.mean, sample.th = sample.th, sample.nobs = sample.nobs, ov.names.l = ov.names.l, sampling.weights = sampling.weights, constraints = constraints, group = group, ov.names.x = ov.names.x, ov.names.y = ov.names.y, dotdotdot = dotdotdot, cluster = cluster, data = data ) # fixed.x = FALSE? set ov.names.x = character(0L) # new in 0.6-1 if (!lavoptions$fixed.x) { ov.names.x <- character(0L) } timing <- ldw_add_timing(timing, "Options") # ------------ lavdata ------------------------ temp <- lav_lavaan_step03_data( slotData = slotData, lavoptions = lavoptions, ov.names = ov.names, ov.names.y = ov.names.y, group = group, data = data, cluster = cluster, ov.names.x = ov.names.x, ov.names.l = ov.names.l, ordered = ordered, sampling.weights = sampling.weights, sample.cov = sample.cov, sample.mean = sample.mean, sample.th = sample.th, sample.nobs = sample.nobs, slotParTable = slotParTable, ngroups = ngroups, dotdotdot = dotdotdot, flat.model = flat.model, model = model, # in case model is a lavaan object NACOV = NACOV, WLS.V = WLS.V ) lavdata <- temp$lavdata lavoptions <- temp$lavoptions timing <- ldw_add_timing(timing, "Data") # ------------ lavpartable ------------------- temp <- lav_lavaan_step04_partable( slotParTable = slotParTable, model = model, flat.model = flat.model, lavoptions = lavoptions, lavdata = lavdata, constraints = constraints ) lavoptions <- temp$lavoptions lavpartable <- temp$lavpartable timing <- ldw_add_timing(timing, "ParTable") # ------------ lavpta ------------------------ # lavpta <- lav_lavaan_step04_pta( # lavpartable = lavpartable, # lavoptions = lavoptions # ) # timing <- ldw_add_timing(timing, "lavpta") # ------------ lavsamplestats --------------- lavsamplestats <- lav_lavaan_step05_samplestats( slotSampleStats = slotSampleStats, lavdata = lavdata, lavoptions = lavoptions, WLS.V = WLS.V, NACOV = NACOV, sample.cov = sample.cov, sample.mean = sample.mean, sample.th = sample.th, sample.nobs = sample.nobs, ov.names = ov.names, ov.names.x = ov.names.x, lavpartable = lavpartable ) timing <- ldw_add_timing(timing, "SampleStats") # ------------ lavh1 ------------------------ lavh1 <- lav_lavaan_step06_h1( sloth1 = sloth1, lavoptions = lavoptions, lavsamplestats = lavsamplestats, lavdata = lavdata, lavpartable = lavpartable ) timing <- ldw_add_timing(timing, "h1") # ------------ bounds ------------------------ lavpartable <- lav_lavaan_step07_bounds( lavoptions = lavoptions, lavh1 = lavh1, lavdata = lavdata, lavsamplestats = lavsamplestats, lavpartable = lavpartable ) timing <- ldw_add_timing(timing, "bounds") # ------------ lavstart ---------------------- lavpartable <- lav_lavaan_step08_start( slotModel = slotModel, lavoptions = lavoptions, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavh1 = lavh1 ) timing <- ldw_add_timing(timing, "start") # ------------ model ------------------------- temp <- lav_lavaan_step09_model( slotModel = slotModel, lavoptions = lavoptions, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata ) lavpartable <- temp$lavpartable lavmodel <- temp$lavmodel timing <- ldw_add_timing(timing, "Model") # -------- lavcache ---------------------------------- lavcache <- lav_lavaan_step10_cache( slotCache = slotCache, lavdata = lavdata, lavmodel = lavmodel, 
lavpartable = lavpartable, lavoptions = lavoptions, sampling.weights = sampling.weights ) timing <- ldw_add_timing(timing, "cache") # -------- est + lavoptim ---------------------------- temp <- lav_lavaan_step11_estoptim( lavdata = lavdata, lavmodel = lavmodel, lavcache = lavcache, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavpartable = lavpartable ) lavoptim <- temp$lavoptim lavmodel <- temp$lavmodel lavpartable <- temp$lavpartable x <- temp$x timing <- ldw_add_timing(timing, "optim") # -------- lavimplied + lavloglik -------------------- lavimplied <- lav_lavaan_step12_implied( lavoptions = lavoptions, lavmodel = lavmodel ) timing <- ldw_add_timing(timing, "implied") lavloglik <- lav_lavaan_step12_loglik( lavoptions = lavoptions, lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, lavmodel = lavmodel ) timing <- ldw_add_timing(timing, "loglik") # ----------- lavvcov + lavboot ------------------- temp <- lav_lavaan_step13_vcov_boot( lavoptions = lavoptions, lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavpartable = lavpartable, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1, x = x ) lavpartable <- temp$lavpartable lavvcov <- temp$lavvcov VCOV <- temp$VCOV # nolint lavmodel <- temp$lavmodel lavboot <- temp$lavboot timing <- ldw_add_timing(timing, "vcov") # ----------- lavtest ---------- lavtest <- lav_lavaan_step14_test( lavoptions = lavoptions, lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavdata = lavdata, lavpartable = lavpartable, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1, x = x, VCOV = VCOV, lavloglik = lavloglik ) timing <- ldw_add_timing(timing, "test") # ----------- lavfit ---------- lavfit <- lav_lavaan_step14_fit( lavpartable = lavpartable, lavmodel = lavmodel, lavimplied = lavimplied, x = x, VCOV = VCOV, lavtest = lavtest ) timing <- ldw_add_timing(timing, "Fit") # ----------- baseline ---------------------------- lavbaseline <- lav_lavaan_step15_baseline( lavoptions = lavoptions, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavh1 = lavh1, lavpartable = lavpartable ) timing <- ldw_add_timing(timing, "baseline") # ----------- rotation --------------------------- temp <- lav_lavaan_step16_rotation( lavoptions = lavoptions, lavmodel = lavmodel, lavpartable = lavpartable, lavh1 = lavh1, lavdata = lavdata, x = x, lavvcov = lavvcov, VCOV = VCOV, lavcache = lavcache, lavimplied = lavimplied, lavsamplestats = lavsamplestats ) lavpartable <- temp$lavpartable lavmodel <- temp$lavmodel lavvcov <- temp$lavvcov timing <- ldw_add_timing(timing, "rotation") # ------ lavaan result ---------------- out <- lav_lavaan_step17_lavaan( lavmc = lavmc, timing = timing, lavoptions = lavoptions, lavpartable = lavpartable, lavdata = lavdata, lavsamplestats = lavsamplestats, lavmodel = lavmodel, lavcache = lavcache, lavfit = lavfit, lavboot = lavboot, lavoptim = lavoptim, lavimplied = lavimplied, lavloglik = lavloglik, lavvcov = lavvcov, lavtest = lavtest, lavh1 = lavh1, lavbaseline = lavbaseline, start.time0 = start.time0 ) out } # # # # # # # # cfa # # # # # # # # cfa <- function( model = NULL, data = NULL, ordered = NULL, sampling.weights = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, group = NULL, cluster = NULL, constraints = "", WLS.V = NULL, # nolint NACOV = NULL, # nolint ov.order = "model", ...) 
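# cfa() is a thin wrapper: it captures the user's call, sets
# model.type = "cfa", rewrites the call to lavaan::lavaan(), and
# re-evaluates it in the caller's frame; sem() and growth() below
# follow exactly the same pattern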
{
  sc <- sys.call()
  sc[["model.type"]] <- quote("cfa")
  # call mother function
  sc[[1L]] <- quote(lavaan::lavaan)
  eval(sc, parent.frame())
}

# # # # # # # # sem # # # # # # # #
sem <- function(
    model = NULL,
    data = NULL,
    ordered = NULL,
    sampling.weights = NULL,
    sample.cov = NULL,
    sample.mean = NULL,
    sample.th = NULL,
    sample.nobs = NULL,
    group = NULL,
    cluster = NULL,
    constraints = "",
    WLS.V = NULL, # nolint
    NACOV = NULL, # nolint
    ov.order = "model",
    ...) {
  sc <- sys.call()
  sc[["model.type"]] <- quote("sem")
  # call mother function
  sc[[1L]] <- quote(lavaan::lavaan)
  eval(sc, parent.frame())
}

# # # # # # # # # # growth # # # # # # # # # #
growth <- function(
    model = NULL,
    data = NULL,
    ordered = NULL,
    sampling.weights = NULL,
    sample.cov = NULL,
    sample.mean = NULL,
    sample.th = NULL,
    sample.nobs = NULL,
    group = NULL,
    cluster = NULL,
    constraints = "",
    WLS.V = NULL, # nolint
    NACOV = NULL, # nolint
    ov.order = "model",
    ...) {
  sc <- sys.call()
  sc[["model.type"]] <- quote("growth")
  # call mother function
  sc[[1L]] <- quote(lavaan::lavaan)
  eval(sc, parent.frame())
}

# # # # # # # # # # # # # # # # # # # # #
# helper function ldw_add_timing
# # # # # # # # # # # # # # # # # # # # #
ldw_add_timing <- function(timing, part) {
  # timing is a list with an element start.time;
  # this function adds an element, named as specified in parameter 'part',
  # holding the duration of the interval from start.time up to now;
  # thereafter start.time is reset to now (to prepare for the next call)
  # and the adapted list is returned
  timenow <- proc.time()[3]
  timing[[part]] <- (timenow - timing$start.time)
  timing$start.time <- timenow
  timing
}
lavaan/R/lav_test_browne.R0000644000176200001440000002110014627656441015246 0ustar liggesusers
# Browne's residual test statistic
# see Browne (1984) eq 2.20a
# T.B = (N-1) * t(RES) %*% Delta.c %*%
#       solve(t(Delta.c) %*% Gamma %*% Delta.c) %*%
#       t(Delta.c) %*% RES
#
#     = (N-1) * t(RES) %*% (Gamma.inv -
#       Gamma.inv %*% Delta %*%
#       solve(t(Delta) %*% Gamma.inv %*% Delta) %*%
#       t(Delta) %*% Gamma.inv) %*% RES
# Note: if Gamma == solve(Weight matrix), then:
#       t(Delta) %*% solve(Gamma) %*% RES == 0-vector!
#
# Therefore:
# - if estimator = "WLS", X2 == Browne's residual ADF statistic
# - if estimator = "GLS", X2 == Browne's residual NT statistic
#
# - if estimator = "NTRLS", X2 == Browne's residual NT statistic (model-based)
#   also known as the RLS test statistic
#   ... except in multigroup + equality constraints, where
#   t(Delta) %*% solve(Gamma) %*% RES is not zero everywhere!?
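# a minimal numeric sketch of eq 2.20a above -- illustrative only, not used
# by the package: 'RES' is the residual vector (observed minus model-implied
# sample statistics), 'Delta' the model Jacobian, 'Gamma' the NACOV of the
# sample statistics, and 'N' the sample size; base R's qr() stands in for
# the internal lav_matrix_orthogonal_complement() helper
browne_residual_stat_sketch <- function(RES, Delta, Gamma, N) {
  QR <- qr(Delta)
  # columns of Q beyond rank(Delta) span the orthogonal complement Delta.c
  Delta.c <- qr.Q(QR, complete = TRUE)[, -seq_len(QR$rank), drop = FALSE]
  tmp <- crossprod(Delta.c, RES) # t(Delta.c) %*% RES
  W <- crossprod(Delta.c, Gamma %*% Delta.c)
  drop((N - 1) * crossprod(tmp, solve(W, tmp)))
}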
# YR 26 July 2022: add alternative slots, if lavobject = NULL
# YR 22 Jan 2023: allow for model-based 'structured' Sigma
# TODO: - allow for non-linear equality constraints
#         (see Browne, 1982, eq 1.7.19; although we may face singular matrices)
lav_test_browne <- function(lavobject = NULL, # or
                            lavdata = NULL,
                            lavsamplestats = NULL, # WLS.obs, NACOV
                            lavmodel = NULL,
                            lavpartable = NULL, # DF
                            lavoptions = NULL,
                            lavh1 = NULL,
                            lavimplied = NULL,
                            # further options:
                            n.minus.one = "default",
                            ADF = TRUE,
                            model.based = FALSE) {
  if (!is.null(lavobject)) {
    # check input
    if (!inherits(lavobject, "lavaan")) {
      lav_msg_stop(gettext("object is not a lavaan object."))
    }
    # slots
    lavdata <- lavobject@Data
    lavsamplestats <- lavobject@SampleStats
    lavmodel <- lavobject@Model
    lavpartable <- lavobject@ParTable
    lavoptions <- lavobject@Options
    lavh1 <- lavobject@h1
    lavimplied <- lavobject@implied
  }
  if (!ADF && lavmodel@categorical) {
    lav_msg_stop(gettext(
      "normal theory version not available in the categorical setting."))
  }
  if (lavdata@missing != "listwise" && !model.based) {
    lav_msg_stop(gettext("Browne's test is not available when data is missing"))
  }
  if (lavdata@nlevels > 1L) {
    lav_msg_stop(gettext("Browne's test is not available when data is multilevel."))
  }
  if (length(lavmodel@ceq.nonlinear.idx) > 0L) {
    lav_msg_stop(gettext(
      "Browne's test is not available (yet) when nonlinear equality constraints are involved."))
  }
  if (!is.logical(n.minus.one)) {
    if (lavoptions$estimator == "ML" && lavoptions$likelihood == "normal") {
      n.minus.one <- FALSE
    } else {
      n.minus.one <- TRUE
    }
  }
  # ingredients
  Delta <- computeDelta(lavmodel)
  if (ADF) {
    # ADF version
    if (!is.null(lavsamplestats@NACOV[[1]])) {
      Gamma <- lavsamplestats@NACOV
    } else {
      if (!is.null(lavobject)) {
        if (lavobject@Data@data.type != "full") {
          lav_msg_stop(gettext(
            "ADF version not available without full data or user-provided Gamma/NACOV matrix"))
        }
        Gamma <- lav_object_gamma(lavobject,
          ADF = TRUE,
          model.based = model.based
        )
      } else {
        if (lavdata@data.type != "full") {
          lav_msg_stop(gettext(
            "ADF version not available without full data or user-provided Gamma/NACOV matrix"))
        }
        Gamma <- lav_object_gamma(
          lavobject = NULL,
          lavdata = lavdata,
          lavoptions = lavoptions,
          lavsamplestats = lavsamplestats,
          lavh1 = lavh1,
          lavimplied = lavimplied,
          ADF = TRUE,
          model.based = model.based
        )
      }
    }
  } else {
    # NT version
    if (!is.null(lavobject)) {
      Gamma <- lav_object_gamma(lavobject,
        ADF = FALSE,
        model.based = model.based
      )
    } else {
      Gamma <- lav_object_gamma(
        lavobject = NULL,
        lavdata = lavdata,
        lavoptions = lavoptions,
        lavsamplestats = lavsamplestats,
        lavh1 = lavh1,
        lavimplied = lavimplied,
        ADF = FALSE,
        model.based = model.based
      )
    }
  }
  WLS.obs <- lavsamplestats@WLS.obs
  WLS.est <- lav_model_wls_est(lavmodel)
  nobs <- lavsamplestats@nobs
  ntotal <- lavsamplestats@ntotal
  # linear equality constraints?
  lineq.flag <- FALSE
  if (lavmodel@eq.constraints) {
    lineq.flag <- TRUE
  } else if (.hasSlot(lavmodel, "ceq.simple.only") &&
    lavmodel@ceq.simple.only) {
    lineq.flag <- TRUE
  }
  # compute T.B per group
  ngroups <- length(WLS.obs)
  stat.group <- numeric(ngroups)
  # 1.
standard setting: no equality constraints if (!lineq.flag) { for (g in seq_len(ngroups)) { RES <- WLS.obs[[g]] - WLS.est[[g]] Delta.g <- Delta[[g]] Delta.c <- lav_matrix_orthogonal_complement(Delta.g) tDGD <- crossprod(Delta.c, Gamma[[g]]) %*% Delta.c # if fixed.x = TRUE, Gamma[[g]] may contain zero col/rows tDGD.inv <- lav_matrix_symmetric_inverse(tDGD) if (n.minus.one) { Ng <- nobs[[g]] - 1L } else { Ng <- nobs[[g]] } tResDelta.c <- crossprod(RES, Delta.c) stat.group[g] <- Ng * drop(tResDelta.c %*% tDGD.inv %*% t(tResDelta.c)) } STAT <- sum(stat.group) # 2. linear equality constraint } else if (lineq.flag) { RES.all <- do.call("c", WLS.obs) - do.call("c", WLS.est) Delta.all <- do.call("rbind", Delta) if (lavmodel@eq.constraints) { Delta.g <- Delta.all %*% lavmodel@eq.constraints.K } else if (.hasSlot(lavmodel, "ceq.simple.only") && lavmodel@ceq.simple.only) { Delta.g <- Delta.all %*% lavmodel@ceq.simple.K } Gamma.inv.weighted <- vector("list", ngroups) for (g in seq_len(ngroups)) { if (n.minus.one) { Ng <- nobs[[g]] - 1L } else { Ng <- nobs[[g]] } Gamma.inv.temp <- try(solve(Gamma[[g]]), silent = TRUE) if (inherits(Gamma.inv.temp, "try-error")) { # TDJ: This will happen whenever an (otherwise) unrestricted # covariance matrix has a structure to it, such as equal # variances (and certain covariances) for 2 members of an # indistinguishable dyad (represented as 2 columns). In # such cases, their (N)ACOV elements are also identical. Gamma.inv.temp <- MASS::ginv(Gamma[[g]]) } Gamma.inv.weighted[[g]] <- Gamma.inv.temp * Ng / ntotal } GI <- lav_matrix_bdiag(Gamma.inv.weighted) tDGiD <- t(Delta.g) %*% GI %*% Delta.g tDGiD.inv <- MASS::ginv(tDGiD) # GI may be rank-deficient q1 <- drop(t(RES.all) %*% GI %*% RES.all) q2 <- drop(t(RES.all) %*% GI %*% Delta.g %*% tDGiD.inv %*% t(Delta.g) %*% GI %*% RES.all) STAT <- ntotal * (q1 - q2) stat.group <- STAT * unlist(nobs) / ntotal # proxy only # 3. nonlinear equality constraints } else { # TODO } # DF if (!is.null(lavobject)) { DF <- lavobject@test[[1]]$df } else { # same approach as in lav_test.R df <- lav_partable_df(lavpartable) if (nrow(lavmodel@con.jac) > 0L) { ceq.idx <- attr(lavmodel@con.jac, "ceq.idx") if (length(ceq.idx) > 0L) { neq <- qr(lavmodel@con.jac[ceq.idx, , drop = FALSE])$rank df <- df + neq } } else if (lavmodel@ceq.simple.only) { # needed?? ndat <- lav_partable_ndat(lavpartable) npar <- max(lavpartable$free) df <- ndat - npar } DF <- df } if (ADF) { if (model.based) { # using model-based Gamma NAME <- "browne.residual.adf.model" LABEL <- "Browne's residual (ADF model-based) test" } else { # regular one NAME <- "browne.residual.adf" LABEL <- "Browne's residual-based (ADF) test" } } else { if (model.based) { # using model-implied Sigma (instead of S) # also called the 'reweighted least-squares (RLS)' version NAME <- "browne.residual.nt.model" LABEL <- "Browne's residual (NT model-based) test" } else { # regular one NAME <- "browne.residual.nt" LABEL <- "Browne's residual-based (NT) test" } } out <- list( test = NAME, stat = STAT, stat.group = stat.group, df = DF, refdistr = "chisq", pvalue = 1 - pchisq(STAT, DF), label = LABEL ) out } lavaan/R/ctr_pml_plrt_nested.R0000644000176200001440000005137614627656441016136 0ustar liggesusers# All code below is written by Myrsini Katsikatsou (Feb 2015) # The following function refers to PLRT for nested models and equality constraints. 
# Namely, it is developed to test either of the following hypotheses: # a) H0 states that some parameters are equal to 0 # b) H0 states that some parameters are equal to some others. # Note that for the latter I haven't checked if it is ok when equality constraints # are imposed on parameters that refer to different groups in a multi-group # analysis. All the code below has been developed for a single-group analysis. # Let fit_objH0 and fit_objH1 be the outputs of lavaan() function when we fit # a model under the null hypothesis and under the alternative, respectively. # The argument equalConstr is logical (T/F) and it is TRUE if equality constraints # are imposed on subsets of the parameters. # The main idea of the code below is that we consider the parameter vector # under the alternative H1 evaluated at the values derived under H0 and for these # values we should evaluate the Hessian, the variability matrix (denoted by J) # and Godambe matrix. ctr_pml_plrt_nested <- function(fit_objH0, fit_objH1) { # sanity check, perhaps we misordered H0 and H1 in the function call?? if (fit_objH1@test[[1]]$df > fit_objH0@test[[1]]$df) { tmp <- fit_objH0 fit_objH0 <- fit_objH1 fit_objH1 <- tmp } # check if we have equality constraints if (fit_objH0@Model@eq.constraints) { equalConstr <- TRUE } else { equalConstr <- FALSE } nsize <- fit_objH0@SampleStats@ntotal PLRT <- 2 * (fit_objH1@optim$logl - fit_objH0@optim$logl) # create a new object 'objH1_h0': the object 'H1', but where # the parameter values are from H0 objH1_h0 <- lav_test_diff_m10(m1 = fit_objH1, m0 = fit_objH0, test = FALSE) # EqMat # YR: from 0.6-2, use lav_test_diff_A() (again) # this should allow us to test models that are # nested in the covariance matrix sense, but not # in the parameter (table) sense EqMat <- lav_test_diff_A(m1 = fit_objH1, m0 = fit_objH0) if (objH1_h0@Model@eq.constraints) { EqMat <- EqMat %*% t(objH1_h0@Model@eq.constraints.K) } # if (equalConstr == TRUE) { # EqMat <- fit_objH0@Model@ceq.JAC # } else { # PT0 <- fit_objH0@ParTable # PT1 <- fit_objH1@ParTable # h0.par.idx <- which(PT1$free > 0 & !(PT0$free > 0)) # tmp.ind <- PT1$free[ h0.par.idx ] # # no.par0 <- length(tmp.ind) # tmp.ind2 <- cbind(1:no.par0, tmp.ind ) # matrix indices # EqMat <- matrix(0, nrow=no.par0, ncol=fit_objH1@Model@nx.free) # EqMat[tmp.ind2] <- 1 # } # DEBUG YR -- eliminate the constraints also present in H1 # -- if we do this, there is no need to use MASS::ginv later # JAC0 <- fit_objH0@Model@ceq.JAC # JAC1 <- fit_objH1@Model@ceq.JAC # unique.idx <- which(apply(JAC0, 1, function(x) { # !any(apply(JAC1, 1, function(y) { all(x == y) })) })) # if(length(unique.idx) > 0L) { # EqMat <- EqMat[unique.idx,,drop = FALSE] # } # Observed information (= for PML, this is Hessian / N) Hes.theta0 <- lavTech(objH1_h0, "information.observed") # handle possible constraints in H1 (and therefore also in objH1_h0) Inv.Hes.theta0 <- lav_model_information_augment_invert( lavmodel = objH1_h0@Model, information = Hes.theta0, inverted = TRUE ) # the estimated variability matrix is given (=unit information first order) J.theta0 <- lavTech(objH1_h0, "first.order") # the Inverse of the G matrix Inv.G <- Inv.Hes.theta0 %*% J.theta0 %*% Inv.Hes.theta0 MInvGtM <- EqMat %*% Inv.G %*% t(EqMat) MinvHtM <- EqMat %*% Inv.Hes.theta0 %*% t(EqMat) # Inv_MinvHtM <- solve(MinvHtM) Inv_MinvHtM <- MASS::ginv(MinvHtM) tmp.prod <- MInvGtM %*% Inv_MinvHtM tmp.prod2 <- tmp.prod %*% tmp.prod sum.eig <- sum(diag(tmp.prod)) sum.eigsq <- sum(diag(tmp.prod2)) FSMA.PLRT <- (sum.eig / sum.eigsq) * 
PLRT adj.df <- (sum.eig * sum.eig) / sum.eigsq pvalue <- 1 - pchisq(FSMA.PLRT, df = adj.df) list(FSMA.PLRT = FSMA.PLRT, adj.df = adj.df, pvalue = pvalue) } # for testing: this is the 'original' (using m.el.idx and x.el.idx) ctr_pml_plrt_nested2 <- function(fit_objH0, fit_objH1) { if (fit_objH1@test[[1]]$df > fit_objH0@test[[1]]$df) { tmp <- fit_objH0 fit_objH0 <- fit_objH1 fit_objH1 <- tmp } if (fit_objH0@Model@eq.constraints) { equalConstr <- TRUE } else { equalConstr <- FALSE } nsize <- fit_objH0@SampleStats@ntotal PLRT <- 2 * nsize * (fit_objH0@optim$fx - fit_objH1@optim$fx) Npar <- fit_objH1@optim$npar MY.m.el.idx2 <- fit_objH1@Model@m.free.idx MY.x.el.idx2 <- fit_objH1@Model@x.free.idx MY.m.el.idx <- MY.m.el.idx2 MY.x.el.idx <- MY.x.el.idx2 # MY.m.el.idx2 <- fit_objH1@Model@m.free.idx # MY.m.el.idx2 gives the POSITION index of the free parameters within each # parameter matrix under H1 model. # The index numbering restarts from 1 when we move to a new parameter matrix. # Within each matrix the index numbering "moves" columnwise. # MY.x.el.idx2 <- fit_objH1@Model@x.free.idx # MY.x.el.idx2 ENUMERATES the free parameters within each parameter matrix. # The numbering continues as we move from one parameter matrix to the next one. # In the case of the symmetric matrices, Theta and Psi,in some functions below # we need to give as input MY.m.el.idx2 and MY.x.el.idx2 after # we have eliminated the information about the redundant parameters # (those placed above the main diagonal). # That's why I do the following: # MY.m.el.idx <- MY.m.el.idx2 # MY.x.el.idx <- MY.x.el.idx2 # Psi, the variance - covariance matrix of factors # if( length(MY.x.el.idx2[[3]])!=0 & any(table(MY.x.el.idx2[[3]])>1)) { # nfac <- ncol(fit_objH1@Model@GLIST$lambda) #number of factors # tmp <- matrix(c(1:(nfac^2)), nrow= nfac, ncol= nfac ) # tmp_keep <- tmp[lower.tri(tmp, diag=TRUE)] # MY.m.el.idx[[3]] <- MY.m.el.idx[[3]][MY.m.el.idx[[3]] %in% tmp_keep] # MY.x.el.idx[[3]] <- unique( MY.x.el.idx2[[3]] ) # } # for Theta, the variance-covariance matrix of measurement errors # if( length(MY.x.el.idx2[[2]])!=0 & any(table(MY.x.el.idx2[[2]])>1)) { # nvar <- fit_objH1@Model@nvar #number of indicators # tmp <- matrix(c(1:(nvar^2)), nrow= nvar, ncol= nvar ) # tmp_keep <- tmp[lower.tri(tmp, diag=TRUE)] # MY.m.el.idx[[2]] <- MY.m.el.idx[[2]][MY.m.el.idx[[2]] %in% tmp_keep] # MY.x.el.idx[[2]] <- unique( MY.x.el.idx2[[2]] ) # } # below the commands to find the row-column indices of the Hessian that correspond to # the parameters to be tested equal to 0 # tmp.ind contains these indices # MY.m.el.idx2.H0 <- fit_objH0@Model@m.free.idx # tmp.ind <- c() # for(i in 1:6) { # tmp.ind <- c(tmp.ind , # MY.x.el.idx2[[i]] [!(MY.m.el.idx2[[i]] %in% # MY.m.el.idx2.H0[[i]] ) ] ) # } # next line added by YR # tmp.ind <- unique(tmp.ind) # YR: use partable to find which parameters are restricted in H0 # (this should work in multiple groups too) # h0.par.idx <- which( PT.H1.extended$free[PT.H1.extended$user < 2] > 0 & # !(PT.H0.extended$free[PT.H0.extended$user < 2] > 0) ) # tmp.ind <- PT.H1.extended$free[ h0.par.idx ] # print(tmp.ind) if (length(MY.x.el.idx2[[3]]) != 0 & any(table(MY.x.el.idx2[[3]]) > 1)) { nfac <- ncol(fit_objH1@Model@GLIST$lambda) tmp <- matrix(c(1:(nfac * nfac)), nrow = nfac, ncol = nfac) tmp_keep <- tmp[lower.tri(tmp, diag = TRUE)] MY.m.el.idx[[3]] <- MY.m.el.idx[[3]][MY.m.el.idx[[3]] %in% tmp_keep] MY.x.el.idx[[3]] <- unique(MY.x.el.idx2[[3]]) } if (length(MY.x.el.idx2[[2]]) != 0 & any(table(MY.x.el.idx2[[2]]) > 1)) { nvar <- 
fit_objH1@Model@nvar tmp <- matrix(c(1:(nvar * nvar)), nrow = nvar, ncol = nvar) tmp_keep <- tmp[lower.tri(tmp, diag = TRUE)] MY.m.el.idx[[2]] <- MY.m.el.idx[[2]][MY.m.el.idx[[2]] %in% tmp_keep] MY.x.el.idx[[2]] <- unique(MY.x.el.idx2[[2]]) } MY.m.el.idx2.H0 <- fit_objH0@Model@m.free.idx tmp.ind <- c() for (i in 1:6) { tmp.ind <- c(tmp.ind, MY.x.el.idx2[[i]][!(MY.m.el.idx2[[i]] %in% MY.m.el.idx2.H0[[i]])]) } tmp.ind <- unique(tmp.ind) # if the models are nested because of equality constraints among the parameters, we need # to construct the matrix of derivatives of function g(theta) with respect to theta # where g(theta) is the function that represents the equality constraints. g(theta) is # an rx1 vector where r are the equality constraints. In the null hypothesis # we test H0: g(theta)=0. The matrix of derivatives is of dimension: # nrows= number of free non-redundant parameters under H0, namely # NparH0 <- fit_objH0[[1]]@optim$npar , and ncols= number of free non-redundant # parameters under H1, namely NparH1 <- fit_objH0[[1]]@optim$npar. # The matrix of derivatives of g(theta) is composed of 0's, 1's, -1's, and # in the rows that refer to odd number of parameters that are equal there is one -2. # The 1's, -1's (and possibly -2) are the contrast coefficients of the parameters. # The sum of the rows should be equal to 0. # if(equalConstr==TRUE) { # EqMat <- fit_objH0@Model@ceq.JAC # } else { # no.par0 <- length(tmp.ind) # tmp.ind2 <- cbind(1:no.par0, tmp.ind) # EqMat <- matrix(0, nrow = no.par0, ncol = Npar) # EqMat[tmp.ind2] <- 1 # } if (equalConstr == TRUE) { EqMat <- fit_objH0@Model@ceq.JAC } else { no.par0 <- length(tmp.ind) tmp.ind2 <- cbind(1:no.par0, tmp.ind) EqMat <- matrix(0, nrow = no.par0, ncol = Npar) EqMat[tmp.ind2] <- 1 } obj <- fit_objH0 # Compute the sum of the eigenvalues and the sum of the squared eigenvalues # so that the adjustment to PLRT can be applied. # Here a couple of functions (e.g. MYgetHessian) which are modifications of # lavaan functions (e.g. getHessian) are needed. These are defined in the end of the file. # the quantity below follows the same logic as getHessian of lavaan 0.5-18 # and it actually gives N*Hessian. That's why the command following the command below. # NHes.theta0 <- MYgetHessian (object = obj@Model, # samplestats = obj@SampleStats , # X = obj@Data@X , # estimator = "PML", # lavcache = obj@Cache, # MY.m.el.idx = MY.m.el.idx, # MY.x.el.idx = MY.x.el.idx, # MY.m.el.idx2 = MY.m.el.idx2, # input for MYx2GLIST # MY.x.el.idx2 = MY.x.el.idx2, # input for MYx2GLIST # Npar = Npar, # equalConstr=equalConstr) NHes.theta0 <- MYgetHessian( object = obj@Model, samplestats = obj@SampleStats, X = obj@Data@X, estimator = "PML", lavcache = obj@Cache, MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, MY.m.el.idx2 = MY.m.el.idx2, MY.x.el.idx2 = MY.x.el.idx2, Npar = Npar, equalConstr = equalConstr ) Hes.theta0 <- NHes.theta0 / nsize # Inv.Hes.theta0 <- solve(Hes.theta0) Inv.Hes.theta0 <- MASS::ginv(Hes.theta0) NJ.theta0 <- MYgetVariability( object = obj, MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, equalConstr = equalConstr ) J.theta0 <- NJ.theta0 / (nsize * nsize) Inv.G <- Inv.Hes.theta0 %*% J.theta0 %*% Inv.Hes.theta0 MInvGtM <- EqMat %*% Inv.G %*% t(EqMat) MinvHtM <- EqMat %*% Inv.Hes.theta0 %*% t(EqMat) # Inv_MinvHtM <- solve(MinvHtM) #!!! change names Inv_MinvHtM <- MASS::ginv(MinvHtM) tmp.prod <- MInvGtM %*% Inv_MinvHtM # !!! 
change names tmp.prod2 <- tmp.prod %*% tmp.prod sum.eig <- sum(diag(tmp.prod)) sum.eigsq <- sum(diag(tmp.prod2)) FSMA.PLRT <- (sum.eig / sum.eigsq) * PLRT adj.df <- (sum.eig * sum.eig) / sum.eigsq pvalue <- 1 - pchisq(FSMA.PLRT, df = adj.df) list(FSMA.PLRT = FSMA.PLRT, adj.df = adj.df, pvalue = pvalue) } ################################################################################### # auxiliary functions used above, they are all copy from the corresponding functions # of lavaan where parts no needed were deleted and some parts were modified. # I mark the modifications with comments. # library(lavaan) # To run an example for the functions below the following input is needed. # obj <- fit.objH0[[i]] # object <- obj@Model # samplestats = obj@SampleStats # X = obj@Data@X # estimator = "PML" # lavcache = obj@Cache # MY.m.el.idx = MY.m.el.idx # MY.x.el.idx = MY.x.el.idx # MY.m.el.idx2 = MY.m.el.idx2 # input for MYx2GLIST # MY.x.el.idx2 = MY.x.el.idx2 # input for MYx2GLIST # Npar = Npar # equalConstr =TRUE MYgetHessian <- function(object, samplestats, X, estimator = "PML", lavcache, MY.m.el.idx, MY.x.el.idx, MY.m.el.idx2, MY.x.el.idx2, # input for MYx2GLIST Npar, # Npar is the number of parameters under H1 equalConstr) { # takes TRUE/ FALSE if (equalConstr) { # !!! added line } Hessian <- matrix(0, Npar, Npar) # # !!!! MYfunction below x <- MYgetModelParameters( object = object, GLIST = NULL, N = Npar, # N the number of parameters to consider MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx ) for (j in 1:Npar) { h.j <- 1e-05 x.left <- x.left2 <- x.right <- x.right2 <- x x.left[j] <- x[j] - h.j x.left2[j] <- x[j] - 2 * h.j x.right[j] <- x[j] + h.j x.right2[j] <- x[j] + 2 * h.j # !!!! MYfunction below : MYcomputeGradient and MYx2GLIST g.left <- MYcomputeGradient( object = object, GLIST = MYx2GLIST( object = object, x = x.left, MY.m.el.idx = MY.m.el.idx2, MY.x.el.idx = MY.x.el.idx2 ), samplestats = samplestats, X = X, lavcache = lavcache, estimator = "PML", MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, equalConstr = equalConstr ) g.left2 <- MYcomputeGradient( object = object, GLIST = MYx2GLIST( object = object, x = x.left2, MY.m.el.idx = MY.m.el.idx2, MY.x.el.idx = MY.x.el.idx2 ), samplestats = samplestats, X = X, lavcache = lavcache, estimator = "PML", MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, equalConstr = equalConstr ) g.right <- MYcomputeGradient( object = object, GLIST = MYx2GLIST( object = object, x = x.right, MY.m.el.idx = MY.m.el.idx2, MY.x.el.idx = MY.x.el.idx2 ), samplestats = samplestats, X = X, lavcache = lavcache, estimator = "PML", MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, equalConstr = equalConstr ) g.right2 <- MYcomputeGradient( object = object, GLIST = MYx2GLIST( object = object, x = x.right2, MY.m.el.idx = MY.m.el.idx2, MY.x.el.idx = MY.x.el.idx2 ), samplestats = samplestats, X = X, lavcache = lavcache, estimator = "PML", MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, equalConstr = equalConstr ) Hessian[, j] <- (g.left2 - 8 * g.left + 8 * g.right - g.right2) / (12 * h.j) } Hessian <- (Hessian + t(Hessian)) / 2 # (-1) * Hessian Hessian } ############################################################################# ################################## MYgetModelParameters # different input arguments: MY.m.el.idx, MY.x.el.idx MYgetModelParameters <- function(object, GLIST = NULL, N, # N the number of parameters to consider MY.m.el.idx, MY.x.el.idx) { if (is.null(GLIST)) { GLIST <- object@GLIST } x <- numeric(N) for (mm in 
1:length(object@GLIST)) { # mm<-1 m.idx <- MY.m.el.idx[[mm]] # !!!!! different here and below x.idx <- MY.x.el.idx[[mm]] x[x.idx] <- GLIST[[mm]][m.idx] } x } ############################################################################# ############################# MYcomputeGradient # the difference are the input arguments MY.m.el.idx, MY.x.el.idx # used in lavaan:::computeDelta MYcomputeGradient <- function(object, GLIST, samplestats = NULL, X = NULL, lavcache = NULL, estimator = "PML", MY.m.el.idx, MY.x.el.idx, equalConstr) { if (equalConstr) { # added line } num.idx <- object@num.idx th.idx <- object@th.idx if (is.null(GLIST)) { GLIST <- object@GLIST } Sigma.hat <- computeSigmaHat(object, GLIST = GLIST, extra = (estimator == "ML")) Mu.hat <- computeMuHat(object, GLIST = GLIST) TH <- computeTH(object, GLIST = GLIST) g <- 1 d1 <- pml_deriv1( Sigma.hat = Sigma.hat[[g]], Mu.hat = Mu.hat[[g]], TH = TH[[g]], th.idx = th.idx[[g]], num.idx = num.idx[[g]], X = X[[g]], lavcache = lavcache[[g]] ) # !? if(equalConstr) { #delete the following three commented lines, wrong # Delta <- lavaan:::computeDelta (lavmodel= object, GLIST. = GLIST) # } else { Delta <- computeDelta( lavmodel = object, GLIST. = GLIST, m.el.idx. = MY.m.el.idx, x.el.idx. = MY.x.el.idx ) # } # !!!!! that was before: as.numeric(t(d1) %*% Delta[[g]])/samplestats@nobs[[g]] as.numeric(t(d1) %*% Delta[[g]]) # !!! modified to follow current computeGradient() function of lavaan # !!! which gives minus the gradient of PL-loglik } ############################################################################### ################################## MYx2GLIST # difference in input arguments MY.m.el.idx, MY.x.el.idx MYx2GLIST <- function(object, x = NULL, MY.m.el.idx, MY.x.el.idx) { GLIST <- object@GLIST for (mm in 1:length(GLIST)) { m.el.idx <- MY.m.el.idx[[mm]] x.el.idx <- MY.x.el.idx[[mm]] GLIST[[mm]][m.el.idx] <- x[x.el.idx] } GLIST } ############################################################################ ##### MYgetVariability function # difference from corresponding of lavaan: I use MYNvcov.first.order MYgetVariability <- function(object, MY.m.el.idx, MY.x.el.idx, equalConstr) { NACOV <- MYNvcov.first.order( lavmodel = object@Model, lavsamplestats = object@SampleStats, lavdata = object@Data, estimator = "PML", MY.m.el.idx = MY.m.el.idx, MY.x.el.idx = MY.x.el.idx, equalConstr = equalConstr ) if (equalConstr) { # added lines } B0 <- attr(NACOV, "B0") # !!!! Note below that I don't multiply with nsize # !!! so what I get is J matrix divided by n # if (object@Options$estimator == "PML") { # B0 <- B0 * object@SampleStats@ntotal # } # !!!!!!!!!!!!!!!!!!! added the following lines so that the output of # !!!!! MYgetVariability is in line with that of lavaan 0.5-18 getVariability # !! what's the purpose of the following lines? 
if (object@Options$estimator == "PML") {
    B0 <- B0 * object@SampleStats@ntotal
  }
  B0
}
##############################################################################
# example
# obj <- fit.objH0[[i]]
# object <- obj@Model
# samplestats = obj@SampleStats
# X = obj@Data@X
# estimator = "PML"
# lavcache = obj@Cache
# MY.m.el.idx = MY.m.el.idx
# MY.x.el.idx = MY.x.el.idx
# MY.m.el.idx2 = MY.m.el.idx2 # input for MYx2GLIST
# MY.x.el.idx2 = MY.x.el.idx2 # input for MYx2GLIST
# Npar = Npar
# equalConstr = TRUE
MYNvcov.first.order <- function(lavmodel,
                                lavsamplestats = NULL,
                                lavdata = NULL,
                                lavcache = NULL,
                                estimator = "PML",
                                MY.m.el.idx, MY.x.el.idx,
                                equalConstr) { # equalConstr takes TRUE/FALSE
  if (equalConstr) {
    # added lines
  }
  B0.group <- vector("list", lavsamplestats@ngroups) # in my case list of length 1
  # !? if (equalConstr) { ### the following three lines are commented out because they are wrong
  #   Delta <- lavaan:::computeDelta(lavmodel, GLIST. = NULL)
  # } else {
  Delta <- computeDelta(lavmodel,
    GLIST. = NULL,
    m.el.idx. = MY.m.el.idx, # !!!!! different here and below
    x.el.idx. = MY.x.el.idx
  )
  # }
  Sigma.hat <- computeSigmaHat(lavmodel)
  Mu.hat <- computeMuHat(lavmodel)
  TH <- computeTH(lavmodel)
  g <- 1
  SC <- pml_deriv1(
    Sigma.hat = Sigma.hat[[g]],
    TH = TH[[g]],
    Mu.hat = Mu.hat[[g]],
    th.idx = lavmodel@th.idx[[g]],
    num.idx = lavmodel@num.idx[[g]],
    X = lavdata@X[[g]],
    lavcache = lavcache,
    scores = TRUE, negative = FALSE
  )
  group.SC <- SC %*% Delta[[g]]
  B0.group[[g]] <- lav_matrix_crossprod(group.SC)
  # !!!! B0.group[[g]] <- B0.group[[g]]/lavsamplestats@ntotal !!! skip so that the result
  # is in line with the 0.5-18 version of lavaan
  B0 <- B0.group[[1]]
  E <- B0
  eigvals <- eigen(E, symmetric = TRUE, only.values = TRUE)$values
  if (any(eigvals < -1 * .Machine$double.eps^(3 / 4))) {
    lav_msg_warn(gettext(
      "matrix based on first order outer product of the derivatives is not positive definite; the standard errors may not be trustworthy"))
  }
  NVarCov <- MASS::ginv(E)
  attr(NVarCov, "B0") <- B0
  attr(NVarCov, "B0.group") <- B0.group
  NVarCov
}
lavaan/R/lav_h1_logl.R0000644000176200001440000000627314627656441014256 0ustar liggesusers
# compute logl for the unrestricted (h1) model -- per group
lav_h1_logl <- function(lavdata = NULL,
                        lavsamplestats = NULL,
                        lavoptions = NULL) {
  # number of groups
  ngroups <- lavdata@ngroups
  logl.group <- rep(as.numeric(NA), ngroups)
  # should compute logl, or return NA?
  logl.ok <- FALSE
  if (lavoptions$estimator %in% c("ML", "MML")) {
    # check if everything is numeric, OR if we have exogenous
    # factor with 2 levels only
    if (all(lavdata@ov$type == "numeric")) {
      logl.ok <- TRUE
    } else {
      not.idx <- which(lavdata@ov$type != "numeric")
      for (i in not.idx) {
        if (lavdata@ov$type[i] == "factor" &&
          lavdata@ov$exo[i] == 1L &&
          lavdata@ov$nlev[i] == 2L) {
          logl.ok <- TRUE
        } else {
          logl.ok <- FALSE
          break
        }
      }
    }
  }
  # lavsamplestats filled in? (not if no data, or samplestats = FALSE)
  if (length(lavsamplestats@ntotal) == 0L ||
    (!is.null(lavoptions$samplestats) && !lavoptions$samplestats)) {
    logl.ok <- FALSE
  }
  # new in 0.6-9 (so SAM can handle N < nvar)
  if (logl.ok) {
    for (g in seq_len(ngroups)) {
      # multilevel?
      if (lavdata@nlevels > 1L) {
        current.verbose <- lav_verbose()
        if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose), TRUE)
        OUT <- lav_mvnorm_cluster_em_sat(
          YLp = lavsamplestats@YLp[[g]],
          Lp = lavdata@Lp[[g]],
          tol = 1e-04, # option?
          min.variance = 1e-05, # option?
          max.iter = 5000L
        ) # option?
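        # OUT$logl holds the saturated (h1) loglikelihood for this
        # two-level group, as computed by EM in lav_mvnorm_cluster_em_sat()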
lav_verbose(current.verbose) # store logl per group logl.group[g] <- OUT$logl } else if (lavsamplestats@missing.flag) { logl.group[g] <- lav_mvnorm_missing_loglik_samplestats( Yp = lavsamplestats@missing[[g]], Mu = lavsamplestats@missing.h1[[g]]$mu, Sigma = lavsamplestats@missing.h1[[g]]$sigma, x.idx = lavsamplestats@x.idx[[g]], x.mean = lavsamplestats@mean.x[[g]], x.cov = lavsamplestats@cov.x[[g]] ) } else { # single-level, complete data # all we need is: logdet of covariance matrix, nobs and nvar if (lavoptions$conditional.x) { logl.group[g] <- lav_mvnorm_h1_loglik_samplestats( sample.cov.logdet = lavsamplestats@res.cov.log.det[[g]], sample.nvar = NCOL(lavsamplestats@res.cov[[g]]), sample.nobs = lavsamplestats@nobs[[g]] ) } else { logl.group[g] <- lav_mvnorm_h1_loglik_samplestats( sample.cov.logdet = lavsamplestats@cov.log.det[[g]], sample.nvar = NCOL(lavsamplestats@cov[[g]]), sample.nobs = lavsamplestats@nobs[[g]], x.idx = lavsamplestats@x.idx[[g]], x.cov = lavsamplestats@cov.x[[g]] ) } } # complete } # g } # logl.ok is TRUE out <- list( loglik = sum(logl.group), loglik.group = logl.group ) out } lavaan/R/lav_samplestats_gamma.R0000644000176200001440000006157614627656441016442 0ustar liggesusers# YR 21 March 2015 # new approach to compute 'Gamma': the asymptotic variance matrix of # sqrt{N} times the # observed sample statistics (means + varcov) # # Gamma = N x ACOV[ ybar, vech(S) ] # = NACOV[ ybar, vech(S) ] # # - one single function for mean + cov # - handle 'fixed.x' exogenous covariates # - YR 3 Dec 2015: allow for conditional.x = TRUE # - YR 22 Jan 2023: add model.based= argument (if object is lavaan object) # - YR 30 May 2024: add lav_samplestats_cor_Gamma(_NT) # generic public function (not exported yet) # input for lavGamma can be lavobject, lavdata, data.frame, or matrix lavGamma <- function(object, group = NULL, missing = "listwise", ov.names.x = NULL, fixed.x = FALSE, conditional.x = FALSE, meanstructure = FALSE, slopestructure = FALSE, gamma.n.minus.one = FALSE, gamma.unbiased = FALSE, ADF = TRUE, model.based = FALSE, NT.rescale = FALSE, Mplus.WLS = FALSE, add.labels = FALSE) { # check object # 1. 
object is lavaan object if (inherits(object, "lavaan")) { lav_object_gamma( lavobject = object, ADF = ADF, model.based = model.based, Mplus.WLS = Mplus.WLS ) } else if (inherits(object, "lavData")) { lavdata <- object model.based <- FALSE } else if (inherits(object, "data.frame") || inherits(object, "matrix")) { model.based <- FALSE NAMES <- names(object) if (!is.null(NAMES) && !is.null(group)) { NAMES <- NAMES[-match(group, NAMES)] } lavdata <- lavData( data = object, group = group, ov.names = NAMES, ordered = NULL, ov.names.x = ov.names.x, lavoptions = list( warn = FALSE, missing = missing ) ) } else { lav_msg_stop( gettextf("lavGamma can not handle objects of class %s", lav_msg_view(class(object))) ) } # extract data Y <- lavdata@X if (conditional.x) { eXo <- lavdata@eXo for (g in seq_len(lavdata@ngroups)) { Y[[g]] <- cbind(Y[[g]], eXo[[g]]) } } # x.idx x.idx <- lapply( seq_len(lavdata@ngroups), function(g) { match( lavdata@ov.names.x[[g]], lavdata@ov.names[[g]] ) } ) OUT <- lapply(seq_len(lavdata@ngroups), function(g) { if (length(lavdata@cluster) > 0L) { cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]] } else { cluster.idx <- NULL } if (ADF) { out <- lav_samplestats_Gamma( Y = Y[[g]], Mu = NULL, Sigma = NULL, x.idx = x.idx[[g]], cluster.idx = cluster.idx, fixed.x = fixed.x, conditional.x = conditional.x, meanstructure = meanstructure, slopestructure = conditional.x, gamma.n.minus.one = gamma.n.minus.one, unbiased = gamma.unbiased, Mplus.WLS = Mplus.WLS ) } else { out <- lav_samplestats_Gamma_NT( Y = Y[[g]], wt = NULL, # for now rescale = NT.rescale, x.idx = x.idx[[g]], fixed.x = fixed.x, conditional.x = conditional.x, meanstructure = meanstructure, slopestructure = conditional.x ) } out }) # todo: labels OUT } # for internal use -- lavobject or internal slots lav_object_gamma <- function(lavobject = NULL, # or individual slots lavdata = NULL, lavoptions = NULL, lavsamplestats = NULL, lavh1 = NULL, lavimplied = NULL, # other options ADF = TRUE, model.based = FALSE, Mplus.WLS = FALSE) { # extract slots if (!is.null(lavobject)) { lavdata <- lavobject@Data lavoptions <- lavobject@Options lavsamplestats <- lavobject@SampleStats if (.hasSlot(lavobject, "h1")) { lavh1 <- lavobject@h1 } else { # only for <0.6 out <- lav_h1_implied_logl( lavdata = lavdata, lavsamplestats = lavsamplestats, lavpartable = lavobject@ParTable, lavoptions = lavoptions ) h1.implied <- out$implied h1.loglik <- out$logl$loglik h1.loglik.group <- out$logl$loglik.group lavh1 <- list( implied = h1.implied, loglik = h1.loglik, loglik.group = h1.loglik.group ) lavoptions$gamma.n.minus.one <- FALSE lavoptions$gamma.unbiased <- FALSE } lavimplied <- lavobject@implied } missing <- lavoptions$missing if (!missing %in% c("listwise", "pairwise")) { model.based <- TRUE } fixed.x <- lavoptions$fixed.x conditional.x <- lavoptions$conditional.x meanstructure <- lavoptions$meanstructure gamma.n.minus.one <- lavoptions$gamma.n.minus.one gamma.unbiased <- lavoptions$gamma.unbiased if (ADF && model.based && conditional.x) { lav_msg_stop(gettext( "ADF + model.based + conditional.x is not supported yet.")) } # output container OUT <- vector("list", length = lavdata@ngroups) # compute Gamma matrix for each group for (g in seq_len(lavdata@ngroups)) { x.idx <- lavsamplestats@x.idx[[g]] COV <- MEAN <- NULL if (!ADF || model.based) { implied <- lavh1$implied # saturated/unstructured if (model.based) { implied <- lavimplied # model-based/structured } if (conditional.x) { # convert to joint COV/MEAN res.S <- implied$res.cov[[g]] res.slopes <- 
implied$res.slopes[[g]] res.int <- implied$res.int[[g]] S.xx <- implied$cov.x[[g]] M.x <- implied$mean.x[[g]] S.yy <- res.S + res.slopes %*% S.xx %*% t(res.slopes) S.yx <- res.slopes %*% S.xx S.xy <- S.xx %*% t(res.slopes) M.y <- res.int + res.slopes %*% M.x COV <- rbind(cbind(S.yy, S.yx), cbind(S.xy, S.xx)) MEAN <- c(M.y, M.x) } else { # not conditional.x COV <- implied$cov[[g]] MEAN <- implied$mean[[g]] } } # COV/MEAN if (ADF) { if (conditional.x) { Y <- cbind(lavdata@X[[g]], lavdata@eXo[[g]]) } else { Y <- lavdata@X[[g]] } if (length(lavdata@cluster) > 0L) { cluster.idx <- lavdata@Lp[[g]]$cluster.idx[[2]] } else { cluster.idx <- NULL } OUT[[g]] <- lav_samplestats_Gamma( Y = Y, Mu = MEAN, Sigma = COV, x.idx = x.idx, cluster.idx = cluster.idx, fixed.x = fixed.x, conditional.x = conditional.x, meanstructure = meanstructure, slopestructure = conditional.x, gamma.n.minus.one = gamma.n.minus.one, unbiased = gamma.unbiased, Mplus.WLS = Mplus.WLS ) } else { OUT[[g]] <- lav_samplestats_Gamma_NT( COV = COV, # joint! MEAN = MEAN, # joint! x.idx = x.idx, fixed.x = fixed.x, conditional.x = conditional.x, meanstructure = meanstructure, slopestructure = conditional.x ) } } # g OUT } # NOTE: # - three types: # 1) plain (conditional.x = FALSE, fixed.x = FALSE) # 2) fixed.x (conditional.x = FALSE, fixed.x = TRUE) # 3) conditional.x (conditional.x = TRUE) # - if conditional.x = TRUE, we ignore fixed.x (can be TRUE or FALSE) # NORMAL-THEORY lav_samplestats_Gamma_NT <- function(Y = NULL, # should include # eXo if # conditional.x=TRUE wt = NULL, COV = NULL, # joint! MEAN = NULL, # joint! rescale = FALSE, x.idx = integer(0L), fixed.x = FALSE, conditional.x = FALSE, meanstructure = FALSE, slopestructure = FALSE) { # check arguments if (length(x.idx) == 0L) { conditional.x <- FALSE fixed.x <- FALSE } # compute COV from Y if (is.null(COV)) { stopifnot(!is.null(Y)) # coerce to matrix Y <- unname(as.matrix(Y)) N <- nrow(Y) if (is.null(wt)) { COV <- cov(Y) if (rescale) { COV <- COV * (N - 1) / N # (normal) ML version } } else { out <- stats::cov.wt(Y, wt = wt, method = "ML") COV <- out$cov } } else { if (!missing(rescale)) { lav_msg_warn(gettext("rescale= argument has no effect if COV is given")) } if (!missing(wt)) { lav_msg_warn(gettext("wt= argument has no effect if COV is given")) } } # if needed, compute MEAN from Y if (conditional.x && length(x.idx) > 0L && is.null(MEAN) && (meanstructure || slopestructure)) { stopifnot(!is.null(Y)) if (is.null(wt)) { MEAN <- colMeans(Y, na.rm = TRUE) } else { MEAN <- out$center } } # rename S <- COV M <- MEAN # unconditional if (!conditional.x) { # unconditional - stochastic x if (!fixed.x) { Gamma <- 2 * lav_matrix_duplication_ginv_pre_post(S %x% S) if (meanstructure) { Gamma <- lav_matrix_bdiag(S, Gamma) } # unconditional - fixed x } else { # handle fixed.x = TRUE # cov(Y|X) = A - B C^{-1} B' # where A = cov(Y), B = cov(Y,X), C = cov(X) A <- S[-x.idx, -x.idx, drop = FALSE] B <- S[-x.idx, x.idx, drop = FALSE] C <- S[x.idx, x.idx, drop = FALSE] YbarX <- A - B %*% solve(C, t(B)) # reinsert YbarX in Y+X (residual) covariance matrix YbarX.aug <- matrix(0, nrow = NROW(S), ncol = NCOL(S)) YbarX.aug[-x.idx, -x.idx] <- YbarX # take difference R <- S - YbarX.aug Gamma.S <- 2 * lav_matrix_duplication_ginv_pre_post(S %x% S) Gamma.R <- 2 * lav_matrix_duplication_ginv_pre_post(R %x% R) Gamma <- Gamma.S - Gamma.R if (meanstructure) { Gamma <- lav_matrix_bdiag(YbarX.aug, Gamma) } } } else { # conditional.x # 4 possibilities: # - no meanstructure, no slopes # - meanstructure, no slopes # - 
no meanstructure, slopes # - meanstructure, slopes # regress Y on X, and compute covariance of residuals 'R' A <- S[-x.idx, -x.idx, drop = FALSE] B <- S[-x.idx, x.idx, drop = FALSE] C <- S[x.idx, x.idx, drop = FALSE] Cov.YbarX <- A - B %*% solve(C) %*% t(B) Gamma <- 2 * lav_matrix_duplication_ginv_pre_post(Cov.YbarX %x% Cov.YbarX) if (meanstructure || slopestructure) { MY <- M[-x.idx] MX <- M[x.idx] C3 <- rbind( c(1, MX), cbind(MX, C + tcrossprod(MX)) ) # B3 <- cbind(MY, B + tcrossprod(MY,MX)) } if (meanstructure) { if (slopestructure) { # A11 <- solve(C3) %x% Cov.YbarX A11 <- Cov.YbarX %x% solve(C3) } else { # A11 <- solve(C3)[1, 1, drop=FALSE] %x% Cov.YbarX A11 <- Cov.YbarX %x% solve(C3)[1, 1, drop = FALSE] } } else { if (slopestructure) { # A11 <- solve(C3)[-1, -1, drop=FALSE] %x% Cov.YbarX A11 <- Cov.YbarX %x% solve(C3)[-1, -1, drop = FALSE] } else { A11 <- matrix(0, 0, 0) } } Gamma <- lav_matrix_bdiag(A11, Gamma) } Gamma } # NOTE: # - three types: # 1) plain (conditional.x = FALSE, fixed.x = FALSE) # 2) fixed.x (conditional.x = FALSE, fixed.x = TRUE) # 3) conditional.x (conditional.x = TRUE) # - if conditional.x = TRUE, we ignore fixed.x (can be TRUE or FALSE) # # - new in 0.6-1: if Mu/Sigma is provided, compute 'model-based' Gamma # (only if conditional.x = FALSE, for now) # - new in 0.6-2: if cluster.idx is not NULL, correct for clustering # - new in 0.6-13: add unbiased = TRUE (for the 'plain' setting only) # ADF THEORY lav_samplestats_Gamma <- function(Y, # Y+X if cond! Mu = NULL, Sigma = NULL, x.idx = integer(0L), cluster.idx = NULL, fixed.x = FALSE, conditional.x = FALSE, meanstructure = FALSE, slopestructure = FALSE, gamma.n.minus.one = FALSE, unbiased = FALSE, Mplus.WLS = FALSE) { # coerce to matrix Y <- unname(as.matrix(Y)) N <- nrow(Y) p <- ncol(Y) # unbiased? if (unbiased) { if (conditional.x || fixed.x || !is.null(Sigma) || !is.null(cluster.idx)) { lav_msg_stop( gettext("unbiased Gamma only available for the simple (not conditional.x or fixed.x or model-based or clustered) setting.")) } else { COV <- COV.unbiased <- cov(Y) COV <- COV * (N - 1) / N cov.vech <- lav_matrix_vech(COV) } } # model-based? if (!is.null(Sigma)) { stopifnot(!conditional.x) model.based <- TRUE if (meanstructure) { stopifnot(!is.null(Mu)) sigma <- c(as.numeric(Mu), lav_matrix_vech(Sigma)) } else { Mu <- colMeans(Y, na.rm = TRUE) # for centering! sigma <- lav_matrix_vech(Sigma) } } else { model.based <- FALSE } # denominator if (gamma.n.minus.one) { N <- N - 1 } # check arguments if (length(x.idx) == 0L) { conditional.x <- FALSE fixed.x <- FALSE } if (Mplus.WLS) { stopifnot(!conditional.x, !fixed.x) } if (!conditional.x && !fixed.x) { # center, so we can use crossprod instead of cov if (model.based) { Yc <- t(t(Y) - as.numeric(Mu)) } else { Yc <- t(t(Y) - colMeans(Y, na.rm = TRUE)) } # create Z where the rows_i contain the following elements: # - Y_i (if meanstructure is TRUE) # - vech(Yc_i' %*% Yc_i) where Yc_i are the residuals idx1 <- lav_matrix_vech_col_idx(p) idx2 <- lav_matrix_vech_row_idx(p) if (meanstructure) { Z <- cbind(Y, Yc[, idx1, drop = FALSE] * Yc[, idx2, drop = FALSE]) } else { Z <- (Yc[, idx1, drop = FALSE] * Yc[, idx2, drop = FALSE]) } if (model.based) { if (meanstructure) { stopifnot(!is.null(Mu)) sigma <- c(as.numeric(Mu), lav_matrix_vech(Sigma)) } else { sigma <- lav_matrix_vech(Sigma) } Zc <- t(t(Z) - sigma) } else { Zc <- t(t(Z) - colMeans(Z, na.rm = TRUE)) } # clustered? 
if (length(cluster.idx) > 0L) { Zc <- rowsum(Zc, cluster.idx) } if (anyNA(Zc)) { Gamma <- lav_matrix_crossprod(Zc) / N } else { Gamma <- base::crossprod(Zc) / N } } else if (!conditional.x && fixed.x) { if (model.based) { Yc <- t(t(Y) - as.numeric(Mu)) Y.bar <- colMeans(Y, na.rm = TRUE) res.cov <- (Sigma[-x.idx, -x.idx, drop = FALSE] - Sigma[-x.idx, x.idx, drop = FALSE] %*% solve(Sigma[x.idx, x.idx, drop = FALSE]) %*% Sigma[x.idx, -x.idx, drop = FALSE]) res.slopes <- (solve(Sigma[x.idx, x.idx, drop = FALSE]) %*% Sigma[x.idx, -x.idx, drop = FALSE]) res.int <- (Y.bar[-x.idx] - as.numeric(colMeans(Y[, x.idx, drop = FALSE], na.rm = TRUE ) %*% res.slopes)) x.bar <- Y.bar[x.idx] yhat.bar <- as.numeric(res.int + as.numeric(x.bar) %*% res.slopes) YHAT.bar <- numeric(p) YHAT.bar[-x.idx] <- yhat.bar YHAT.bar[x.idx] <- x.bar YHAT.cov <- Sigma YHAT.cov[-x.idx, -x.idx] <- Sigma[-x.idx, -x.idx] - res.cov yhat <- cbind(1, Y[, x.idx]) %*% rbind(res.int, res.slopes) YHAT <- Y YHAT[, -x.idx] <- yhat # YHAT <- cbind(yhat, Y[,x.idx]) YHATc <- t(t(YHAT) - YHAT.bar) idx1 <- lav_matrix_vech_col_idx(p) idx2 <- lav_matrix_vech_row_idx(p) if (meanstructure) { Z <- (cbind(Y, Yc[, idx1, drop = FALSE] * Yc[, idx2, drop = FALSE]) - cbind(YHAT, YHATc[, idx1, drop = FALSE] * YHATc[, idx2, drop = FALSE])) sigma1 <- c(Mu, lav_matrix_vech(Sigma)) sigma2 <- c(YHAT.bar, lav_matrix_vech(YHAT.cov)) } else { Z <- (Yc[, idx1, drop = FALSE] * Yc[, idx2, drop = FALSE] - YHATc[, idx1, drop = FALSE] * YHATc[, idx2, drop = FALSE]) sigma1 <- lav_matrix_vech(Sigma) sigma2 <- lav_matrix_vech(YHAT.cov) } Zc <- t(t(Z) - (sigma1 - sigma2)) } else { QR <- qr(cbind(1, Y[, x.idx, drop = FALSE])) yhat <- qr.fitted(QR, Y[, -x.idx, drop = FALSE]) # YHAT <- cbind(yhat, Y[,x.idx]) YHAT <- Y YHAT[, -x.idx] <- yhat Yc <- t(t(Y) - colMeans(Y, na.rm = TRUE)) YHATc <- t(t(YHAT) - colMeans(YHAT, na.rm = TRUE)) idx1 <- lav_matrix_vech_col_idx(p) idx2 <- lav_matrix_vech_row_idx(p) if (meanstructure) { Z <- (cbind(Y, Yc[, idx1, drop = FALSE] * Yc[, idx2, drop = FALSE]) - cbind(YHAT, YHATc[, idx1, drop = FALSE] * YHATc[, idx2, drop = FALSE])) } else { Z <- (Yc[, idx1, drop = FALSE] * Yc[, idx2, drop = FALSE] - YHATc[, idx1, drop = FALSE] * YHATc[, idx2, drop = FALSE]) } Zc <- t(t(Z) - colMeans(Z, na.rm = TRUE)) } # clustered? if (length(cluster.idx) > 0L) { Zc <- rowsum(Zc, cluster.idx) } if (anyNA(Zc)) { Gamma <- lav_matrix_crossprod(Zc) / N } else { Gamma <- base::crossprod(Zc) / N } } else { # conditional.x # 4 possibilities: # - no meanstructure, no slopes # - meanstructure, no slopes # - no meanstructure, slopes # - meanstructure, slopes # regress Y on X, and compute residuals X <- cbind(1, Y[, x.idx, drop = FALSE]) QR <- qr(X) RES <- qr.resid(QR, Y[, -x.idx, drop = FALSE]) p <- ncol(RES) idx1 <- lav_matrix_vech_col_idx(p) idx2 <- lav_matrix_vech_row_idx(p) if (meanstructure || slopestructure) { XtX.inv <- unname(solve(crossprod(X))) Xi <- (X %*% XtX.inv) * N ## FIXME, shorter way? 
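      # Illustration (sketch, not part of the fitting code): row i of Xi is
      # N * (X'X)^{-1} x_i, so each column of Xi * RES holds the
      # per-observation score contributions of one intercept/slope
      # parameter, and crossprod(Zc) / N further below is the usual
      # outer-product estimate of Gamma. A minimal stand-alone analogue,
      # assuming hypothetical data 'x' and 'y' (names not from this file):
      #   x <- cbind(1, rnorm(100))
      #   y <- as.numeric(x %*% c(1, 2) + rnorm(100))
      #   Xi <- x %*% solve(crossprod(x)) * nrow(x)  # same construction
      #   Z <- Xi * qr.resid(qr(x), y)               # intercept/slope scores
      #   crossprod(scale(Z, scale = FALSE)) / nrow(x)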
      ncX <- NCOL(X)
      ncY <- NCOL(RES)
    }
    if (meanstructure) {
      if (slopestructure) {
        # Xi.idx <- rep(seq_len(ncX), each = ncY)
        # Res.idx <- rep(seq_len(ncY), times = ncX)
        Xi.idx <- rep(seq_len(ncX), times = ncY)
        Res.idx <- rep(seq_len(ncY), each = ncX)
        Z <- cbind(
          Xi[, Xi.idx, drop = FALSE] * RES[, Res.idx, drop = FALSE],
          RES[, idx1, drop = FALSE] * RES[, idx2, drop = FALSE]
        )
      } else {
        Xi.idx <- rep(1L, each = ncY)
        Z <- cbind(
          Xi[, Xi.idx, drop = FALSE] * RES,
          RES[, idx1, drop = FALSE] * RES[, idx2, drop = FALSE]
        )
      }
    } else {
      if (slopestructure) {
        # Xi.idx <- rep(seq_len(ncX), each = ncY)
        # Xi.idx <- Xi.idx[ -seq_len(ncY) ]
        Xi.idx <- rep(seq(2, ncX), times = ncY)
        # Res.idx <- rep(seq_len(ncY), times = (ncX - 1L))
        Res.idx <- rep(seq_len(ncY), each = (ncX - 1L))
        Z <- cbind(
          Xi[, Xi.idx, drop = FALSE] * RES[, Res.idx, drop = FALSE],
          RES[, idx1, drop = FALSE] * RES[, idx2, drop = FALSE]
        )
      } else {
        Z <- RES[, idx1, drop = FALSE] * RES[, idx2, drop = FALSE]
      }
    }

    if (model.based) {
      Zc <- t(t(Z) - sigma)
    } else {
      Zc <- t(t(Z) - colMeans(Z, na.rm = TRUE))
    }

    # clustered?
    if (length(cluster.idx) > 0L) {
      Zc <- rowsum(Zc, cluster.idx)
    }

    if (anyNA(Zc)) {
      Gamma <- lav_matrix_crossprod(Zc) / N
    } else {
      Gamma <- base::crossprod(Zc) / N
    }
  }

  # only to mimic Mplus when estimator = "WLS"
  if (Mplus.WLS && !fixed.x && !conditional.x) {
    # adjust G_22 (the varcov part)
    S <- cov(Y, use = "pairwise")
    w <- lav_matrix_vech(S)
    w.biased <- (N - 1) / N * w
    diff <- outer(w, w) - outer(w.biased, w.biased)
    if (meanstructure) {
      Gamma[-seq_len(p), -seq_len(p)] <-
        Gamma[-seq_len(p), -seq_len(p), drop = FALSE] - diff
    } else {
      Gamma <- Gamma - diff
    }
    if (meanstructure) {
      # adjust G_12/G_21 (third-order)
      # strange rescaling?
      N1 <- (N - 1) / N
      Gamma[seq_len(p), -seq_len(p)] <- Gamma[seq_len(p), -seq_len(p)] * N1
      Gamma[-seq_len(p), seq_len(p)] <- Gamma[-seq_len(p), seq_len(p)] * N1
    }
  }

  # clustered?
  if (length(cluster.idx) > 0L) {
    nC <- nrow(Zc)
    Gamma <- Gamma * nC / (nC - 1)
  }

  # unbiased?
  if (unbiased) {
    # normal-theory Gamma (cov only)
    GammaNT.cov <- 2 * lav_matrix_duplication_ginv_pre_post(COV %x% COV)
    if (meanstructure) {
      Gamma.cov <- Gamma[-(1:p), -(1:p), drop = FALSE]
      Gamma.mean.cov <- Gamma[1:p, -(1:p), drop = FALSE]
    } else {
      Gamma.cov <- Gamma
    }
    # Browne's unbiased DF estimator (COV part)
    Gamma.u <- (N * (N - 1) / (N - 2) / (N - 3) * Gamma.cov -
      N / (N - 2) / (N - 3) *
        (GammaNT.cov - 2 / (N - 1) * tcrossprod(cov.vech)))
    if (meanstructure) {
      Gamma <- lav_matrix_bdiag(COV, Gamma.u)
      # 3-rd order:
      Gamma[1:p, (p + 1):ncol(Gamma)] <- Gamma.mean.cov * N / (N - 2)
      Gamma[(p + 1):ncol(Gamma), 1:p] <- t(Gamma.mean.cov * N / (N - 2))
    } else {
      Gamma <- Gamma.u
    }
  } # unbiased

  Gamma
}

# ADF Gamma for correlations
#
# 30 May 2024: basic version: fixed.x=FALSE, conditional.x=FALSE, ...
lav_samplestats_cor_Gamma <- function(Y, meanstructure = FALSE) {
  # coerce to matrix
  Y <- unname(as.matrix(Y))
  N <- nrow(Y)
  P <- ncol(Y)

  # compute S and R
  S <- cov(Y) * (N - 1) / N
  R <- cov2cor(S)

  # create z-scores
  SD <- sqrt(diag(S))
  Yz <- t((t(Y) - colMeans(Y)) / SD)

  # create squared z-scores
  Yz2 <- Yz * Yz

  # find indices so we avoid 1) double subscripts (diagonal!), and
  # 2) duplicated subscripts (symmetric!)
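  # Illustration (not lavaan API): for P = 3, vech order over the strict
  # lower triangle is (2,1), (3,1), (3,2), so below idx1 = c(1, 1, 2) holds
  # the column of each pair and idx2 = c(2, 3, 3) the row, and
  # Yz[, idx1] * Yz[, idx2] stacks the products z1*z2, z1*z3, z2*z3 whose
  # column means are the sample correlations. The same pairs in base R only
  # (this mirrors, but does not call, the lav_matrix helpers):
  #   pairs <- utils::combn(3, 2)   # columns: (1,2), (1,3), (2,3)
  #   idx1 <- pairs[1, ]            # c(1, 1, 2)
  #   idx2 <- pairs[2, ]            # c(2, 3, 3)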
  idx1 <- lav_matrix_vech_col_idx(P, diagonal = FALSE)
  idx2 <- lav_matrix_vech_row_idx(P, diagonal = FALSE)

  ZR1 <- (Yz[, idx1, drop = FALSE] * Yz[, idx2, drop = FALSE])
  ZR2 <- (Yz2[, idx1, drop = FALSE] + Yz2[, idx2, drop = FALSE])
  ZR2 <- t(t(ZR2) * lav_matrix_vech(R, diagonal = FALSE))
  ZRR <- ZR1 - 0.5 * ZR2
  if (meanstructure) {
    ZRR <- cbind(Yz, ZRR)
  }
  Gamma <- crossprod(ZRR) / N

  Gamma
}

# normal theory version
# 30 May 2024: basic version: fixed.x=FALSE, conditional.x=FALSE, ...
lav_samplestats_cor_Gamma_NT <- function(Y = NULL,
                                         wt = NULL,
                                         COV = NULL, # joint!
                                         MEAN = NULL, # joint!
                                         rescale = FALSE,
                                         x.idx = integer(0L),
                                         fixed.x = FALSE,
                                         conditional.x = FALSE,
                                         meanstructure = FALSE,
                                         slopestructure = FALSE) {
  # check arguments
  if (length(x.idx) == 0L) {
    conditional.x <- FALSE
    fixed.x <- FALSE
  } else {
    lav_msg_stop(gettext(
      "x.idx not supported (yet) for correlations; use fixed.x = FALSE (for now)"))
  }
  if (conditional.x) {
    lav_msg_stop(gettext(
      "conditional.x = TRUE not supported (yet) for correlations"))
  }

  # compute COV from Y
  if (is.null(COV)) {
    stopifnot(!is.null(Y))
    # coerce to matrix
    Y <- unname(as.matrix(Y))
    N <- nrow(Y)
    if (is.null(wt)) {
      COV <- cov(Y)
      if (rescale) {
        COV <- COV * (N - 1) / N # (normal) ML version
      }
    } else {
      out <- stats::cov.wt(Y, wt = wt, method = "ML")
      COV <- out$cov
    }
  } else {
    if (!missing(rescale)) {
      lav_msg_warn(gettext("rescale= argument has no effect if COV is given"))
    }
    if (!missing(wt)) {
      lav_msg_warn(gettext("wt= argument has no effect if COV is given"))
    }
  }

  # if needed, compute MEAN from Y
  if (conditional.x && length(x.idx) > 0L && is.null(MEAN) &&
      (meanstructure || slopestructure)) {
    stopifnot(!is.null(Y))
    if (is.null(wt)) {
      MEAN <- colMeans(Y, na.rm = TRUE)
    } else {
      MEAN <- out$center
    }
  }

  # rename
  S <- COV
  R <- cov2cor(S)
  M <- MEAN
  P <- nrow(S)

  # unconditional
  if (!conditional.x) {
    # unconditional - stochastic x
    if (!fixed.x) {
      IP <- diag(P) %x% R
      RR <- R %x% R
      Gamma.Z.NT <- RR + lav_matrix_commutation_pre(RR)
      tmp <- (IP + lav_matrix_commutation_pre(IP)) / 2
      zero.idx <- seq_len(P * P)[-lav_matrix_diag_idx(P)]
      tmp[, zero.idx] <- 0
      A <- -tmp
      diag(A) <- 1 - diag(tmp)
      Gamma.NT.big <- A %*% Gamma.Z.NT %*% t(A)
      r.idx <- lav_matrix_vech_idx(P, diagonal = FALSE)
      Gamma <- Gamma.NT.big[r.idx, r.idx]
      if (meanstructure) {
        Gamma <- lav_matrix_bdiag(R, Gamma)
      }
      # unconditional - fixed x
    } else {
      # TODO
    }
  } else { # conditional.x
    # TODO
  }

  Gamma
}
lavaan/R/lav_partable_ov_from_data.R0000644000176200001440000000336014627656441017235 0ustar liggesusers# handle ov.order = "data" by adding attribute "ovda" to FLAT
lav_partable_ov_from_data <- function(FLAT = NULL, # nolint
                                      data = NULL,
                                      sample.cov = NULL,
                                      slotData = NULL) { # nolint
  # current model-based ov.names
  ov.names <- lav_partable_vnames(FLAT, type = "ov")

  # get data-based ov.names
  data.names <- NULL
  if (!is.null(data)) {
    data.names <- names(data)
  } else if (!is.null(sample.cov)) {
    # multiple group/blocks?
    if (is.list(sample.cov)) {
      data.names <- unique(unlist(lapply(sample.cov, colnames)))
      if (is.null(data.names)) {
        # try again with rows
        data.names <- unique(unlist(lapply(sample.cov, rownames)))
      }
    } else {
      data.names <- colnames(sample.cov)
      if (is.null(data.names)) {
        # try again with rows
        data.names <- rownames(sample.cov)
      }
    }
  } else if (!is.null(slotData)) {
    data.names <- unique(unlist(slotData@ov.names))
  }
  if (is.null(data.names) || length(data.names) == 0L) {
    lav_msg_stop(gettext("could not find variable names in data/sample.cov"))
  }

  # extract needed ov.names in the same order as the data
  ov.names.data <- data.names[data.names %in% ov.names]

  # check if we have all of them
  if (length(ov.names.data) != length(ov.names)) {
    idx.missing <- which(!(ov.names %in% ov.names.data))
    lav_msg_stop(gettextf(
      "some (observed) variables specified in the model are not found in the data: %s",
      lav_msg_view(ov.names[idx.missing], "none")))
  }

  # check if the order is the same
  if (!identical(ov.names, ov.names.data)) {
    attr(FLAT, "ovda") <- ov.names.data # nolint
  }

  return(FLAT)
}
lavaan/R/lav_sem_miiv.R0000644000176200001440000000105114627656441014526 0ustar liggesusers# place-holder for MIIV estimation
# internal function to be used inside lav_optim_noniter
# return 'x', the estimated vector of free parameters
lav_sem_miiv_internal <- function(lavmodel = NULL,
                                  lavsamplestats = NULL,
                                  lavpartable = NULL,
                                  lavdata = NULL,
                                  lavoptions = NULL) {
  # this is the entry-point for MIIV estimation
  lav_msg_note(gettext("** Estimator MIIV is still under development! **\n"))

  # return error (for now)
  x <- as.numeric(NA)
  class(x) <- "try-error"

  x
}
lavaan/R/lav_lavaan_step06_h1.R0000644000176200001440000000315414627656441015757 0ustar liggesuserslav_lavaan_step06_h1 <- function(sloth1 = NULL,
                                 lavoptions = NULL,
                                 lavsamplestats = NULL,
                                 lavdata = NULL,
                                 lavpartable = NULL) {
  # # # # # # # # # # #
  #  6. lavh1  #
  # # # # # # # # # # #

  # if sloth1 not NULL
  #   copy to lavh1
  # else
  #   if lavoptions$h1 TRUE
  #     if length(lavsamplestats$ntotal) > 0
  #       compute lavh1 via lav_h1_implied_logl
  #   else
  #     check lavoptions$h1 is logical, if not *** error ***

  if (!is.null(sloth1)) {
    lavh1 <- sloth1
  } else {
    lavh1 <- list()
    if (is.logical(lavoptions$h1) && lavoptions$h1) {
      if (length(lavsamplestats@ntotal) > 0L ||
          (!is.null(lavoptions$samplestats) && !lavoptions$samplestats)) {
        # lavsamplestats filled in
        if (lav_verbose()) {
          cat("lavh1 ... start:\n")
        }

        # implied h1 statistics and logl (if available)
        lavh1 <- lav_h1_implied_logl(
          lavdata = lavdata,
          lavsamplestats = lavsamplestats,
          lavpartable = lavpartable,
          lavoptions = lavoptions
        )
        if (lav_debug()) {
          print(lavh1)
        }

        if (lav_verbose()) {
          cat("lavh1 ... done.\n")
        }
      } else {
        # do nothing for now
      }
    } else {
      if (!is.logical(lavoptions$h1)) {
        lav_msg_stop(gettext("argument `h1' must be logical (for now)"))
      }
      # TODO: allow h1 to be either a model syntax, a parameter table,
      # or a fitted lavaan object
    }
  }

  lavh1
}
lavaan/R/lav_fit_measures.R0000644000176200001440000012004214627656441015406 0ustar liggesusers# user-visible function to extract the fit measures
# output can be 1) vector (default), 2) list, 3) matrix, or 4) text
# in the latter case, the result will be of class "lavaan.fitMeasures"
# for which the printing is done by print.lavaan.fitMeasures()

# new in 0.6-13:
# the big families are computed in dedicated functions:
# - lav_fit_rmsea_lavobject
# - lav_fit_cfi_lavobject
# - lav_fit_aic_lavobject
# - lav_residuals_summary

# Note: fitMeasures/fitmeasures are generic functions; they include a "..."
# so lavaan.mi can add arguments to pass to lavTestLRT() and
# lavTestLRT.mi() about how to pool chi-squared.

setMethod(
  "fitMeasures", signature(object = "lavaan"),
  function(object, fit.measures = "all",
           baseline.model = NULL, h1.model = NULL,
           fm.args = list(
             standard.test = "default",
             scaled.test = "default",
             rmsea.ci.level = 0.90,
             rmsea.close.h0 = 0.05,
             rmsea.notclose.h0 = 0.08,
             robust = TRUE,
             cat.check.pd = TRUE
           ),
           output = "vector", ...) {
    # note: the ... is not used by lavaan
    lav_fit_measures(
      object = object, fit.measures = fit.measures,
      baseline.model = baseline.model, h1.model = h1.model,
      fm.args = fm.args, output = output
    )
  }
)

setMethod(
  "fitmeasures", signature(object = "lavaan"),
  function(object, fit.measures = "all",
           baseline.model = NULL, h1.model = NULL,
           fm.args = list(
             standard.test = "default",
             scaled.test = "default",
             rmsea.ci.level = 0.90,
             rmsea.close.h0 = 0.05,
             rmsea.notclose.h0 = 0.08,
             robust = TRUE,
             cat.check.pd = TRUE
           ),
           output = "vector", ...) {
    # note: the ... is not used by lavaan
    lav_fit_measures(
      object = object, fit.measures = fit.measures,
      baseline.model = baseline.model, h1.model = h1.model,
      fm.args = fm.args, output = output
    )
  }
)

# S3 method for efaList
fitMeasures.efaList <- fitmeasures.efaList <- function(
    object, fit.measures = "all",
    baseline.model = NULL, h1.model = NULL,
    fm.args = list(
      standard.test = "default",
      scaled.test = "default",
      rmsea.ci.level = 0.90,
      rmsea.close.h0 = 0.05,
      rmsea.notclose.h0 = 0.08,
      robust = TRUE,
      cat.check.pd = TRUE
    ),
    output = "list", ...) {
  # kill object$loadings if present
  object[["loadings"]] <- NULL

  # get fit measures for each model
  res <- simplify2array(lapply(
    object,
    function(x) {
      lav_fit_measures(
        object = x, fit.measures = fit.measures,
        h1.model = h1.model,
        baseline.model = baseline.model,
        fm.args = fm.args, output = "vector"
      )
    }
  ))

  # check if res is a matrix
  if (!is.matrix(res)) {
    if (is.numeric(res)) {
      # fit.measures is just 1 element, or only one was correct
      NAME <- names(res)[1]
      res <- matrix(res, nrow = 1L)
      rownames(res) <- NAME
    } else {
      # wrong fit measures?
      # create empty matrix
      res <- matrix(0, nrow = 0L, ncol = length(object))
    }
  }

  # rownames
  nfactors <- sapply(object, function(x) x@pta$nfac[[1]])
  colnames(res) <- paste0("nfactors = ", nfactors)

  # class
  class(res) <- c("lavaan.matrix", "matrix")

  res
}

lav_fit_measures <- function(object, fit.measures = "all",
                             baseline.model = NULL, h1.model = NULL,
                             fm.args = list(
                               standard.test = "default",
                               scaled.test = "default",
                               rmsea.ci.level = 0.90,
                               rmsea.close.h0 = 0.05,
                               rmsea.notclose.h0 = 0.08,
                               robust = TRUE,
                               cat.check.pd = TRUE
                             ),
                             output = "vector") {
  # default fm.args
  default.fm.args <- list(
    standard.test = "default",
    scaled.test = "default",
    rmsea.ci.level = 0.90,
    rmsea.close.h0 = 0.05,
    rmsea.notclose.h0 = 0.08,
    robust = TRUE,
    cat.check.pd = TRUE
  )
  if (!missing(fm.args)) {
    fm.args <- modifyList(default.fm.args, fm.args)
  } else {
    fm.args <- default.fm.args
  }

  # standard test
  if (fm.args$standard.test == "default") {
    fm.args$standard.test <- object@Options$scaled.test
    # usually "standard", but could have been changed
    # the 'scaled' version will be based on the scaled.test!
    if (is.null(fm.args$standard.test)) {
      fm.args$standard.test <- "standard"
    }
  }

  # check for convergence
  if (object@optim$npar > 0L && !object@optim$converged) {
    lav_msg_stop(gettext(
      "fit measures not available if model did not converge"))
  }

  # do we have a test statistic?
TEST <- lavInspect(object, "test") test.names <- unname(sapply(TEST, "[[", "test")) if (test.names[1] == "none") { lav_msg_stop(gettext("fit measures not available if test = \"none\".")) #FIXME: allow RMRs, log.likelihoods, info criteria, npar, ntotal } standard.test <- fm.args$standard.test scaled.test <- fm.args$scaled.test # check standard.test standard.test <- lav_test_rename(standard.test, check = TRUE)[1] # only 1 # check scaled.test if (!scaled.test %in% c("none", "default", "standard")) { scaled.test <- lav_test_rename(scaled.test, check = TRUE)[1] # only 1 } # which test statistic do we need? rerun.lavtest.flag <- FALSE if (!standard.test %in% test.names) { rerun.lavtest.flag <- TRUE } if (!scaled.test %in% c("none", "default", "standard") && !scaled.test %in% test.names) { rerun.lavtest.flag <- TRUE } # do we have a scaled test statistic? if so, which one? scaled.flag <- FALSE if (scaled.test != "none" && any(test.names %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted" ))) { scaled.flag <- TRUE if (scaled.test %in% c("standard", "default")) { tmp.idx <- which(test.names %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted" )) scaled.test <- test.names[tmp.idx[1]] } } # rerun lavTest? if (rerun.lavtest.flag) { this.test <- standard.test if (scaled.flag) { this.test <- unique(this.test, scaled.test) } TEST <- lavTest(object, test = this.test, scaled.test = standard.test, drop.list.single = FALSE ) # replace in object, if we pass it to lav_fit_* functions object@test <- TEST test.names <- unname(sapply(TEST, "[[", "test")) } # TDJ: Check for user-supplied h1 model # Similar to BASELINE model, use the following priority: # 1. user-provided h1 model # 2. h1 model in @external slot # 3. default h1 model (already in @h1 slot, no update necessary) user_h1_exists <- FALSE # 1. user-provided h1 model if (!is.null(h1.model)) { stopifnot(inherits(h1.model, "lavaan")) user_h1_exists <- TRUE # 2. h1 model in @external slot } else if (!is.null(object@external$h1.model)) { stopifnot(inherits(object@external$h1.model, "lavaan")) h1.model <- object@external$h1.model user_h1_exists <- TRUE } ## Update statistics in @test slot? 
if (user_h1_exists) { ## update @test slot FIT <- lav_update_test_custom_h1(lav_obj_h0 = object, lav_obj_h1 = h1.model) ## re-assign TEST object that is used below object@test <- TEST <- FIT@test test.names <- unname(sapply(TEST, "[[", "test")) } # get index of standard.test in TEST test.idx <- which(test.names == standard.test)[1] # get index of scaled test (if any) in TEST if (scaled.flag) { scaled.idx <- which(test.names == scaled.test)[1] } # check output argument if (output %in% c("vector", "horizontal")) { output <- "vector" } else if (output %in% c("list")) { output <- "list" } else if (output %in% c("matrix", "vertical")) { output <- "matrix" } else if (output %in% c("text", "pretty", "summary")) { output <- "text" } else { lav_msg_stop(gettextf("output should be %s.", lav_msg_view(c("vector", "list", "matrix", "text"), "none", FALSE) )) } # options categorical.flag <- object@Model@categorical fiml.flag <- (fm.args$robust && object@Options$missing %in% c("ml", "ml.x")) estimator <- object@Options$estimator # basic ingredients G <- object@Data@ngroups X2 <- TEST[[test.idx]]$stat df <- TEST[[test.idx]]$df if (scaled.flag) { X2.scaled <- TEST[[scaled.idx]]$stat df.scaled <- TEST[[scaled.idx]]$df } npar <- lav_utils_get_npar(lavobject = object) N <- lav_utils_get_ntotal(lavobject = object) # N vs N-1 # define 'sets' of fit measures: fit.always <- c("npar") # basic chi-square test fit.chisq <- c("fmin", "chisq", "df", "pvalue") if (scaled.flag) { fit.chisq <- c( fit.chisq, "chisq.scaled", "df.scaled", "pvalue.scaled", "chisq.scaling.factor" ) } # baseline model fit.baseline <- c("baseline.chisq", "baseline.df", "baseline.pvalue") if (scaled.flag) { fit.baseline <- c( fit.baseline, "baseline.chisq.scaled", "baseline.df.scaled", "baseline.pvalue.scaled", "baseline.chisq.scaling.factor" ) } fit.cfi.tli <- c("cfi", "tli") if (scaled.flag) { fit.cfi.tli <- c(fit.cfi.tli, "cfi.scaled", "tli.scaled") } if (fm.args$robust && (scaled.flag || categorical.flag || fiml.flag)) { fit.cfi.tli <- c(fit.cfi.tli, "cfi.robust", "tli.robust") } # other incremental fit indices fit.cfi.other <- c("nnfi", "rfi", "nfi", "pnfi", "ifi", "rni") if (scaled.flag) { fit.cfi.other <- c( fit.cfi.other, "nnfi.scaled", "rfi.scaled", "nfi.scaled", "pnfi.scaled", "ifi.scaled", "rni.scaled" ) } if (fm.args$robust && (scaled.flag || categorical.flag || fiml.flag)) { fit.cfi.other <- c(fit.cfi.other, "nnfi.robust", "rni.robust") } fit.cfi <- c(fit.baseline, fit.cfi.tli, fit.cfi.other) # likelihood based measures if (estimator == "MML") { fit.logl <- c("logl", "aic", "bic", "ntotal", "bic2") } else { fit.logl <- c( "logl", "unrestricted.logl", "aic", "bic", "ntotal", "bic2" ) } if (scaled.flag && scaled.test %in% c("yuan.bentler", "yuan.bentler.mplus")) { fit.logl <- c(fit.logl, "scaling.factor.h1", "scaling.factor.h0") } # rmsea fit.rmsea <- c( "rmsea", "rmsea.ci.lower", "rmsea.ci.upper", "rmsea.ci.level", "rmsea.pvalue", "rmsea.close.h0", "rmsea.notclose.pvalue", "rmsea.notclose.h0" ) if (scaled.flag) { fit.rmsea <- c( fit.rmsea, "rmsea.scaled", "rmsea.ci.lower.scaled", "rmsea.ci.upper.scaled", "rmsea.pvalue.scaled", "rmsea.notclose.pvalue.scaled" ) } if (fm.args$robust && (scaled.flag || categorical.flag || fiml.flag)) { fit.rmsea <- c( fit.rmsea, "rmsea.robust", "rmsea.ci.lower.robust", "rmsea.ci.upper.robust", "rmsea.pvalue.robust", "rmsea.notclose.pvalue.robust" ) } # srmr if (categorical.flag) { fit.srmr <- c("srmr") fit.srmr2 <- c( "rmr", "rmr_nomean", "srmr", # per default equal to srmr_bentler_nomean "srmr_bentler", 
"srmr_bentler_nomean", "crmr", "crmr_nomean", "srmr_mplus", "srmr_mplus_nomean" ) } else { if (object@Data@nlevels > 1L) { fit.srmr <- c("srmr", "srmr_within", "srmr_between") fit.srmr2 <- c("srmr", "srmr_within", "srmr_between") } else { fit.srmr <- c("srmr") fit.srmr2 <- c( "rmr", "rmr_nomean", "srmr", # the default "srmr_bentler", "srmr_bentler_nomean", "crmr", "crmr_nomean", "srmr_mplus", "srmr_mplus_nomean" ) } } # various if (object@Data@nlevels > 1L) { fit.other <- "" } else if (estimator == "PML") { fit.other <- c("cn_05", "cn_01", "mfi") if (!categorical.flag) { # really needed? fit.other <- c(fit.other, "ecvi") } } else { fit.other <- c("cn_05", "cn_01", "gfi", "agfi", "pgfi", "mfi") if (!categorical.flag) { # really needed? fit.other <- c(fit.other, "ecvi") } else { fit.other <- c(fit.other, "wrmr") } } # lower case fit.measures <- tolower(fit.measures) # select 'default' fit measures if (length(fit.measures) == 1L) { if (fit.measures == "default") { if (estimator == "ML" || estimator == "PML") { fit.measures <- c( fit.always, fit.chisq, fit.baseline, fit.cfi.tli, fit.logl, fit.rmsea, fit.srmr ) } else if (estimator == "MML") { fit.measures <- c(fit.always, fit.logl) } else { fit.measures <- c( fit.always, fit.chisq, fit.baseline, fit.cfi.tli, fit.rmsea, fit.srmr ) } } else if (fit.measures == "all") { if (estimator == "ML") { fit.measures <- c( fit.always, fit.chisq, fit.baseline, fit.cfi.tli, fit.cfi.other, fit.logl, fit.rmsea, fit.srmr2, fit.other ) } else { fit.measures <- c( fit.always, fit.chisq, fit.baseline, fit.cfi.tli, fit.cfi.other, fit.rmsea, fit.srmr2, fit.other ) } } } # catch empty list if (length(fit.measures) == 0L) { return(list()) } # main container indices <- list() indices["npar"] <- npar indices["ntotal"] <- object@SampleStats@ntotal indices["fmin"] <- object@optim$fx # note = 0.5 * fmin if ML # CHI-SQUARE TEST if (any(fit.chisq %in% fit.measures)) { indices["chisq"] <- X2 indices["df"] <- df indices["pvalue"] <- TEST[[test.idx]]$pvalue if (scaled.flag) { indices["chisq.scaled"] <- X2.scaled indices["df.scaled"] <- df.scaled indices["chisq.scaling.factor"] <- TEST[[scaled.idx]]$scaling.factor indices["pvalue.scaled"] <- TEST[[scaled.idx]]$pvalue } } # BASELINE FAMILY if (any(fit.cfi %in% fit.measures)) { # rerun baseline? 
if (rerun.lavtest.flag) { object@Options$test <- this.test fit.indep <- try(lav_object_independence(object), silent = TRUE) # override object object@baseline$test <- fit.indep@test } indices <- c( indices, lav_fit_cfi_lavobject( lavobject = object, fit.measures = fit.measures, baseline.model = baseline.model, h1.model = h1.model, standard.test = standard.test, scaled.test = scaled.test, robust = fm.args$robust, cat.check.pd = fm.args$cat.check.pd ) ) } # INFORMATION CRITERIA if (any(fit.logl %in% fit.measures)) { indices <- c( indices, lav_fit_aic_lavobject( lavobject = object, fit.measures = fit.measures, standard.test = standard.test, scaled.test = scaled.test, estimator = estimator ) ) } # RMSEA and friends if (any(fit.rmsea %in% fit.measures)) { # check rmsea options rmsea.ci.level <- 0.90 rmsea.close.h0 <- 0.05 rmsea.notclose.h0 <- 0.08 if (!is.null(fm.args$rmsea.ci.level) && is.finite(fm.args$rmsea.ci.level)) { rmsea.ci.level <- fm.args$rmsea.ci.level if (rmsea.ci.level < 0 || rmsea.ci.level > 1.0) { lav_msg_warn(gettextf( "invalid rmsea.ci.level value [%s] set to default 0.90.", rmsea.ci.level)) rmsea.ci.level <- 0.90 } } if (!is.null(fm.args$rmsea.close.h0) && is.finite(fm.args$rmsea.close.h0)) { rmsea.close.h0 <- fm.args$rmsea.close.h0 if (rmsea.close.h0 < 0) { rmsea.close.h0 <- 0 } } if (!is.null(fm.args$rmsea.notclose.h0) && is.finite(fm.args$rmsea.notclose.h0)) { rmsea.notclose.h0 <- fm.args$rmsea.notclose.h0 if (rmsea.notclose.h0 < 0) { rmsea.notclose.h0 <- 0 } } indices <- c( indices, lav_fit_rmsea_lavobject( lavobject = object, fit.measures = fit.measures, standard.test = standard.test, scaled.test = scaled.test, ci.level = rmsea.ci.level, close.h0 = rmsea.close.h0, notclose.h0 = rmsea.notclose.h0, robust = fm.args$robust, cat.check.pd = fm.args$cat.check.pd ) ) } # SRMR and friends if (any(fit.srmr2 %in% fit.measures)) { indices <- c( indices, lav_fit_srmr_lavobject( lavobject = object, fit.measures = fit.measures ) ) } # GFI and friends fit.gfi <- c("gfi", "agfi", "pgfi") if (any(fit.gfi %in% fit.measures)) { indices <- c( indices, lav_fit_gfi_lavobject( lavobject = object, fit.measures = fit.measures ) ) } # various: Hoelter Critical N (CN) if (any(c("cn_05", "cn_01") %in% fit.measures)) { indices["cn_05"] <- lav_fit_cn(X2 = X2, df = df, N = N, alpha = 0.05) indices["cn_01"] <- lav_fit_cn(X2 = X2, df = df, N = N, alpha = 0.01) } # various: WRMR if ("wrmr" %in% fit.measures) { nel <- length(object@SampleStats@WLS.obs[[1]]) indices["wrmr"] <- lav_fit_wrmr(X2 = X2, nel = nel) } # various: MFI if ("mfi" %in% fit.measures) { indices["mfi"] <- lav_fit_mfi(X2 = X2, df = df, N = N) } # various: ECVI if ("ecvi" %in% fit.measures) { indices["ecvi"] <- lav_fit_ecvi(X2 = X2, npar = npar, N = N) } # keep only what we need out <- indices[fit.measures] if (all(is.na(names(out)))) { # perhaps, fit.measures = "" # nothing left return(numeric(0L)) } # select output type if (output == "list") { # nothing to do } else if (output == "vector") { out <- unlist(out) class(out) <- c("lavaan.vector", "numeric") } else if (output == "matrix") { out <- as.matrix(unlist(out)) colnames(out) <- "" class(out) <- c("lavaan.matrix", "matrix") } else if (output == "text") { out <- unlist(out) class(out) <- c("lavaan.fitMeasures", "lavaan.vector", "numeric") } out } # print a nice summary of the fit measures print.lavaan.fitMeasures <- function(x, ..., nd = 3L, add.h0 = TRUE) { names.x <- names(x) # scaled? 
scaled.flag <- "chisq.scaled" %in% names.x # num format num.format <- paste("%", max(8L, nd + 5L), ".", nd, "f", sep = "") ## TDJ: optionally add h0 model's fit statistic, for lavaan.mi if (add.h0 && "chisq" %in% names.x) { cat("\nModel Test User Model:\n\n") # container three columns c1 <- c2 <- c3 <- character(0L) # TDJ: Add header used in summary() by lavaan.mi if (scaled.flag) { c1 <- c("", c1) c2 <- c("Standard", c2) c3 <- c("Scaled", c3) } c1 <- c(c1, "Test statistic") c2 <- c(c2, sprintf(num.format, x["chisq"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["chisq.scaled"]), "" )) c1 <- c(c1, "Degrees of freedom") c2 <- c(c2, x["df"]) c3 <- c(c3, ifelse(scaled.flag, ifelse(x["df.scaled"] %% 1 == 0, x["df.scaled"], sprintf(num.format, x["df.scaled"]) ), "" )) c1 <- c(c1, "P-value") c2 <- c(c2, sprintf(num.format, x["pvalue"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["pvalue.scaled"]), "" )) if (scaled.flag && "chisq.scaling.factor" %in% names.x) { c1 <- c(c1, "Average scaling correction factor") c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["chisq.scaling.factor"])) ## check for shift parameter chisq.shift.parameter <- attr(attr(x, "header"), "shift") if (!is.null(chisq.shift.parameter)) { c1 <- c(c1, "Average shift parameter", " simple second-order correction") c2 <- c(c2, "", "") ## This is only provided by the fitMeasures() method for lavaan.mi-class c3 <- c(c3, sprintf(num.format, chisq.shift.parameter), "") } } # check for lavaan.mi attributes "pool.method" and "pool.robust" if (!is.null(attr(x, "pool.method"))) { ## extract information from lavaan.mi object about ## the method used to pool the test statistic pool.method <- attr( x , "pool.method") pool.robust <- attr( x , "pool.robust") standard.test <- attr(attr(x, "header"), "standard.test") scaled.test <- attr(attr(x, "header"), "scaled.test") c1 <- c(c1, "Pooling method") c2 <- c(c2, pool.method) c3 <- c(c3, "") ## (conditionally for D2 method) add other pooling information if (scaled.flag) { c1 <- c(c1, " Pooled statistic") c2 <- c(c2, ifelse(pool.robust, dQuote(scaled.test), dQuote(standard.test))) c3 <- c(c3, "") if (pool.robust && pool.method == "D2") { c1 <- c(c1, paste0(" ", dQuote(scaled.test), " correction applied")) c2 <- c(c2, "BEFORE") c3 <- c(c3, "pooling") } else { c1 <- c(c1, paste0(" ", dQuote(scaled.test), " correction applied")) c2 <- c(c2, "AFTER") c3 <- c(c3, "pooling") } } } # format c1/c2/c3 c1 <- format(c1, width = 35L) c2 <- format(c2, width = 16L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # independence model if ("baseline.chisq" %in% names.x) { cat("\nModel Test Baseline Model:\n\n") c1 <- c2 <- c3 <- character(0L) c1 <- c(c1, "Test statistic") c2 <- c(c2, sprintf(num.format, x["baseline.chisq"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["baseline.chisq.scaled"]), "" )) c1 <- c(c1, "Degrees of freedom") c2 <- c(c2, x["baseline.df"]) c3 <- c(c3, ifelse(scaled.flag, ifelse(x["baseline.df.scaled"] %% 1 == 0, x["baseline.df.scaled"], sprintf(num.format, x["baseline.df.scaled"]) ), "" )) c1 <- c(c1, "P-value") c2 <- c(c2, sprintf(num.format, x["baseline.pvalue"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, 
x["baseline.pvalue.scaled"]), "" )) if (scaled.flag && "baseline.chisq.scaling.factor" %in% names.x) { c1 <- c(c1, "Scaling correction factor") c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["baseline.chisq.scaling.factor"])) } # format c1/c2/c3 c1 <- format(c1, width = 35L) c2 <- format(c2, width = 16L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # cfi/tli if (any(c("cfi", "tli", "nnfi", "rfi", "nfi", "ifi", "rni", "pnfi") %in% names.x)) { cat("\nUser Model versus Baseline Model:\n\n") c1 <- c2 <- c3 <- character(0L) if ("cfi" %in% names.x) { c1 <- c(c1, "Comparative Fit Index (CFI)") c2 <- c(c2, sprintf(num.format, x["cfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["cfi.scaled"]), "" )) } if ("tli" %in% names.x) { c1 <- c(c1, "Tucker-Lewis Index (TLI)") c2 <- c(c2, sprintf(num.format, x["tli"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["tli.scaled"]), "" )) } if ("cfi.robust" %in% names.x) { c1 <- c(c1, "") c2 <- c(c2, "") c3 <- c(c3, "") c1 <- c(c1, "Robust Comparative Fit Index (CFI)") if (scaled.flag) { c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["cfi.robust"])) } else { c2 <- c(c2, sprintf(num.format, x["cfi.robust"])) c3 <- c(c3, "") } } if ("tli.robust" %in% names.x) { c1 <- c(c1, "Robust Tucker-Lewis Index (TLI)") if (scaled.flag) { c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["tli.robust"])) } else { c2 <- c(c2, sprintf(num.format, x["tli.robust"])) c3 <- c(c3, "") } } if ("nnfi" %in% names.x) { c1 <- c(c1, "Bentler-Bonett Non-normed Fit Index (NNFI)") c2 <- c(c2, sprintf(num.format, x["nnfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["nnfi.robust"]), "" )) } if ("nfi" %in% names.x) { c1 <- c(c1, "Bentler-Bonett Normed Fit Index (NFI)") c2 <- c(c2, sprintf(num.format, x["nfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["nfi.scaled"]), "" )) } if ("pnfi" %in% names.x) { c1 <- c(c1, "Parsimony Normed Fit Index (PNFI)") c2 <- c(c2, sprintf(num.format, x["pnfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["pnfi.scaled"]), "" )) } if ("rfi" %in% names.x) { c1 <- c(c1, "Bollen's Relative Fit Index (RFI)") c2 <- c(c2, sprintf(num.format, x["rfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rfi.scaled"]), "" )) } if ("ifi" %in% names.x) { c1 <- c(c1, "Bollen's Incremental Fit Index (IFI)") c2 <- c(c2, sprintf(num.format, x["ifi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["ifi.scaled"]), "" )) } if ("rni" %in% names.x) { c1 <- c(c1, "Relative Noncentrality Index (RNI)") c2 <- c(c2, sprintf(num.format, x["rni"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rni.robust"]), "" )) } # format c1/c2/c3 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # likelihood if ("logl" %in% names.x) { cat("\nLoglikelihood and Information Criteria:\n\n") c1 <- c2 <- c3 <- character(0L) c1 
<- c(c1, "Loglikelihood user model (H0)") c2 <- c(c2, sprintf(num.format, x["logl"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["logl"]), "")) if (!is.na(x["scaling.factor.h0"])) { c1 <- c(c1, "Scaling correction factor") c2 <- c(c2, sprintf(" %10s", "")) c3 <- c(c3, sprintf(num.format, x["scaling.factor.h0"])) c1 <- c(c1, " for the MLR correction") c2 <- c(c2, "") c3 <- c(c3, "") } if ("unrestricted.logl" %in% names.x) { c1 <- c(c1, "Loglikelihood unrestricted model (H1)") c2 <- c(c2, sprintf(num.format, x["unrestricted.logl"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["unrestricted.logl"]), "" )) if (!is.na(x["scaling.factor.h1"])) { c1 <- c(c1, "Scaling correction factor") c2 <- c(c2, sprintf(" %10s", "")) c3 <- c(c3, sprintf(num.format, x["scaling.factor.h1"])) c1 <- c(c1, " for the MLR correction") c2 <- c(c2, "") c3 <- c(c3, "") } } c1 <- c(c1, "") c2 <- c(c2, "") c3 <- c(c3, "") # c1 <- c(c1, "Number of free parameters") # c2 <- c(c2, sprintf(" %10i", x["npar"])) # c3 <- c(c3, ifelse(scaled, sprintf(" %10i", x["npar"]), "")) c1 <- c(c1, "Akaike (AIC)") c2 <- c(c2, sprintf(num.format, x["aic"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["aic"]), "")) c1 <- c(c1, "Bayesian (BIC)") c2 <- c(c2, sprintf(num.format, x["bic"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["bic"]), "")) if (!is.na(x["bic2"])) { c1 <- c(c1, "Sample-size adjusted Bayesian (SABIC)") c2 <- c(c2, sprintf(num.format, x["bic2"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["bic2"]), "")) } # format c1/c2/c3 c1 <- format(c1, width = 39L) c2 <- format(c2, width = 12L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # RMSEA if ("rmsea" %in% names.x) { cat("\nRoot Mean Square Error of Approximation:\n\n") c1 <- c2 <- c3 <- character(0L) c1 <- c(c1, "RMSEA") c2 <- c(c2, sprintf(num.format, x["rmsea"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rmsea.scaled"]), "" )) ci.level <- NULL if ("rmsea.ci.level" %in% names.x) { ci.level <- x["rmsea.ci.level"] } if ("rmsea.ci.lower" %in% names.x) { if (is.null(ci.level)) { c1 <- c(c1, "Confidence interval - lower") } else { c1 <- c(c1, paste0( sprintf("%2d", round(ci.level * 100)), " Percent confidence interval - lower" )) } c2 <- c(c2, sprintf(num.format, x["rmsea.ci.lower"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rmsea.ci.lower.scaled"]), "" )) if (is.null(ci.level)) { c1 <- c(c1, "Confidence interval - upper") } else { c1 <- c(c1, paste0( sprintf("%2d", round(ci.level * 100)), " Percent confidence interval - upper" )) } c2 <- c(c2, sprintf(num.format, x["rmsea.ci.upper"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rmsea.ci.upper.scaled"]), "" )) } rmsea.close.h0 <- NULL if ("rmsea.close.h0" %in% names.x) { rmsea.close.h0 <- x["rmsea.close.h0"] } rmsea.notclose.h0 <- NULL if ("rmsea.notclose.h0" %in% names.x) { rmsea.notclose.h0 <- x["rmsea.notclose.h0"] } if ("rmsea.pvalue" %in% names.x) { if (is.null(rmsea.close.h0)) { c1 <- c(c1, "P-value H_0: RMSEA <= 0.05") } else { c1 <- c(c1, paste0( "P-value H_0: RMSEA <= ", sprintf("%4.3f", rmsea.close.h0) )) } c2 <- c(c2, sprintf(num.format, x["rmsea.pvalue"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, 
x["rmsea.pvalue.scaled"]), "" )) } if ("rmsea.notclose.pvalue" %in% names.x) { if (is.null(rmsea.notclose.h0)) { c1 <- c(c1, "P-value H_0: RMSEA >= 0.080") } else { c1 <- c(c1, paste0( "P-value H_0: RMSEA >= ", sprintf("%4.3f", rmsea.notclose.h0) )) } c2 <- c(c2, sprintf(num.format, x["rmsea.notclose.pvalue"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rmsea.notclose.pvalue.scaled"]), "" )) } # robust if ("rmsea.robust" %in% names.x) { c1 <- c(c1, "") c2 <- c(c2, "") c3 <- c(c3, "") c1 <- c(c1, "Robust RMSEA") if (scaled.flag) { c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["rmsea.robust"])) } else { c2 <- c(c2, sprintf(num.format, x["rmsea.robust"])) c3 <- c(c3, "") } } if ("rmsea.ci.lower.robust" %in% names.x) { if (is.null(ci.level)) { c1 <- c(c1, "Confidence interval - lower") } else { c1 <- c(c1, paste0( sprintf("%2d", round(ci.level * 100)), " Percent confidence interval - lower" )) } if (scaled.flag) { c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["rmsea.ci.lower.robust"])) } else { c2 <- c(c2, sprintf(num.format, x["rmsea.ci.lower.robust"])) c3 <- c(c3, "") } if (is.null(ci.level)) { c1 <- c(c1, "Confidence interval - upper") } else { c1 <- c(c1, paste0( sprintf("%2d", round(ci.level * 100)), " Percent confidence interval - upper" )) } if (scaled.flag) { c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["rmsea.ci.upper.robust"])) } else { c2 <- c(c2, sprintf(num.format, x["rmsea.ci.upper.robust"])) c3 <- c(c3, "") } } if ("rmsea.pvalue.robust" %in% names.x) { if (is.null(rmsea.close.h0)) { c1 <- c(c1, "P-value H_0: Robust RMSEA <= 0.05") } else { c1 <- c(c1, paste0( "P-value H_0: Robust RMSEA <= ", sprintf("%4.3f", rmsea.close.h0) )) } if (scaled.flag) { c2 <- c(c2, "") c3 <- c(c3, sprintf(num.format, x["rmsea.pvalue.robust"])) } else { c2 <- c(c2, sprintf(num.format, x["rmsea.pvalue.robust"])) c3 <- c(c3, "") } } if ("rmsea.notclose.pvalue.robust" %in% names.x) { if (is.null(rmsea.notclose.h0)) { c1 <- c(c1, "P-value H_0: Robust RMSEA >= 0.080") } else { c1 <- c(c1, paste0( "P-value H_0: Robust RMSEA >= ", sprintf("%4.3f", rmsea.notclose.h0) )) } if (scaled.flag) { c2 <- c(c2, "") c3 <- c( c3, sprintf(num.format, x["rmsea.notclose.pvalue.robust"]) ) } else { c2 <- c( c2, sprintf(num.format, x["rmsea.notclose.pvalue.robust"]) ) c3 <- c(c3, "") } } # format c1/c2/c3 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # SRMR #TODO: add CRMR if (any(c("rmr", "srmr") %in% names.x) && !"srmr_within" %in% names.x) { cat("\nStandardized Root Mean Square Residual:\n\n") c1 <- c2 <- c3 <- character(0L) if ("rmr" %in% names.x) { c1 <- c(c1, "RMR") c2 <- c(c2, sprintf(num.format, x["rmr"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rmr"]), "")) } if ("rmr_nomean" %in% names.x) { c1 <- c(c1, "RMR (No Mean)") c2 <- c(c2, sprintf(num.format, x["rmr_nomean"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["rmr_nomean"]), "" )) } if ("srmr" %in% names.x) { c1 <- c(c1, "SRMR") c2 <- c(c2, sprintf(num.format, x["srmr"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["srmr"]), "")) } if ("srmr_nomean" %in% names.x) { c1 <- c(c1, "SRMR (No Mean)") c2 <- c(c2, 
sprintf(num.format, x["srmr_nomean"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["srmr_nomean"]), "" )) } # format c1/c2/c3 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # SRMR -- multilevel #TODO: add CRMR? if (any(c("srmr_within", "srmr_between") %in% names.x)) { cat("\nStandardized Root Mean Square Residual (corr metric):\n\n") c1 <- c2 <- c3 <- character(0L) if ("srmr_within" %in% names.x) { c1 <- c(c1, "SRMR (within covariance matrix)") c2 <- c(c2, sprintf(num.format, x["srmr_within"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["srmr_within"]), "" )) } if ("srmr_between" %in% names.x) { c1 <- c(c1, "SRMR (between covariance matrix)") c2 <- c(c2, sprintf(num.format, x["srmr_between"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["srmr_between"]), "" )) } # format c1/c2/c3 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # WRMR if ("wrmr" %in% names.x) { cat("\nWeighted Root Mean Square Residual:\n\n") c1 <- c2 <- c3 <- character(0L) if ("wrmr" %in% names.x) { c1 <- c(c1, "WRMR") c2 <- c(c2, sprintf(num.format, x["wrmr"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["wrmr"]), "")) } # format c1/c2/c3 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } # Other if (any(c("cn_05", "cn_01", "gfi", "agfi", "pgfi", "mfi") %in% names.x)) { cat("\nOther Fit Indices:\n\n") c1 <- c2 <- c3 <- character(0L) if ("cn_05" %in% names.x) { c1 <- c(c1, "Hoelter Critical N (CN) alpha = 0.05") c2 <- c(c2, sprintf(num.format, x["cn_05"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["cn_05"]), "" )) } if ("cn_01" %in% names.x) { c1 <- c(c1, "Hoelter Critical N (CN) alpha = 0.01") c2 <- c(c2, sprintf(num.format, x["cn_01"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["cn_01"]), "" )) } if (any(c("cn_05", "cn_01") %in% names.x)) { c1 <- c(c1, "") c2 <- c(c2, "") c3 <- c(c3, "") } if ("gfi" %in% names.x) { c1 <- c(c1, "Goodness of Fit Index (GFI)") c2 <- c(c2, sprintf(num.format, x["gfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["gfi"]), "" )) } if ("agfi" %in% names.x) { c1 <- c(c1, "Adjusted Goodness of Fit Index (AGFI)") c2 <- c(c2, sprintf(num.format, x["agfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["agfi"]), "" )) } if ("pgfi" %in% names.x) { c1 <- c(c1, "Parsimony Goodness of Fit Index (PGFI)") c2 <- c(c2, sprintf(num.format, x["pgfi"])) c3 <- c(c3, ifelse(scaled.flag, 
sprintf(num.format, x["pgfi"]), "")) } if (any(c("gfi", "agfi", "pgfi") %in% names.x)) { c1 <- c(c1, "") c2 <- c(c2, "") c3 <- c(c3, "") } if ("mfi" %in% names.x) { c1 <- c(c1, "McDonald Fit Index (MFI)") c2 <- c(c2, sprintf(num.format, x["mfi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["mfi"]), "")) } if ("mfi" %in% names.x) { c1 <- c(c1, "") c2 <- c(c2, "") c3 <- c(c3, "") } if ("ecvi" %in% names.x) { c1 <- c(c1, "Expected Cross-Validation Index (ECVI)") c2 <- c(c2, sprintf(num.format, x["ecvi"])) c3 <- c(c3, ifelse(scaled.flag, sprintf(num.format, x["ecvi"]), "")) } # format c1/c2/c3 c1 <- format(c1, width = 43L) c2 <- format(c2, width = 8L + max(0, (nd - 3L)) * 4L, justify = "right") c3 <- format(c3, width = 8L + nd, justify = "right") # create character matrix if (scaled.flag) { M <- cbind(c1, c2, c3, deparse.level = 0) } else { M <- cbind(c1, c2, deparse.level = 0) } colnames(M) <- rep("", ncol(M)) rownames(M) <- rep(" ", nrow(M)) # print write.table(M, row.names = TRUE, col.names = FALSE, quote = FALSE) } invisible(x) } lavaan/R/lav_simulate_old.R0000644000176200001440000003646014627656441015413 0ustar liggesusers# simulate data starting from a user-specified model # # initial version: YR 24 jan 2011 # revision for 0.4-11: YR 21 okt 2011 simulateData <- function( # user-specified model model = NULL, model.type = "sem", # model modifiers meanstructure = FALSE, int.ov.free = TRUE, int.lv.free = FALSE, marker.int.zero = FALSE, conditional.x = FALSE, fixed.x = FALSE, orthogonal = FALSE, std.lv = TRUE, auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = TRUE, auto.cov.lv.x = TRUE, auto.cov.y = TRUE, ..., # data properties sample.nobs = 500L, ov.var = NULL, group.label = paste("G", 1:ngroups, sep = ""), skewness = NULL, kurtosis = NULL, # control seed = NULL, empirical = FALSE, return.type = "data.frame", return.fit = FALSE, debug = FALSE, standardized = FALSE) { if (!missing(debug)) { current.debug <- lav_debug() if (lav_debug(debug)) on.exit(lav_debug(current.debug), TRUE) } if (!is.null(seed)) set.seed(seed) # if(!exists(".Random.seed", envir = .GlobalEnv)) # runif(1) # initialize the RNG if necessary # RNGstate <- .Random.seed # lavaanify if (is.list(model)) { # two possibilities: either model is already lavaanified # or it is something else... 
if (!is.null(model$lhs) && !is.null(model$op) && !is.null(model$rhs) && !is.null(model$free)) { lav <- model # until 0.6-5, we only used the 'ustart' column # but what if 'lav' is a fitted lavaan object -> use 'est' if (!is.null(lav$est)) { lav$ustart <- lav$est lav$se <- NULL lav$est <- NULL lav$start <- NULL } } else if (is.character(model[[1]])) { lav_msg_stop(gettext("model is a list, but not a parameterTable?")) } } else { lav <- lavaanify( model = model, meanstructure = meanstructure, int.ov.free = int.ov.free, int.lv.free = int.lv.free, marker.int.zero = marker.int.zero, conditional.x = conditional.x, fixed.x = fixed.x, orthogonal = orthogonal, std.lv = std.lv, auto.fix.first = auto.fix.first, auto.fix.single = auto.fix.single, auto.var = auto.var, auto.cov.lv.x = auto.cov.lv.x, auto.cov.y = auto.cov.y, ngroups = length(sample.nobs) ) } group.values <- lav_partable_group_values(lav) if (lav_debug()) { cat("initial lav\n") print(as.data.frame(lav)) } # fill in any remaining NA values (needed for unstandardize) # 1 for variances and (unstandardized) factor loadings, 0 otherwise idx <- which(lav$op == "=~" & is.na(lav$ustart)) if (length(idx) > 0L) { if (standardized) { lav$ustart[idx] <- 0.7 } else { lav$ustart[idx] <- 1.0 } } idx <- which(lav$op == "~~" & is.na(lav$ustart) & lav$lhs == lav$rhs) if (length(idx) > 0L) lav$ustart[idx] <- 1.0 idx <- which(lav$op == "~" & is.na(lav$ustart)) if (length(idx) > 0L) { lav_msg_warn(gettext( "some regression coefficients are unspecified and will be set to zero")) } idx <- which(is.na(lav$ustart)) if (length(idx) > 0L) lav$ustart[idx] <- 0.0 if (lav_debug()) { cat("lav + default values\n") print(as.data.frame(lav)) } # set residual variances to enforce a standardized solution # but only if no *residual* variances have been specified in the syntax if (standardized) { # check if factor loadings are smaller than 1.0 lambda.idx <- which(lav$op == "=~") if (any(lav$ustart[lambda.idx] >= 1.0)) { lav_msg_warn(gettext("standardized=TRUE but factor loadings are >= 1.0")) } # check if regression coefficients are smaller than 1.0 reg.idx <- which(lav$op == "~") if (any(lav$ustart[reg.idx] >= 1.0)) { lav_msg_warn(gettext( "standardized=TRUE but regression coefficients are >= 1.0")) } # for ordered observed variables, we will get '0.0', but that is ok # so there is no need to make a distinction between numeric/ordered # here?? ngroups <- lav_partable_ngroups(lav) ov.names <- vnames(lav, "ov") ov.nox <- vnames(lav, "ov.nox") lv.names <- vnames(lav, "lv") lv.y <- vnames(lav, "lv.y") lv.nox <- vnames(lav, "lv.nox") ov.var.idx <- which(lav$op == "~~" & lav$lhs %in% ov.nox & lav$rhs == lav$lhs) lv.var.idx <- which(lav$op == "~~" & lav$lhs %in% lv.nox & lav$rhs == lav$lhs) if (any(lav$user[c(ov.var.idx, lv.var.idx)] > 0L)) { lav_msg_warn(gettext( "if residual variances are specified, please use standardized=FALSE")) } lav$ustart[c(ov.var.idx, lv.var.idx)] <- 0.0 fit <- lavaan(model = lav, sample.nobs = sample.nobs, ...) Sigma.hat <- computeSigmaHat(lavmodel = fit@Model) ETA <- computeVETA(lavmodel = fit@Model) if (lav_debug()) { cat("Sigma.hat:\n") print(Sigma.hat) cat("Eta:\n") print(ETA) } # stage 1: standardize LV if (length(lv.nox) > 0L) { for (g in 1:ngroups) { var.group <- which(lav$op == "~~" & lav$lhs %in% lv.nox & lav$rhs == lav$lhs & lav$group == group.values[g]) eta.idx <- match(lv.nox, lv.names) lav$ustart[var.group] <- 1 - diag(ETA[[g]])[eta.idx] } } # refit fit <- lavaan(model = lav, sample.nobs = sample.nobs, ...) 
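    # Worked example (hedged, not taken from this file): the refit above
    # yields the implied Sigma.hat used next. Because the observed-variable
    # residual variances are still fixed at 0, diag(Sigma.hat) is the
    # variance explained by the (standardized) loadings alone, so stage 2
    # sets each residual variance to 1 - diag(Sigma.hat), making every total
    # variance exactly 1. For one indicator with standardized loading 0.7:
    #   1 - 0.7^2   # explained = 0.49, residual = 0.51, total = 1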
Sigma.hat <- computeSigmaHat(lavmodel = fit@Model) if (lav_debug()) { cat("after stage 1:\n") cat("Sigma.hat:\n") print(Sigma.hat) } # stage 2: standardize OV for (g in 1:ngroups) { var.group <- which(lav$op == "~~" & lav$lhs %in% ov.nox & lav$rhs == lav$lhs & lav$group == group.values[g]) ov.idx <- match(ov.nox, ov.names) lav$ustart[var.group] <- 1 - diag(Sigma.hat[[g]])[ov.idx] } if (lav_debug()) { cat("after standardisation lav\n") print(as.data.frame(lav)) } } # unstandardize if (!is.null(ov.var)) { # FIXME: if ov.var is named, check the order of the elements # 1. unstandardize observed variables lav$ustart <- lav_unstandardize_ov(partable = lav, ov.var = ov.var) # 2. unstandardized latent variables if (lav_debug()) { cat("after unstandardisation lav\n") print(as.data.frame(lav)) } } # fit the model without data fit <- lavaan(model = lav, sample.nobs = sample.nobs, ...) # the model-implied moments for the population Sigma.hat <- computeSigmaHat(lavmodel = fit@Model) Mu.hat <- computeMuHat(lavmodel = fit@Model) if (fit@Model@categorical) { TH <- computeTH(lavmodel = fit@Model) } if (lav_debug()) { cat("\nModel-implied moments (before Vale-Maurelli):\n") print(Sigma.hat) print(Mu.hat) if (exists("TH")) print(TH) } # ngroups ngroups <- length(sample.nobs) # prepare X <- vector("list", length = ngroups) out <- vector("list", length = ngroups) for (g in 1:ngroups) { COV <- Sigma.hat[[g]] # if empirical = TRUE, rescale by N/(N-1), so that estimator=ML # returns exact results if (empirical) { COV <- COV * sample.nobs[g] / (sample.nobs[g] - 1) } # FIXME: change to rmvnorm once we include the library? if (is.null(skewness) && is.null(kurtosis)) { X[[g]] <- MASS::mvrnorm( n = sample.nobs[g], mu = Mu.hat[[g]], Sigma = COV, empirical = empirical ) } else { # first generate Z Z <- ValeMaurelli1983( n = sample.nobs[g], COR = cov2cor(COV), skewness = skewness, # FIXME: per group? kurtosis = kurtosis ) # rescale # Note: 'scale()' will first center, and then scale # but we need to first scale, and then center... # this was reported by Jordan Brace (9 may 2014) # X[[g]] <- scale(Z, center = -Mu.hat[[g]], # scale = 1/sqrt(diag(COV))) # first, we scale TMP <- scale(Z, center = FALSE, scale = 1 / sqrt(diag(COV)) )[, , drop = FALSE] # then, we center X[[g]] <- sweep(TMP, MARGIN = 2, STATS = Mu.hat[[g]], FUN = "+") } # any categorical variables? ov.ord <- vnames(lav, type = "ov.ord", group = group.values[g]) if (length(ov.ord) > 0L) { ov.names <- vnames(lav, type = "ov", group = group.values[g]) # use thresholds to cut for (o in ov.ord) { o.idx <- which(o == ov.names) th.idx <- which(lav$op == "|" & lav$lhs == o & lav$group == group.values[g]) th.val <- c(-Inf, sort(lav$ustart[th.idx]), +Inf) X[[g]][, o.idx] <- as.integer(cut(X[[g]][, o.idx], th.val)) } } if (return.type == "data.frame") X[[g]] <- as.data.frame(X[[g]]) } if (return.type == "matrix") { if (ngroups == 1L) { return(X[[1L]]) } else { return(X) } } else if (return.type == "data.frame") { Data <- X[[1L]] # if multiple groups, add group column if (ngroups > 1L) { for (g in 2:ngroups) { Data <- rbind(Data, X[[g]]) } Data$group <- rep(1:ngroups, times = sample.nobs) } var.names <- vnames(fit@ParTable, type = "ov", group = 1L) if (ngroups > 1L) var.names <- c(var.names, "group") names(Data) <- var.names if (return.fit) { attr(Data, "fit") <- fit } return(Data) } else if (return.type == "cov") { if (ngroups == 1L) { return(cov(X[[1L]])) } else { cov.list <- lapply(X, cov) return(cov.list) } } } Skewness <- function(x., N1 = TRUE) { x <- x. 
x <- x[!is.na(x)] N <- length(x) mean.x <- mean(x) xc <- x - mean.x var.x <- var(x) if (!N1) var.x <- var.x * (N - 1) / N sd.x <- sqrt(var.x) sk <- sum(xc * xc * xc) / (sd.x * sd.x * sd.x) skewness <- N * sk / ((N - 1) * (N - 2)) skewness } Kurtosis <- function(x., N1 = TRUE) { x <- x. x <- x[!is.na(x)] N <- length(x) mean.x <- mean(x) xc <- x - mean.x var.x <- var(x) if (!N1) var.x <- var.x * (N - 1) / N k <- sum(xc * xc * xc * xc) / (var.x * var.x) kurtosis <- N * (N + 1) * k / ((N - 1) * (N - 2) * (N - 3)) - 3 * (N - 1) * (N - 1) / ((N - 2) * (N - 3)) kurtosis } # NOTE: as pointed out in Fleishman (1978), a real solution does not # always exist (for a/b/c/d) for all values of skew/kurtosis # # for example: skew = 3, only valid if kurtosis > 14 (approximately) # # fleishman eq 21 suggests: skew^2 < 0.0629576*kurtosis + 0.0717247 # see figure 1 page 527 # # note also that the a/b/c/d solution is not unique, although this seems # not to matter for generating the data # Fleishman (1978) cubic transformation method lav_fleishman1978 <- function(n = 100, skewness = 0, kurtosis = 0) { system.function <- function(x, skewness, kurtosis) { b <- x[1L] c <- x[2L] d <- x[3L] eq1 <- b * b + 6 * b * d + 2 * c * c + 15 * d * d - 1 eq2 <- 2 * c * (b * b + 24 * b * d + 105 * d * d + 2) - skewness eq3 <- 24 * (b * d + c * c * (1 + b * b + 28 * b * d) + d * d * (12 + 48 * b * d + 141 * c * c + 225 * d * d)) - kurtosis eq <- c(eq1, eq2, eq3) sum(eq * eq) ## SS } out <- nlminb( start = c(1, 0, 0), objective = system.function, scale = 10, control = list(trace = ifelse(lav_debug(), 1, 0), rel.tol = 1e-10), skewness = skewness, kurtosis = kurtosis ) if (out$convergence != 0 || out$objective > 1e-5) lav_msg_warn(gettext("no convergence")) b <- out$par[1L] c <- out$par[2L] d <- out$par[3L] a <- -c Z <- rnorm(n = n) Y <- a + b * Z + c * Z * Z + d * Z * Z * Z Y } ValeMaurelli1983 <- function(n = 100L, COR, skewness, kurtosis) { fleishman1978_abcd <- function(skewness, kurtosis) { system.function <- function(x, skewness, kurtosis) { b. <- x[1L] c. <- x[2L] d. <- x[3L] eq1 <- b. * b. + 6 * b. * d. + 2 * c. * c. + 15 * d. * d. - 1 eq2 <- 2 * c. * (b. * b. + 24 * b. * d. + 105 * d. * d. + 2) - skewness eq3 <- 24 * (b. * d. + c. * c. * (1 + b. * b. + 28 * b. * d.) + d. * d. * (12 + 48 * b. * d. + 141 * c. * c. + 225 * d. * d.)) - kurtosis eq <- c(eq1, eq2, eq3) sum(eq * eq) ## SS } out <- nlminb( start = c(1, 0, 0), objective = system.function, scale = 10, control = list(trace = 0), skewness = skewness, kurtosis = kurtosis ) if (out$convergence != 0 || out$objective > 1e-5) { lav_msg_warn(gettext("ValeMaurelli1983 method did not converge, or it did not find the roots")) } b. <- out$par[1L] c. <- out$par[2L] d. <- out$par[3L] a. <- -c. c(a., b., c., d.)
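# Illustrative usage sketch (comment only): generate nonnormal data via the
# Fleishman polynomial Y = a + b*Z + c*Z^2 + d*Z^3 above and check the
# moments; the skewness/kurtosis pair must lie in the admissible region
# (see the NOTE above lav_fleishman1978):
#
#   set.seed(1234)
#   Y <- lav_fleishman1978(n = 100000, skewness = 2, kurtosis = 7)
#   Skewness(Y) # close to 2
#   Kurtosis(Y) # close to 7 (excess kurtosis)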
} getICOV <- function(b1, c1, d1, b2, c2, d2, R) { objectiveFunction <- function(x, b1, c1, d1, b2, c2, d2, R) { rho <- x[1L] eq <- rho * (b1 * b2 + 3 * b1 * d2 + 3 * d1 * b2 + 9 * d1 * d2) + rho * rho * (2 * c1 * c2) + rho * rho * rho * (6 * d1 * d2) - R eq * eq } # gradientFunction <- function(x, bcd1, bcd2, R) { # # } out <- nlminb( start = R, objective = objectiveFunction, scale = 10, control = list(trace = 0), b1 = b1, c1 = c1, d1 = d1, b2 = b2, c2 = c2, d2 = d2, R = R ) if (out$convergence != 0 || out$objective > 1e-5) lav_msg_warn(gettext("no convergence")) rho <- out$par[1L] rho } # number of variables nvar <- ncol(COR) # check skewness if (is.null(skewness)) { SK <- rep(0, nvar) } else if (length(skewness) == nvar) { SK <- skewness } else if (length(skewness) == 1L) { SK <- rep(skewness, nvar) } else { lav_msg_stop(gettext("skewness has wrong length")) } if (is.null(kurtosis)) { KU <- rep(0, nvar) } else if (length(kurtosis) == nvar) { KU <- kurtosis } else if (length(kurtosis) == 1L) { KU <- rep(kurtosis, nvar) } else { lav_msg_stop(gettext("kurtosis has wrong length")) } # create Fleishman table FTable <- matrix(0, nvar, 4L) for (i in 1:nvar) { FTable[i, ] <- fleishman1978_abcd(skewness = SK[i], kurtosis = KU[i]) } # compute intermediate correlations between all pairs ICOR <- diag(nvar) for (j in 1:(nvar - 1L)) { for (i in (j + 1):nvar) { if (COR[i, j] == 0) next ICOR[i, j] <- ICOR[j, i] <- getICOV(FTable[i, 2], FTable[i, 3], FTable[i, 4], FTable[j, 2], FTable[j, 3], FTable[j, 4], R = COR[i, j] ) } } if (lav_debug()) { cat("\nOriginal correlations (for Vale-Maurelli):\n") print(COR) cat("\nIntermediate correlations (for Vale-Maurelli):\n") print(ICOR) cat("\nEigen values ICOR:\n") print(eigen(ICOR)$values) } # generate Z ## FIXME: replace by rmvnorm once we use that package X <- Z <- MASS::mvrnorm(n = n, mu = rep(0, nvar), Sigma = ICOR) # transform Z using Fleishman constants for (i in 1:nvar) { X[, i] <- FTable[i, 1L] + FTable[i, 2L] * Z[, i] + FTable[i, 3L] * Z[, i] * Z[, i] + FTable[i, 4L] * Z[, i] * Z[, i] * Z[, i] } X } lavaan/R/lav_mvnorm_missing_h1.R0000644000176200001440000001600414627656441016361 0ustar liggesusers# the Multivariate normal distribution, unrestricted (h1), missing values # 1) loglikelihood --> same as h0 but where Mu and Sigma are unrestricted # 2) 3) 4) 5) --> (idem) # YR 26 Mar 2016: first version # YR 20 Jan 2017: added _h1_omega_sw() # here, we estimate Mu and Sigma from Y with missing values, assuming normality # this is a rewrite of the 'estimate.moments.EM' function in <= 0.5-22 lav_mvnorm_missing_h1_estimate_moments <- function(Y = NULL, Mp = NULL, Yp = NULL, wt = NULL, Sinv.method = "eigen", max.iter = 500L, tol = 1e-05) { # check input Y <- as.matrix(Y) P <- NCOL(Y) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } if (is.null(Yp)) { Yp <- lav_samplestats_missing_patterns(Y = Y, Mp = Mp, wt = wt) } if (is.null(max.iter)) { max.iter <- 500L } if (is.null(tol)) { tol <- 1e-05 } # remove empty cases N.full <- N if (length(Mp$empty.idx) > 0L) { if (!is.null(wt)) { N <- N - sum(wt[Mp$empty.idx]) } else { N <- N - length(Mp$empty.idx) } } # verbose? 
if (lav_verbose()) { cat("\n") cat("lav_mvnorm_missing_h1_estimate_moments: start EM steps\n") } # starting values; zero covariances to guarantee a pd matrix if (!is.null(wt)) { tmp <- na.omit(cbind(wt, Y)) if (nrow(tmp) > 2L) { Y.tmp <- tmp[, -1, drop = FALSE] wt.tmp <- tmp[, 1] out <- stats::cov.wt(Y.tmp, wt = wt.tmp, method = "ML") Mu0 <- out$center var0 <- diag(out$cov) } else { Mu0 <- base::.colMeans(Y, m = N.full, n = P, na.rm = TRUE) Yc <- t(t(Y) - Mu0) var0 <- base::.colMeans(Yc * Yc, m = N.full, n = P, na.rm = TRUE) } } else { Mu0 <- base::.colMeans(Y, m = N.full, n = P, na.rm = TRUE) Yc <- t(t(Y) - Mu0) var0 <- base::.colMeans(Yc * Yc, m = N.full, n = P, na.rm = TRUE) } # sanity check bad.idx <- which(!is.finite(var0) | var0 == 0) if (length(bad.idx) > 0L) { var0[bad.idx] <- 1 } bad.idx <- which(!is.finite(Mu0)) if (length(bad.idx) > 0L) { Mu0[bad.idx] <- 0 } Sigma0 <- diag(x = var0, nrow = P) Mu <- Mu0 Sigma <- Sigma0 # report if (lav_verbose()) { # fx0 <- estimator.FIML(Sigma.hat=Sigma, Mu.hat=Mu, M=Yp) fx0 <- lav_mvnorm_missing_loglik_samplestats( Yp = Yp, Mu = Mu, Sigma = Sigma, log2pi = FALSE, minus.two = TRUE ) / N cat( " EM iteration:", sprintf("%4d", 0), " fx = ", sprintf("%15.10f", fx0), "\n" ) } # EM steps for (i in 1:max.iter) { # E-step Estep <- lav_mvnorm_missing_estep( Y = Y, Mp = Mp, wt = wt, Mu = Mu, Sigma = Sigma, Sinv.method = Sinv.method ) T1 <- Estep$T1 T2 <- Estep$T2 # M-step Mu <- T1 / N Sigma <- T2 / N - tcrossprod(Mu) # check if Sigma is near-pd (+ poor fix) ev <- eigen(Sigma, symmetric = TRUE, only.values = TRUE) evtol <- 1e-6 # FIXME! if (any(ev$values < evtol)) { # too.small <- which( ev$values < tol ) # ev$values[too.small] <- tol # ev$values <- ev$values + tol # Sigma <- ev$vectors %*% diag(ev$values) %*% t(ev$vectors) # ridge diag(Sigma) <- diag(Sigma) + max(diag(Sigma)) * 1e-08 } # max absolute difference in parameter values DELTA <- max(abs(c(Mu, lav_matrix_vech(Sigma)) - c(Mu0, lav_matrix_vech(Sigma0)))) # report fx if (lav_verbose()) { # fx <- estimator.FIML(Sigma.hat=Sigma, Mu.hat=Mu, M=Yp) fx <- lav_mvnorm_missing_loglik_samplestats( Yp = Yp, Mu = Mu, Sigma = Sigma, log2pi = FALSE, minus.two = TRUE ) / N cat( " EM iteration:", sprintf("%4d", i), " fx = ", sprintf("%15.10f", fx), " delta par = ", sprintf("%9.8f", DELTA), "\n" ) } # convergence check: using parameter values: if (DELTA < tol) { break } # again Mu0 <- Mu Sigma0 <- Sigma } # EM iterations if (lav_verbose()) { cat("\nSigma:\n") print(Sigma) cat("\nMu:\n") print(Mu) cat("\n") } # compute fx if we haven't already if (!lav_verbose()) { # fx <- estimator.FIML(Sigma.hat = Sigma, Mu.hat = Mu, M = Yp) fx <- lav_mvnorm_missing_loglik_samplestats( Yp = Yp, Mu = Mu, Sigma = Sigma, log2pi = FALSE, minus.two = TRUE ) / N } # warning? if (i == max.iter) { lav_msg_warn( gettext("Maximum number of iterations reached when computing the sample moments using EM; use the em.h1.iter.max= argument to increase the number of iterations") ) } ev <- eigen(Sigma, symmetric = TRUE, only.values = TRUE)$values if (any(ev < 1e-05)) { # make an option? 
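# Illustrative usage sketch (comment only) for the EM estimator above; the
# data matrix and the missingness pattern are hypothetical:
#
#   Y <- MASS::mvrnorm(n = 200, mu = c(0, 0), Sigma = diag(2))
#   Y[sample(length(Y), 40)] <- NA # inject some missing values
#   out <- lav_mvnorm_missing_h1_estimate_moments(Y = Y)
#   out$Mu; out$Sigma # EM estimates of the saturated (h1) moments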
lav_msg_warn( gettext("The smallest eigenvalue of the EM estimated variance-covariance matrix (Sigma) is smaller than 1e-05; this may cause numerical instabilities; interpret the results with caution.") ) } list(Sigma = Sigma, Mu = Mu, fx = fx) } # compute N times ACOV(Mu, vech(Sigma)) # in the literature: - `Omega_{SW}' # - `Gamma for incomplete data' # - (N times the) sandwich estimator for acov(mu,vech(Sigma)) lav_mvnorm_missing_h1_omega_sw <- function(Y = NULL, Mp = NULL, wt = NULL, cluster.idx = NULL, Yp = NULL, Sinv.method = "eigen", Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, information = "observed") { # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # sample stats per pattern if (is.null(Yp) && (information == "observed" || is.null(Sigma))) { Yp <- lav_samplestats_missing_patterns(Y = Y, Mp = Mp, wt = wt) } # Sigma and Mu if (is.null(Sigma) || is.null(Mu)) { out <- lav_mvnorm_missing_h1_estimate_moments(Y = Y, Mp = Mp, Yp = Yp) Mu <- out$Mu Sigma <- out$Sigma } # information matrices info <- lav_mvnorm_missing_information_both( Y = Y, Mp = Mp, Mu = Mu, wt = wt, cluster.idx = cluster.idx, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv, information = information ) A <- info$Abeta A.inv <- lav_matrix_symmetric_inverse( S = A, logdet = FALSE, Sinv.method = Sinv.method ) B <- info$Bbeta # sandwich SW <- A.inv %*% B %*% A.inv SW } lavaan/R/xxx_efa.R0000644000176200001440000001007414627656441013523 0ustar liggesusers# EFA: exploratory factor analysis # # EFA is implemented as a special version of ESEM # - it is therefore a wrapper around the lavaan() function to simplify # the input # - a lavaan model is generated with a single 'block' that can be rotated # - the 'default' output produces output that is more in line with traditional # EFA software (in R) like factanal() and fa() from the psych package # YR 20 Sept 2022 - first version efa <- function(data = NULL, nfactors = 1L, sample.cov = NULL, sample.nobs = NULL, rotation = "geomin", rotation.args = list(), ov.names = names(data), bounds = "pos.var", ..., output = "efa") { # handle dotdotdot dotdotdot <- list(...) # twolevel? twolevel.flag <- !is.null(dotdotdot$cluster) # check for unallowed arguments if (!is.null(dotdotdot$group)) { lav_msg_stop(gettext("efa has no support for multiple groups (for now)")) } # handle ov.names if (!is.null(data) && inherits(data, "lavMoments")) { if ("sample.cov" %in% names(data)) { ov.names <- rownames(data$sample.cov) if (is.null(ov.names)) { ov.names <- colnames(data$sample.cov) } } else { lav_msg_stop(gettext( "When data= is of class lavMoments, it must contain sample.cov")) } } else if (!is.null(data) && inherits(data, "data.frame")) { if (length(ov.names) > 0L) { if (twolevel.flag) { data <- data[, c(ov.names, dotdotdot$cluster)] } else { data <- data[, ov.names, drop = FALSE] } } else { ov.names <- names(data) } } else if (!is.null(sample.cov)) { ov.names <- rownames(sample.cov) if (is.null(ov.names)) { ov.names <- colnames(sample.cov) } } # ov.names? if (length(ov.names) == 0L) { lav_msg_stop(gettext( "could not extract variable names from data or sample.cov")) } # check nfactors if (any(nfactors < 1L)) { lav_msg_stop(gettext("nfactors must be greater than zero.")) } else { # check for maximum number of factors # Fixme: can we do this more efficiently? also holds for categorical? 
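# Worked example (comment only) of the check below: with nvar = 6 there are
# p.star = 6 * 7 / 2 = 21 sample moments. For nfac = 3 the number of free
# parameters is 3*6 + 3*4/2 + 6 - 3^2 = 18 + 6 + 6 - 9 = 21, which still
# fits; for nfac = 4 it is 24 + 10 + 6 - 16 = 24 > 21, so nfac.max = 3.
# This agrees with the classical Ledermann bound for 6 variables.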
nvar <- length(ov.names) p.star <- nvar * (nvar + 1) / 2 nfac.max <- 0L for (nfac in seq_len(nvar)) { # compute number of free parameters npar <- nfac * nvar + nfac * (nfac + 1L) / 2 + nvar - nfac^2 if (npar > p.star) { nfac.max <- nfac - 1L break } } if (any(nfactors > nfac.max)) { lav_msg_stop(gettextf("when nvar = %1$s the maximum number of factors is %2$s", nvar, nfac.max)) } } # output output <- tolower(output) if (!output %in% c("lavaan", "efa")) { lav_msg_stop(gettext("output= must be either \"lavaan\" or \"efa\"")) } if (output == "lavaan" && length(nfactors) > 1L) { lav_msg_stop(gettext("when output = \"lavaan\", nfactors must be a single (integer) number.")) } # fit models nfits <- length(nfactors) out <- vector("list", length = nfits) for (f in seq_len(nfits)) { # generate model syntax model.syntax <- lav_syntax_efa( ov.names = ov.names, nfactors = nfactors[f], twolevel = twolevel.flag ) # call lavaan (using sem()) FIT <- do.call("sem", args = c( list( model = model.syntax, data = data, sample.cov = sample.cov, sample.nobs = sample.nobs, rotation = rotation, rotation.args = rotation.args, bounds = bounds ), dotdotdot ) ) if (output == "efa") { FIT@Options$model.type <- "efa" } out[[f]] <- FIT } # class if (nfits == 1L && output == "lavaan") { out <- out[[1]] } else { names(out) <- paste0("nf", nfactors) # add loadings element to the end of the list # so we an use the non-generic but useful loadings() function # from the stats package out$loadings <- lav_efa_get_loadings(out) class(out) <- c("efaList", "list") } out } lavaan/R/lav_utils.R0000644000176200001440000005113314627656441014064 0ustar liggesusers# utility functions # # initial version: YR 25/03/2009 # get 'test' # make sure we return a single element lav_utils_get_test <- function(lavobject) { test <- lavobject@Options$test # 0.6.5: for now, we make sure that 'test' is a single element if (length(test) > 1L) { standard.idx <- which(test == "standard") if (length(standard.idx) > 0L) { test <- test[-standard.idx] } if (length(test) > 1L) { # only retain the first one test <- test[1] } } test } # check if we use a robust/scaled test statistic lav_utils_get_scaled <- function(lavobject) { test.names <- unname(sapply(lavobject@test, "[[", "test")) scaled <- FALSE if (any(test.names %in% c( "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus", "mean.var.adjusted", "scaled.shifted" ))) { scaled <- TRUE } scaled } # check for marker indicators: # - if std.lv = FALSE: a single '1' per factor, everything else zero # - if std.lv = TRUE: a single non-zero value per factor, everything else zero lav_utils_get_marker <- function(LAMBDA = NULL, std.lv = FALSE) { LAMBDA <- as.matrix(LAMBDA) nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) # round values LAMBDA <- round(LAMBDA, 3L) marker.idx <- numeric(nfac) for (f in seq_len(nfac)) { if (std.lv) { marker.idx[f] <- which(rowSums(cbind( LAMBDA[, f] != 0, LAMBDA[, -f] == 0 )) == nfac)[1] } else { marker.idx[f] <- which(rowSums(cbind( LAMBDA[, f] == 1, LAMBDA[, -f] == 0 )) == nfac)[1] } } marker.idx } # get npar (taking into account explicit equality constraints) # (changed in 0.5-13) lav_utils_get_npar <- function(lavobject) { npar <- lav_partable_npar(lavobject@ParTable) if (nrow(lavobject@Model@con.jac) > 0L) { ceq.idx <- attr(lavobject@Model@con.jac, "ceq.idx") if (length(ceq.idx) > 0L) { neq <- qr(lavobject@Model@con.jac[ceq.idx, , drop = FALSE])$rank npar <- npar - neq } } else if (.hasSlot(lavobject@Model, "ceq.simple.only") && lavobject@Model@ceq.simple.only) { npar <- 
lavobject@Model@nx.free } npar } # N versus N-1 (or N versus N-G in the multiple group setting) # Changed 0.5-15: suggestion by Mark Seeto lav_utils_get_ntotal <- function(lavobject) { if (lavobject@Options$estimator %in% c("ML", "PML", "FML", "catML") && lavobject@Options$likelihood %in% c("default", "normal")) { N <- lavobject@SampleStats@ntotal } else { N <- lavobject@SampleStats@ntotal - lavobject@SampleStats@ngroups } N } # compute log(sum(exp(x))) avoiding under/overflow # using the identity: log(sum(exp(x)) = a + log(sum(exp(x - a))) lav_utils_logsumexp <- function(x) { a <- max(x) a + log(sum(exp(x - a))) } # mdist = Mahalanobis distance lav_mdist <- function(Y, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, Sinv.method = "eigen", ginv = TRUE, rescale = FALSE) { # check input Y <- as.matrix(Y) P <- NCOL(Y) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) # missing data? missing.flag <- anyNA(Y) # missing patterns? if (missing.flag && is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # no Mu? compute sample mean if (is.null(Mu)) { Mu <- colMeans(Y, na.rm = TRUE) } # no Sigma? if (is.null(Sigma)) { if (missing.flag) { out <- lav_mvnorm_missing_h1_estimate_moments( Y = Y, Mp = Mp, wt = wt ) Mu <- out$Mu Sigma <- out$Sigma } else { if (!is.null(wt)) { out <- stats::cov.wt(Y, wt = wt, method = "ML") Sigma <- out$cov Mu <- out$center } else { Sigma <- stats::cov(Y, use = "pairwise") # rescale? if (rescale) { Sigma <- ((N - 1) / N) * Sigma } } } } # subtract Mu Yc <- t(t(Y) - Mu) # DIST per case DIST <- rep(as.numeric(NA), NY) # invert Sigma if (ginv) { Sigma.inv <- MASS::ginv(Sigma) } else { Sigma.inv <- try( lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ), silent = TRUE ) if (inherits(Sigma.inv, "try-error")) { lav_msg_warn(gettext( "problem computing distances: could not invert Sigma")) return(DIST) } } # complete data? if (!missing.flag) { # center factor scores Y.c <- t(t(Y) - Mu) # Mahalobis distance DIST <- rowSums((Y.c %*% Sigma.inv) * Y.c) # missing data? } else { # for each pattern, compute sigma.inv; compute DIST for all # observations of this pattern for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # identify cases with this pattern case.idx <- Mp$case.idx[[p]] # invert Sigma for this pattern if (length(na.idx) > 0L) { if (ginv) { sigma.inv <- MASS::ginv(Sigma[-na.idx, -na.idx, drop = FALSE]) } else { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } } else { sigma.inv <- Sigma.inv } if (Mp$freq[p] == 1L) { DIST[case.idx] <- sum(sigma.inv * crossprod(Yc[case.idx, var.idx, drop = FALSE])) } else { DIST[case.idx] <- rowSums(Yc[case.idx, var.idx, drop = FALSE] %*% sigma.inv * Yc[case.idx, var.idx, drop = FALSE]) } } # patterns } # missing data # use weights? (no for now) # DIST <- DIST * wt DIST } # create matrix with indices to reconstruct the bootstrap samples # per group # (originally needed for BCa confidence intervals) # # rows are the (R) bootstrap runs # columns are the (N) observations # # simple version: no strata, no weights # lav_utils_bootstrap_indices <- function(R = 0L, nobs = list(0L), # per group parallel = "no", ncpus = 1L, cl = NULL, iseed = NULL, merge.groups = FALSE, return.freq = FALSE) { # iseed must be set! 
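# Illustrative usage sketch (comment only): draw R = 200 bootstrap index
# sets for two groups of sizes 50 and 60, merged into one 200 x 110 matrix
# of case indices (or, alternatively, case frequencies):
#
#   IDX <- lav_utils_bootstrap_indices(R = 200L, nobs = list(50L, 60L),
#                                      iseed = 123L, merge.groups = TRUE)
#   FRQ <- lav_utils_bootstrap_indices(R = 200L, nobs = list(50L, 60L),
#                                      iseed = 123L, merge.groups = TRUE,
#                                      return.freq = TRUE)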
stopifnot(!is.null(iseed)) if (return.freq && !merge.groups) { lav_msg_stop(gettext("return.freq only available if merge.groups = TRUE")) } if (is.integer(nobs)) { nobs <- list(nobs) } # number of groups ngroups <- length(nobs) # mimic 'random' sampling from lav_bootstrap_internal: # the next 7 lines are borrowed from the boot package have_mc <- have_snow <- FALSE parallel <- parallel[1] if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") { have_mc <- .Platform$OS.type != "windows" } else if (parallel == "snow") have_snow <- TRUE if (!have_mc && !have_snow) ncpus <- 1L loadNamespace("parallel") # before recording seed! } temp.seed <- NULL if (exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { temp.seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) } if (!(ncpus > 1L && (have_mc || have_snow))) { # Only for serial set.seed(iseed) } # fn() returns indices per group fn <- function(b) { BOOT.idx <- vector("list", length = ngroups) OFFSet <- cumsum(c(0, unlist(nobs))) for (g in 1:ngroups) { stopifnot(nobs[[g]] > 1L) boot.idx <- sample.int(nobs[[g]], replace = TRUE) if (merge.groups) { BOOT.idx[[g]] <- boot.idx + OFFSet[g] } else { BOOT.idx[[g]] <- boot.idx } } BOOT.idx } RR <- R res <- if (ncpus > 1L && (have_mc || have_snow)) { if (have_mc) { RNGkind_old <- RNGkind() # store current kind RNGkind("L'Ecuyer-CMRG") # to allow for reproducible results set.seed(iseed) parallel::mclapply(seq_len(RR), fn, mc.cores = ncpus) } else if (have_snow) { # list(...) # evaluate any promises if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) parallel::clusterSetRNGStream(cl, iseed = iseed) res <- parallel::parLapply(cl, seq_len(RR), fn) parallel::stopCluster(cl) res } else { parallel::parLapply(cl, seq_len(RR), fn) } } } else { lapply(seq_len(RR), fn) } # restore old RNGkind() if (ncpus > 1L && have_mc) { RNGkind(RNGkind_old[1], RNGkind_old[2], RNGkind_old[3]) } # handle temp.seed if (!is.null(temp.seed) && !identical(temp.seed, NA)) { assign(".Random.seed", temp.seed, envir = .GlobalEnv) } else if (is.null(temp.seed) && !(ncpus > 1L && (have_mc || have_snow))) { # serial rm(.Random.seed, pos = 1) } else if (is.null(temp.seed) && (ncpus > 1L && have_mc)) { # parallel/multicore only rm(.Random.seed, pos = 1) # because set used set.seed() } # assemble IDX BOOT.idx <- vector("list", length = ngroups) for (g in 1:ngroups) { # FIXME: handle failed runs BOOT.idx[[g]] <- do.call("rbind", lapply(res, "[[", g)) } # merge groups if (merge.groups) { out <- do.call("cbind", BOOT.idx) } else { out <- BOOT.idx } # NOTE: the order of the indices is different from the boot package! # we fill in the matrix 'row-wise' (1 row = sample(N, replace = TRUE)), # while boot fills in the matrix 'column-wise' # this also explains why we get different results with return.boot = TRUE # despite using the same iseed # return frequencies instead? 
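# Worked example (comment only): a row of 'out' holding the indices
# c(2, 2, 3) for N = 3 cases becomes tabulate(c(2, 2, 3), 3) = c(0, 2, 1):
# case 1 was never drawn, case 2 was drawn twice, case 3 once.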
if (return.freq && merge.groups) { out <- t(apply(out, 1L, tabulate, ncol(out))) } out } # invert positive definite symmetric matrix (eg cov matrix) # using choleski decomposition # return log determinant as an attribute inv.chol <- function(S, logdet = FALSE) { cS <- chol(S) # if( inherits(cS, "try-error") ) { # print(S) # warning("lavaan WARNING: symmetric matrix is not positive symmetric!") # } S.inv <- chol2inv(cS) if (logdet) { diag.cS <- diag(cS) attr(S.inv, "logdet") <- sum(log(diag.cS * diag.cS)) } S.inv } # convert correlation matrix + standard deviations to covariance matrix # based on cov2cor in package:stats cor2cov <- function(R, sds, names = NULL) { p <- (d <- dim(R))[1L] if (!is.numeric(R) || length(d) != 2L || p != d[2L]) { lav_msg_stop(gettext("'V' is not a square numeric matrix")) } if (any(!is.finite(sds))) { lav_msg_warn(gettext( "sds had 0 or NA entries; non-finite result is doubtful")) } # if(sum(diag(R)) != p) # stop("The diagonal of a correlation matrix should be all ones.") if (p != length(sds)) { lav_msg_stop(gettext("The standard deviation vector and correlation matrix have a different number of variables")) } S <- R S[] <- sds * R * rep(sds, each = p) # optionally, add names if (!is.null(names)) { stopifnot(length(names) == p) rownames(S) <- colnames(S) <- names } S } # convert characters within single quotes to numeric vector # eg. s <- '3 4.3 8e-3 2.0' # x <- char2num(s) char2num <- function(s = "") { # first, strip all ',' or ';' s. <- gsub(",", " ", s) s. <- gsub(";", " ", s.) tc <- textConnection(s.) x <- scan(tc, quiet = TRUE) close(tc) x } # create full matrix based on lower.tri or upper.tri elements; add names # always ROW-WISE!! getCov <- function(x, lower = TRUE, diagonal = TRUE, sds = NULL, names = paste("V", 1:nvar, sep = "")) { # check x and sds if (is.character(x)) x <- char2num(x) if (is.character(sds)) sds <- char2num(sds) nels <- length(x) if (lower) { COV <- lav_matrix_lower2full(x, diagonal = diagonal) } else { COV <- lav_matrix_upper2full(x, diagonal = diagonal) } nvar <- ncol(COV) # if diagonal is false, assume unit diagonal if (!diagonal) diag(COV) <- 1 # check if we have a sds argument if (!is.null(sds)) { stopifnot(length(sds) == nvar) COV <- cor2cov(COV, sds) } # names stopifnot(length(names) == nvar) rownames(COV) <- colnames(COV) <- names COV } # translate row+col matrix indices to vec idx rowcol2vec <- function(row.idx, col.idx, nrow, symmetric = FALSE) { idx <- row.idx + (col.idx - 1) * nrow if (symmetric) { idx2 <- col.idx + (row.idx - 1) * nrow idx <- unique(sort(c(idx, idx2))) } idx } # dummy function to 'pretty' print a vector with fixed width pprint.vector <- function(x, digits.after.period = 3, ncols = NULL, max.col.width = 11, newline = TRUE) { n <- length(x) var.names <- names(x) total.width <- getOption("width") max.width <- max(nchar(var.names)) if (max.width < max.col.width) { # shrink max.col.width <- max(max.width, digits.after.period + 2) } # automatic number of columns if (is.null(ncols)) { ncols <- floor((total.width - 2) / (max.col.width + 2)) } nrows <- ceiling(n / ncols) if (digits.after.period >= (max.col.width - 3)) { max.col.width <- digits.after.period + 3 } string.format <- paste(" %", max.col.width, "s", sep = "") number.format <- paste(" %", max.col.width, ".", digits.after.period, "f", sep = "") for (nr in 1:nrows) { rest <- min(ncols, n) if (newline) cat("\n") # labels for (nc in 1:rest) { vname <- substr(var.names[(nr - 1) * ncols + nc], 1, max.col.width) cat(sprintf(string.format, vname)) } cat("\n") for 
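# Illustrative usage sketch (comment only) for getCov() above: build a full
# covariance matrix from a row-wise lower triangle plus standard deviations:
#
#   lower <- '1.0  0.5 1.0  0.4 0.3 1.0'
#   getCov(lower, sds = c(1, 2, 3), names = c("x1", "x2", "x3"))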
(nc in 1:rest) { cat(sprintf(number.format, x[(nr - 1) * ncols + nc])) } cat("\n") n <- n - ncols } if (newline) cat("\n") } # print only lower half of symmetric matrix pprint.matrix.symm <- function(x, digits.after.period = 3, ncols = NULL, max.col.width = 11, newline = TRUE) { n <- ncol <- ncol(x) nrow <- nrow(x) stopifnot(ncol == nrow) var.names <- rownames(x) total.width <- getOption("width") max.width <- max(nchar(var.names)) if (max.width < max.col.width) { # shrink max.col.width <- max(max.width, digits.after.period + 2) } # automatic number of columns if (is.null(ncols)) { ncols <- floor((total.width - 2) / (max.col.width + 2)) } nblocks <- ceiling(n / ncols) if (digits.after.period >= (max.col.width - 3)) { max.col.width <- digits.after.period + 3 } fc.format <- paste(" %", min(max.width, max.col.width), "s", sep = "") string.format <- paste(" %", max.col.width, "s", sep = "") number.format <- paste(" %", max.col.width, ".", digits.after.period, "f", sep = "") for (nb in 1:nblocks) { rest <- min(ncols, n) if (newline) cat("\n") # empty column cat(sprintf(fc.format, "")) # labels for (nc in 1:rest) { vname <- substr(var.names[(nb - 1) * ncols + nc], 1, max.col.width) cat(sprintf(string.format, vname)) } cat("\n") row.start <- (nb - 1) * ncols + 1 for (nr in row.start:nrow) { # label vname <- substr(var.names[nr], 1, max.col.width) cat(sprintf(fc.format, vname)) col.rest <- min(rest, (nr - row.start + 1)) for (nc in 1:col.rest) { value <- x[nr, (nb - 1) * ncols + nc] cat(sprintf(number.format, value)) } cat("\n") } n <- n - ncols } if (newline) cat("\n") } # elimination of rows/cols symmetric matrix eliminate.rowcols <- function(x, el.idx = integer(0)) { if (length(el.idx) == 0) { return(x) } stopifnot(ncol(x) == nrow(x)) stopifnot(min(el.idx) > 0 && max(el.idx) <= ncol(x)) x[-el.idx, -el.idx] } # locate pstar idx of a given set of elements # # for example, if nvar = 4, pstar = 10 elements # which elements correspond to the second and third element (in 1:nvar)? # # deprecated!
replaced by lav_matrix_vech_which_idx() # eliminate.pstar.idx2 <- function(nvar = 1L, el.idx = integer(0), meanstructure = FALSE, correlation = FALSE, return.idx = FALSE) { if (length(el.idx) > 0) { stopifnot(min(el.idx) > 0 && max(el.idx) <= nvar) } # create col/row indices XX <- rbind( lav_matrix_vech_col_idx(nvar), lav_matrix_vech_row_idx(nvar) ) # if correlation matrix, remove col/row corresponding to the variances if (correlation && nvar > 1L) { var.idx <- lav_matrix_diagh_idx(n = nvar) XX <- XX[, -var.idx, drop = FALSE] } # locate pstar indices (as logicals) idx <- XX[1, ] %in% el.idx & XX[2, ] %in% el.idx # if meanstructure, add location in mean vector if (meanstructure) { idx <- c((1:nvar %in% el.idx), idx) } # return indices instead of logicals if (return.idx) { idx <- which(idx) } idx } # elimination of rows/cols pstar symmetric matrix # # type = "all" -> only remove var(el.idx) and cov(el.idx) # type = "any" -> remove all rows/cols of el.idx eliminate.pstar.idx <- function(nvar = 1, el.idx = integer(0), meanstructure = FALSE, type = "all") { if (length(el.idx) > 0) { stopifnot(min(el.idx) > 0 && max(el.idx) <= nvar) } XX <- utils::combn(1:(nvar + 1), 2) XX[2, ] <- XX[2, ] - 1 if (type == "all") { idx <- !(apply(apply(XX, 2, function(x) { x %in% el.idx }), 2, all)) } else { idx <- !(apply(apply(XX, 2, function(x) { x %in% el.idx }), 2, any)) } if (meanstructure) { idx <- c(!(1:nvar %in% el.idx), idx) # idx <- c(rep(TRUE, nvar), idx) } idx } # construct 'augmented' covariance matrix # based on the covariance matrix and the mean vector augmented.covariance <- function(S., mean) { S <- as.matrix(S.) m <- as.matrix(mean) p <- ncol(S) if (nrow(m) != p) { lav_msg_stop(gettext("incompatible dimension of mean vector")) } out <- matrix(0, ncol = (p + 1), nrow = (p + 1)) out[1:p, 1:p] <- S + m %*% t(m) out[p + 1, 1:p] <- t(m) out[1:p, p + 1] <- m out[p + 1, p + 1] <- 1 out } # linesearch using 'armijo' backtracking # to find a suitable `stepsize' (alpha) linesearch.backtracking.armijo <- function(f.alpha, s.alpha, alpha = 10) { tau <- 0.5 ftol <- 0.001 f.old <- f.alpha(0) s.old <- s.alpha(0) armijo.condition <- function(alpha) { f.new <- f.alpha(alpha) # condition f.new > f.old + ftol * alpha * s.old } i <- 1 while (armijo.condition(alpha)) { alpha <- alpha * tau f.new <- f.alpha(alpha) cat("... 
backtracking: ", i, "alpha = ", alpha, "f.new = ", f.new, "\n") i <- i + 1 } alpha } steepest.descent <- function(start, objective, gradient, iter.max) { x <- start if (lav_verbose()) { cat("Steepest descent iterations\n") cat("iter function abs.change rel.change step.size norm.gx\n") gx <- gradient(x) norm.gx <- sqrt(gx %*% gx) fx <- objective(x) cat(sprintf( "%4d %11.7E %11.5E %11.5E", 0, fx, 0, norm.gx ), "\n") } for (iter in 1:iter.max) { fx.old <- objective(x) # normalized gradient gx <- gradient(x) old.gx <- gx norm.gx <- sqrt(gx %*% gx) gradient.old <- gx / norm.gx direction.vector <- (-1) * gradient.old f.alpha <- function(alpha) { new.x <- x + alpha * direction.vector fx <- objective(new.x) # cat(" [stepsize] iter ", iter, " step size = ", alpha, # " fx = ", fx, "\n", sep="") # for optimize only if (is.infinite(fx)) { fx <- .Machine$double.xmax } fx } # s.alpha <- function(alpha) { # new.x <- x + alpha * direction.vector # gradient.new <- gradient(new.x) # norm.gx <- sqrt( gradient.new %*% gradient.new) # gradient.new <- gradient.new/norm.gx # as.numeric(gradient.new %*% direction.vector) # } # find step size # alpha <- linesearch.backtracking.armijo(f.alpha, s.alpha, alpha=1) if (iter == 1) { alpha <- 0.1 } else { alpha <- optimize(f.alpha, lower = 0.0, upper = 1)$minimum if (f.alpha(alpha) > fx.old) { alpha <- optimize(f.alpha, lower = -1, upper = 0.0)$minimum } } # steepest descent step old.x <- x x <- x + alpha * direction.vector gx.old <- gx gx <- gradient(x) dx.max <- max(abs(gx)) # verbose if (lav_verbose()) { fx <- fx.old fx.new <- objective(x) abs.change <- fx.new - fx.old rel.change <- abs.change / fx.old norm.gx <- sqrt(gx %*% gx) cat( sprintf( "%4d %11.7E %10.7f %10.7f %11.5E %11.5E", iter, fx.new, abs.change, rel.change, alpha, norm.gx ), "\n" ) } # convergence check if (dx.max < 1e-05) { break } } x } lavaan/R/lav_samplestats_step1.R0000644000176200001440000001023214627656441016373 0ustar liggesuserslav_samplestats_step1 <- function(Y, wt = NULL, # new in 0.6-6 ov.names = NULL, ov.types = NULL, ov.levels = NULL, ov.names.x = character(0L), eXo = NULL, scores.flag = TRUE, # scores? 
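# Worked example (comment only) of the threshold bookkeeping below: with
# ov.levels = c(4, 2, 3), nTH = c(3, 1, 2) and nth = 6; th.end.idx =
# cumsum(nTH) = c(3, 4, 6) and th.start.idx = c(1, 4, 5), so the (single)
# threshold of variable 2 occupies position 4 in the stacked vector.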
group = 1L) { # for error message # just in case Y is a vector Y <- as.matrix(Y) nvar <- NCOL(Y) N <- NROW(Y) nTH <- ov.levels - 1L nTH[nTH == -1L] <- 1L nth <- sum(nTH) th.end.idx <- cumsum(nTH) th.start.idx <- th.end.idx - (nTH - 1L) # variable types; default = numeric nexo <- length(ov.names.x) if (nexo > 0L) stopifnot(NCOL(eXo) == nexo) # means/thresholds/intercepts, slopes, variances TH <- vector("list", length = nvar) TH.NOX <- vector("list", length = nvar) TH.NAMES <- vector("list", length = nvar) TH.IDX <- vector("list", length = nvar) SLOPES <- matrix(as.numeric(NA), nrow = nvar, ncol = nexo) # if conditional.x VAR <- numeric(length = nvar) # continuous variables only # SCORES SC.VAR <- matrix(0, N, nvar) SC.SL <- matrix(0, N, nvar * nexo) SC.TH <- matrix(0, N, nth) # fitted objects FIT <- vector("list", length = nvar) # stage one - TH/SLOPES/VAR only for (i in 1:nvar) { th.idx <- th.start.idx[i]:th.end.idx[i] sl.idx <- seq(i, by = nvar, length.out = nexo) if (ov.types[i] == "numeric") { fit <- lav_uvreg_fit(y = Y[, i], X = eXo, wt = wt) if (any(is.na(fit$theta))) { lav_msg_stop(gettextf( "linear regression failed for %1$s; X may not be of full rank in group %2$s", ov.names[i], group)) } FIT[[i]] <- fit # compute mean and variance TH[[i]] <- TH.NOX[[i]] <- fit$theta[1L] VAR[i] <- fit$theta[fit$var.idx] TH.NAMES[[i]] <- ov.names[i] TH.IDX[[i]] <- 0L if (scores.flag) { scores <- lav_uvreg_scores(y = Y[, i], X = eXo, wt = wt) SC.TH[, th.idx] <- scores[, 1L] SC.VAR[, i] <- scores[, fit$var.idx] } if (nexo > 0L) { SLOPES[i, ] <- fit$theta[-c(1L, fit$var.idx)] if (scores.flag) { SC.SL[, sl.idx] <- scores[, -c(1L, fit$var.idx), drop = FALSE] } TH.NOX[[i]] <- mean(Y[, i], na.rm = TRUE) } } else if (ov.types[i] == "ordered") { # check if we have enough categories in this group # FIXME: should we be more tolerant here???
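# Worked example (comment only) of the slope layout above: with nvar = 3
# variables and nexo = 2 covariates, the slope columns for variable i = 2
# are sl.idx = seq(2, by = 3, length.out = 2) = c(2, 5): slopes are stacked
# covariate by covariate, with the variable index running fastest.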
y.freq <- tabulate(Y[, i], nbins = ov.levels[i]) if (length(y.freq) != ov.levels[i]) { lav_msg_stop(gettextf( "variable %1$s has fewer categories (%2$s) than expected (%3$s) in group %4$s", ov.names[i], length(y.freq), ov.levels[i], group)) } if (any(y.freq == 0L)) { lav_msg_stop(gettextf( "some categories of variable `%1$s' are empty in group %2$s; frequencies are [%3$s]", ov.names[i], group, lav_msg_view(y.freq, "none"))) } fit <- lav_uvord_fit(y = Y[, i], X = eXo, wt = wt) if (any(is.na(fit$theta))) { lav_msg_stop(gettextf( "probit regression failed for %1$s; X may not be of full rank in group %2$s", ov.names[i], group)) } FIT[[i]] <- fit TH[[i]] <- fit$theta[fit$th.idx] TH.NOX[[i]] <- lav_uvord_th(y = Y[, i], wt = wt) if (scores.flag) { scores <- lav_uvord_scores(y = Y[, i], X = eXo, wt = wt) SC.TH[, th.idx] <- scores[, fit$th.idx, drop = FALSE] } SLOPES[i, ] <- fit$theta[fit$slope.idx] if (scores.flag) { SC.SL[, sl.idx] <- scores[, fit$slope.idx, drop = FALSE] } VAR[i] <- 1.0 TH.NAMES[[i]] <- paste(ov.names[i], "|t", 1:length(TH[[i]]), sep = "" ) TH.IDX[[i]] <- rep(i, length(TH[[i]])) } else { lav_msg_stop(gettext("unknown ov.types:"), ov.types[i]) } } list( FIT = FIT, VAR = VAR, SLOPES = SLOPES, TH = TH, TH.NOX = TH.NOX, TH.IDX = TH.IDX, TH.NAMES = TH.NAMES, SC.TH = SC.TH, SC.VAR = SC.VAR, SC.SL = SC.SL, th.start.idx = th.start.idx, th.end.idx = th.end.idx ) } lavaan/R/lav_lavaan_step01_ovnames.R0000644000176200001440000004540314627656441017115 0ustar liggesuserslav_lavaan_step01_ovnames_initflat <- function(slotParTable = NULL, # nolint model = NULL, dotdotdot.parser = "new") { # if slotPartable not NULL copy to flat.model # else # if model is of type character # parse model to flat.model # else if model is a formula (** warning **) # transform "~ x1 + x2 + x3" to character "f =~ x1 + x2 + x3", # and parse to flat.model # transform "y =~ x1 + x2 + x3" to character and parse to flat.model # something else : *** error *** # else if model is a lavaan object # extract flat.model from @parTable # else if model is a list # if bare minimum present (columns lhs, op, rhs, free) # flat.model = model # replace column block by column group (if present) # else # --> *** error *** # else # --> ***error*** # # 1a. get ov.names and ov.names.x (per group) -- needed for lavData() if (!is.null(slotParTable)) { flat.model <- slotParTable } else if (is.character(model)) { if (is.null(dotdotdot.parser)) { flat.model <- lavParseModelString(model, parser = "new") } else { flat.model <- lavParseModelString(model, parser = dotdotdot.parser) } } else if (inherits(model, "formula")) { # two typical cases: # 1. regression type formula # 2. no quotes, e.g. f =~ x1 + x2 + x3 # TODO: this isn't a valid formula !!! 
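# Worked example (comment only) of the formula detection below:
#   as.character(~ x1 + x2 + x3) gives c("~", "x1 + x2 + x3")
#     -> length 2, one-sided: probably an unquoted 'f =~ ...' factor model
#   as.character(y ~ x1 + x2) gives c("~", "y", "x1 + x2")
#     -> length 3, two-sided: looks like an (unquoted) regression formula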
tmp <- as.character(model) if (tmp[1] == "~" && length(tmp) == 2L) { # looks like an unquoted single factor model f =~ something lav_msg_warn( gettext("model seems to be a formula; please enclose the model syntax between quotes")) # create model and hope for the best model.bis <- paste("f =", paste(tmp, collapse = " "), sep = "") flat.model <- lavParseModelString(model.bis) } else if (tmp[1] == "~" && length(tmp) == 3L) { # looks like a (unquoted) regression formula lav_msg_warn( gettext("model seems to be a formula; please enclose the model syntax between quotes")) # create model and hope for the best model.bis <- paste(tmp[2], tmp[1], tmp[3]) flat.model <- lavParseModelString(model.bis) } else { lav_msg_stop( gettext("model seems to be a formula; please enclose the model syntax between quotes")) } } else if (inherits(model, "lavaan")) { # hm, a lavaan model; let's try to extract the parameter table # and see what happens flat.model <- parTable(model) } else if (is.list(model)) { # a list! perhaps a full parameter table, or an initial flat model, # or something else... # 1. flat.model already (output of lavParseModelString)? if (!is.null(model$lhs) && !is.null(model$op) && !is.null(model$rhs) && !is.null(model$mod.idx) && !is.null(attr(model, "modifiers"))) { flat.model <- model } # look for the bare minimum columns: lhs - op - rhs else if (!is.null(model$lhs) && !is.null(model$op) && !is.null(model$rhs) && !is.null(model$free)) { # ok, we have something that looks like a parameter table flat.model <- model # fix semTools issue here? for auxiliary() which does not use # block column yet if (!is.null(flat.model$block)) { nn <- length(flat.model$lhs) if (length(flat.model$block) != nn) { flat.model$block <- flat.model$group } if (any(is.na(flat.model$block))) { flat.model$block <- flat.model$group } } else if (!is.null(flat.model$group)) { flat.model$block <- flat.model$group } } else { bare.minimum <- c("lhs", "op", "rhs", "free") missing.idx <- is.na(match(bare.minimum, names(model))) missing.txt <- paste(bare.minimum[missing.idx], collapse = ", ") lav_msg_stop( gettextf("model is a list, but not a parameterTable? 
missing column(s) in parameter table: [%s]", lav_msg_view(bare.minimum[missing.idx], "none"))) } } else { lav_msg_stop(gettext("model is NULL or not a valid type for it!")) } # Ok, we got a flattened model; usually this a flat.model object, but it # could also be an already lavaanified parTable, or a bare-minimum list with # lhs/op/rhs/free elements flat.model } lav_lavaan_step01_ovnames_ovorder <- function(flat.model = NULL, # nolint ov.order = "model", data = NULL, sample.cov = NULL, slotData = NULL) { # nolint # set ov.order in lowercase, check if it is "data" or "model", # if not *** error *** # if ov.order == "data" # try adapt flat.model via lav_partable_ov_from_data # (** warning ** if this fails) # new in 0.6-14 # if ov.order = "data", it would seem we need to intervene here; # ldw 1/3/2024: # we do this by adding an attribute "ovda" to flat.model and partable ov.order <- tolower(ov.order) if (ov.order == "data") { flat.model.orig <- flat.model try( flat.model <- lav_partable_ov_from_data(flat.model, data = data, sample.cov = sample.cov, slotData = slotData ), silent = TRUE ) if (inherits(flat.model, "try-error")) { lav_msg_warn(gettext("ov.order = \"data\" setting failed; switching back to ov.order = \"model\"")) flat.model <- flat.model.orig } } else if (ov.order != "model") { lav_msg_stop(gettext( "ov.order= argument should be \"model\" (default) or \"data\"")) } flat.model } lav_lavaan_step01_ovnames_group <- function(flat.model = NULL, # nolint ngroups = 1L) { # if "group :" appears in flat.model # tmp.group.values: set of names in corresponding right hand sides # copy flat.model without attributes and call lavaanify, # store result in tmp.lav # extract ov.names, ov.names.y, ov.names.x, lv.names from tmp.lav # via lav_partable_vnames # else # if flat.model$group not NULL and more then 1 group.value # extract group.values via lav_partable_group_values # extract, for each group.value, # ov.names, ov.names.y, ov.names.x, lv.names from flat.model # via lav_partable_vnames # else # extract ov.names, ov.names.y, ov.names.x, lv.names from flat.model # via lav_partable_vnames # # TODO: call lav_partable_vnames only ones and not for each type flat.model.2 <- NULL tmp.lav <- NULL group.values <- NULL ov.names <- character(0L) if (any(flat.model$op == ":" & tolower(flat.model$lhs) == "group")) { # here, we only need to figure out: # - ngroups # - ov's per group # # - FIXME: we need a more efficient way, avoiding # lavaanify/lav_partable_vnames # group.idx <- which(flat.model$op == ":" & tolower(flat.model$lhs) == "group") # replace by 'group' (in case we got 'Group'): flat.model$lhs[group.idx] <- "group" tmp.group.values <- unique(flat.model$rhs[group.idx]) tmp.ngroups <- length(tmp.group.values) flat.model.2 <- flat.model attr(flat.model.2, "modifiers") <- NULL attr(flat.model.2, "constraints") <- NULL tmp.lav <- lavaanify(flat.model.2, ngroups = tmp.ngroups, warn = FALSE) ov.names <- ov.names.y <- ov.names.x <- lv.names <- vector("list", length = tmp.ngroups ) attr(tmp.lav, "vnames") <- lav_partable_vnames(tmp.lav, type = "*") for (g in seq_len(tmp.ngroups)) { ov.names[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, type = "ov", group = tmp.group.values[g] ))) ov.names.y[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, type = "ov.nox", group = tmp.group.values[g] ))) ov.names.x[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, type = "ov.x", group = tmp.group.values[g] ))) lv.names[[g]] <- unique(unlist(lav_partable_vnames(tmp.lav, type = "lv", group = tmp.group.values[g] ))) } } 
else if (!is.null(flat.model$group)) { # user-provided full partable with group column! attr(flat.model, "vnames") <- lav_partable_vnames(flat.model, type = "*") ngroups <- lav_partable_ngroups(flat.model) if (ngroups > 1L) { group.values <- lav_partable_group_values(flat.model) ov.names <- ov.names.y <- ov.names.x <- lv.names <- vector("list", length = ngroups ) for (g in seq_len(ngroups)) { # collapsed over levels (if any) ov.names[[g]] <- unique(unlist(lav_partable_vnames(flat.model, type = "ov", group = group.values[g] ))) ov.names.y[[g]] <- unique(unlist(lav_partable_vnames(flat.model, type = "ov.nox", group = group.values[g] ))) ov.names.x[[g]] <- unique(unlist(lav_partable_vnames(flat.model, type = "ov.x", group = group.values[g] ))) lv.names[[g]] <- unique(unlist(lav_partable_vnames(flat.model, type = "lv", group = group.values[g] ))) } } else { ov.names <- lav_partable_vnames(flat.model, type = "ov") ov.names.y <- lav_partable_vnames(flat.model, type = "ov.nox") ov.names.x <- lav_partable_vnames(flat.model, type = "ov.x") lv.names <- lav_partable_vnames(flat.model, type = "lv") } } else { # collapse over levels (if any) attr(flat.model, "vnames") <- lav_partable_vnames(flat.model, type = "*") ov.names <- unique(unlist(lav_partable_vnames(flat.model, type = "ov"))) ov.names.y <- unique(unlist(lav_partable_vnames(flat.model, type = "ov.nox"))) ov.names.x <- unique(unlist(lav_partable_vnames(flat.model, type = "ov.x"))) lv.names <- unique(unlist(lav_partable_vnames(flat.model, type = "lv"))) } # sanity check (new in 0.6-8): do we have any ov.names? # detect early if (length(unlist(ov.names)) == 0L) { lav_msg_stop( gettext("ov.names is empty: model does not refer to any observed variables; check your syntax.")) } list( flat.model = flat.model, ov.names = ov.names, ov.names.x = ov.names.x, ov.names.y = ov.names.y, lv.names = lv.names, group.values = group.values, ngroups = ngroups ) } lav_lavaan_step01_ovnames_checklv <- function( # nolint lv.names = character(0L), data = NULL, sample.cov = NULL, dotdotdot = NULL, slotOptions = NULL) { # nolint # latent variables cannot appear in data --> *** error *** # (except when explicitly requested) # latent interactions are not supported ---> *** error *** # sanity check: ov.names.x should NOT appear in ov.names.y # this may happen if 'x' is exogenous in one block, but not in another... # endo.idx <- which(ov.names.x %in% ov.names.y) # if (length(endo.idx) > 0L) { # # remove from x! 
(new in 0.6-8) # ov.names.x <- ov.names.x[-endo.idx] # } # handle for lv.names that are also observed variables (new in 0.6-6) lv.lv.names <- unique(unlist(lv.names)) if (length(lv.lv.names) > 0L) { # check for lv.names in data/cov if (!is.null(data)) { bad.idx <- which(lv.lv.names %in% names(data)) } else if (!is.null(sample.cov)) { bad.idx <- which(lv.lv.names %in% rownames(sample.cov)) } else { bad.idx <- integer(0L) } # if found, hard stop if (length(bad.idx) > 0L) { if (!is.null(dotdotdot$check.lv.names) && !dotdotdot$check.lv.names) { # ignore it, user switched this check off -- new in 0.6-7 } else { lav_msg_stop(gettext( "some latent variable names collide with observed variable names:"), paste(lv.lv.names[bad.idx], collapse = " ") ) } # rename latent variables (by adding 'lat') # flat.model.idx <- which(flat.model$op == "=~" & # flat.model$lhs %in% lv.names[bad.idx]) # flat.model$lhs[flat.model.idx] <- # paste(flat.model$lhs[flat.model.idx], "lat", sep = "") # add names to ov.names # ov.names <- c(ov.names, lv.names[bad.idx]) # what about ov.names.y and ov.names.x? } } # sanity check: we do not support latent interaction yet (using the :) lv.int.idx <- which(grepl(":", lv.lv.names)) if (length(lv.int.idx) > 0L) { if (!is.null(dotdotdot$check.lv.interaction) && !dotdotdot$check.lv.interaction) { # ignore, user (or sam) switched this check off - new in 0.6-16 } else if (!is.null(slotOptions) && !slotOptions$check.lv.interaction) { # ignore } else { lav_msg_stop(gettextf( "Interaction terms involving latent variables (%s) are not supported. You may consider creating product indicators to define the latent interaction term. See the indProd() function in the semTools package.", lv.lv.names[lv.int.idx[1]])) } } invisible(NULL) } lav_lavaan_step01_ovnames_namesl <- function(data = NULL, # nolint cluster = NULL, flat.model = NULL, group.values = NULL, ngroups = 1L) { # if "level :" appears in flat.model # if data not NULL, cluster must not be NULL, if it is: *** error *** # compute tmp.group.values and tmp.level.values from flat.model # there should be at least 2 levels, if not *** error *** # copy flat.model without attributes and lavaanify -> tmp.lav # check at least 2 levels for tmp.lav, if not *** error *** # compute ov.names.l per group and per level (via lav_partable_vnames # on tmp.lav) # else # if lav_partable_nlevels(flat.model) > 0 # if data not NULL, cluster must not be NULL, if it is: *** error *** # compute ov.names.l per group and per level (via lav_partable_vnames # on flat.model) # else # there are no levels (ov.names.l = list()) # handle ov.names.l if (any(flat.model$op == ":" & tolower(flat.model$lhs) == "level")) { # check for cluster argument if (!is.null(data) && is.null(cluster)) { lav_msg_stop(gettext("cluster argument is missing.")) } # here, we only need to figure out: # - nlevels # - ov's per level # - FIXME: we need a more efficient way, avoiding lavaanify/vnames group.idx <- which(flat.model$op == ":" & flat.model$lhs == "group") tmp.group.values <- unique(flat.model$rhs[group.idx]) tmp.ngroups <- max(c(length(tmp.group.values), 1)) level.idx <- which(flat.model$op == ":" & tolower(flat.model$lhs) == "level") # replace by "level" (in case we got 'Level') flat.model$lhs[level.idx] <- "level" tmp.level.values <- unique(flat.model$rhs[level.idx]) tmp.nlevels <- length(tmp.level.values) # we need at least 2 levels (for now) if (tmp.nlevels < 2L) { lav_msg_stop( gettext("when data is clustered, you must specify a model for each level in the model syntax (for now); 
see example(Demo.twolevel)") ) } flat.model.2 <- flat.model attr(flat.model.2, "modifiers") <- NULL attr(flat.model.2, "constraints") <- NULL tmp.lav <- lavaanify(flat.model.2, ngroups = tmp.ngroups, warn = FALSE) # check for empty levels if (max(tmp.lav$level) < 2L) { lav_msg_stop( gettext("at least one level has no model syntax; you must specify a model for each level in the model syntax; see example(Demo.twolevel)") ) } ov.names.l <- vector("list", length = tmp.ngroups) # per group for (g in seq_len(tmp.ngroups)) { ov.names.l[[g]] <- vector("list", length = tmp.nlevels) for (l in seq_len(tmp.nlevels)) { if (tmp.ngroups > 1L) { ov.names.l[[g]][[l]] <- unique(unlist(lav_partable_vnames(tmp.lav, type = "ov", group = tmp.group.values[g], level = tmp.level.values[l] ))) } else { ov.names.l[[g]][[l]] <- unique(unlist(lav_partable_vnames(tmp.lav, type = "ov", level = tmp.level.values[l] ))) } } # levels } # groups } else { # perhaps model is already a parameter table nlevels <- lav_partable_nlevels(flat.model) if (nlevels > 1L) { # check for cluster argument (only if we have data) if (!is.null(data) && is.null(cluster)) { lav_msg_stop(gettext("cluster argument is missing.")) } ngroups <- lav_partable_ngroups(flat.model) group.values <- lav_partable_group_values(flat.model) ov.names.l <- vector("list", length = ngroups) for (g in 1:ngroups) { # note: lavNames() will return a list if any level: ov.names.l[[g]] <- lavNames(flat.model, "ov", group = group.values[g]) } } else { # no level: in model syntax ov.names.l <- list() } } list( flat.model = flat.model, ov.names.l = ov.names.l ) } lav_lavaan_step01_ovnames_ordered <- function(ordered = NULL, # nolint flat.model = NULL, data = NULL) { # interpretation and check ordered parameter, modify if needed # sanity check ordered argument (just in case, add lhs variables names) if (!is.null(ordered)) { # new in 0.6-4 if (is.logical(ordered) && ordered) { # ordered = TRUE # assume the user means: ordered = names(Data) ordered <- lavNames(flat.model, "ov.nox") # new in 0.6-6: changed from ov } else if (is.logical(ordered) && !ordered) { ordered <- character(0L) } else if (!is.character(ordered)) { lav_msg_stop(gettext("ordered argument must be a character vector")) } else if (length(ordered) == 1L && nchar(ordered) == 0L) { ordered <- character(0L) } else { # check if all names in "ordered" occur in the dataset? 
if (!is.null(data)) { if (inherits(data, "data.frame")) { data_names <- names(data) } else if (inherits(data, "matrix")) { data_names <- colnames(data) } missing.idx <- which(!ordered %in% data_names) if (length(missing.idx) > 0L) { # FIXme: warn = FALSE has no eff lav_msg_warn(gettextf( "ordered variable(s): %s could not be found in the data and will be ignored", paste(ordered[missing.idx], collapse = " "))) } } } } # add the variable names that were treated as ordinal # in the model syntax ordered <- unique(c(ordered, lavNames(flat.model, "ov.ord"))) ordered } lavaan/R/lav_model_gradient_pml.R0000644000176200001440000011602414627656441016552 0ustar liggesusers# utility functions for pairwise maximum likelihood # stub for fml_deriv1 fml_deriv1 <- function(Sigma.hat = NULL, # model-based var/cov/cor TH = NULL, # model-based thresholds + means th.idx = NULL, # threshold idx per variable num.idx = NULL, # which variables are numeric X = NULL, # data eXo = NULL, # external covariates lavcache = NULL, # housekeeping stuff scores = FALSE, # return case-wise scores negative = TRUE) { lav_msg_stop(gettext("not implemented")) } # the first derivative of the pairwise logLik function with respect to the # thresholds/slopes/var/correlations; together with DELTA, we can use the # chain rule to get the gradient # this is adapted from code written by Myrsini Katsikatsou # first attempt - YR 5 okt 2012 # HJ 18/10/23: Modification for complex design and completely observed data (no # missing) with only ordinal indicators to get the right gradient for the # optimisation and Hessian computation. pml_deriv1 <- function(Sigma.hat = NULL, # model-based var/cov/cor Mu.hat = NULL, # model-based means TH = NULL, # model-based thresholds + means th.idx = NULL, # threshold idx per variable num.idx = NULL, # which variables are numeric X = NULL, # data eXo = NULL, # external covariates wt = NULL, # case weights (not used yet) lavcache = NULL, # housekeeping stuff PI = NULL, # slopes missing = "listwise", # how to deal with missings scores = FALSE, # return case-wise scores negative = TRUE) { # multiply by -1 # diagonal of Sigma.hat is not necessarily 1, even for categorical vars Sigma.hat2 <- Sigma.hat if (length(num.idx) > 0L) { diag(Sigma.hat2)[-num.idx] <- 1 } else { diag(Sigma.hat2) <- 1 } Cor.hat <- cov2cor(Sigma.hat2) # to get correlations (rho!) cors <- lav_matrix_vech(Cor.hat, diagonal = FALSE) if (any(abs(cors) > 1)) { # what should we do now... force cov2cor? 
# cat("FFFFOOOORRRRRCEEE PD!\n") # Sigma.hat <- Matrix::nearPD(Sigma.hat) # Sigma.hat <- as.matrix(Sigma.hat$mat) # Sigma.hat <- cov2cor(Sigma.hat) # cors <- Sigma.hat[lower.tri(Sigma.hat)] idx <- which(abs(cors) > 0.99) cors[idx] <- 0.99 # clip # cat("CLIPPING!\n") } nvar <- nrow(Sigma.hat) pstar <- nvar * (nvar - 1) / 2 ov.types <- rep("ordered", nvar) if (length(num.idx) > 0L) ov.types[num.idx] <- "numeric" if (!is.null(eXo)) { nexo <- ncol(eXo) } else { nexo <- 0 } if (all(ov.types == "numeric")) { N.TH <- nvar } else { N.TH <- length(th.idx) } N.SL <- nvar * nexo N.VAR <- length(num.idx) N.COR <- pstar # add num.idx to th.idx if (length(num.idx) > 0L) { th.idx[th.idx == 0] <- num.idx } # print(Sigma.hat); print(TH); print(th.idx); print(num.idx); print(str(X)) # shortcut for ordinal-only/no-exo case if (!scores && all(ov.types == "ordered") && nexo == 0L) { # >>>>>>>> HJ/MK PML CODE >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> if (is.null(wt)) { n.xixj.vec <- lavcache$bifreq } else { n.xixj.vec <- lavcache$sum_obs_weights_xixj_ab_vec } gradient <- grad_tau_rho( no.x = nvar, all.thres = TH, index.var.of.thres = th.idx, rho.xixj = cors, n.xixj.vec = n.xixj.vec, out.LongVecInd = lavcache$long ) # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> if (missing == "available.cases") { uniPI <- univariateExpProbVec(TH = TH, th.idx = th.idx) tmp <- lavcache$uniweights / uniPI var.idx <- split(th.idx, th.idx) var.idx <- unlist(lapply(var.idx, function(x) { c(x, x[1]) })) tmp.varwise <- split(tmp, var.idx) tmp1 <- unlist(lapply( tmp.varwise, function(x) { c(x[-length(x)]) } )) tmp2 <- unlist(lapply(tmp.varwise, function(x) { c(x[-1]) })) uni.der.tau <- dnorm(TH) * (tmp1 - tmp2) nTH <- length(TH) gradient[1:nTH] <- gradient[1:nTH] + uni.der.tau } if (negative) { gradient <- -1 * gradient } return(gradient) } # in this order: TH/MEANS + SLOPES + VAR + COR GRAD.size <- N.TH + N.SL + N.VAR + N.COR # scores or gradient? 
if (scores) { SCORES <- matrix(0, nrow(X), GRAD.size) # we will sum up over all pairs } else { GRAD <- matrix(0, pstar, GRAD.size) # each pair is a row } PSTAR <- matrix(0, nvar, nvar) # utility matrix, to get indices PSTAR[lav_matrix_vech_idx(nvar, diagonal = FALSE)] <- 1:pstar N <- length(X[, 1]) for (j in seq_len(nvar - 1L)) { for (i in (j + 1L):nvar) { # cat(" i = ", i, " j = ", j, "\n") # debug only pstar.idx <- PSTAR[i, j] cor.idx <- N.TH + N.SL + N.VAR + PSTAR[i, j] th.idx_i <- which(th.idx == i) th.idx_j <- which(th.idx == j) if (nexo > 0L) { sl.idx_i <- N.TH + seq(i, by = nvar, length.out = nexo) sl.idx_j <- N.TH + seq(j, by = nvar, length.out = nexo) if (length(num.idx) > 0L) { var.idx_i <- N.TH + N.SL + match(i, num.idx) var.idx_j <- N.TH + N.SL + match(j, num.idx) } } else { if (length(num.idx) > 0L) { var.idx_i <- N.TH + match(i, num.idx) var.idx_j <- N.TH + match(j, num.idx) } } if (ov.types[i] == "numeric" && ov.types[j] == "numeric") { if (nexo > 1L) { lav_msg_stop(gettext( "mixed + exo in PML not implemented; try optim.gradient = \"numerical\"")) } SC <- lav_mvnorm_scores_mu_vech_sigma( Y = X[, c(i, j)], Mu = Mu.hat[c(i, j)], Sigma = Sigma.hat[c(i, j), c(i, j)] ) if (scores) { if (all(ov.types == "numeric") && nexo == 0L) { # MU1 + MU2 SCORES[, c(i, j)] <- SCORES[, c(i, j)] + SC[, c(1, 2)] # VAR1 + COV_12 + VAR2 var.idx <- (nvar + lav_matrix_vech_match_idx(nvar, idx = c(i, j))) SCORES[, var.idx] <- SCORES[, var.idx] + SC[, c(3, 4, 5)] } else { # mixed ordered/continuous # MU mu.idx <- c(th.idx_i, th.idx_j) SCORES[, mu.idx] <- SCORES[, mu.idx] + (-1) * SC[, c(1, 2)] # VAR+COV var.idx <- c(var.idx_i, cor.idx, var.idx_j) SCORES[, var.idx] <- SCORES[, var.idx] + SC[, c(3, 4, 5)] } } else { if (all(ov.types == "numeric") && nexo == 0L) { mu.idx <- c(i, j) sigma.idx <- (nvar + lav_matrix_vech_match_idx(nvar, idx = c(i, j))) # MU1 + MU2 GRAD[pstar.idx, mu.idx] <- colSums(SC[, c(1, 2)], na.rm = TRUE) } else { mu.idx <- c(th.idx_i, th.idx_j) sigma.idx <- c(var.idx_i, cor.idx, var.idx_j) # MU (reverse sign!) 
GRAD[pstar.idx, mu.idx] <- -1 * colSums(SC[, c(1, 2)], na.rm = TRUE) } # SIGMA GRAD[pstar.idx, sigma.idx] <- colSums(SC[, c(3, 4, 5)], na.rm = TRUE) } # gradient only } else if (ov.types[i] == "numeric" && ov.types[j] == "ordered") { # polyserial correlation if (nexo > 1L) { lav_msg_stop(gettext( "mixed + exo in PML not implemented; try optim.gradient = \"numerical\"")) } SC.COR.UNI <- lav_bvmix_cor_scores( Y1 = X[, i], Y2 = X[, j], eXo = NULL, wt = wt, evar.y1 = Sigma.hat[i, i], beta.y1 = Mu.hat[i], th.y2 = TH[th.idx == j], sl.y2 = NULL, rho = Cor.hat[i, j], sigma.correction = TRUE ) if (scores) { # MU SCORES[, th.idx_i] <- (SCORES[, th.idx_i] + -1 * SC.COR.UNI$dx.mu.y1) # TH SCORES[, th.idx_j] <- (SCORES[, th.idx_j] + SC.COR.UNI$dx.th.y2) # VAR SCORES[, var.idx_i] <- (SCORES[, var.idx_i] + SC.COR.UNI$dx.var.y1) # COR SCORES[, cor.idx] <- (SCORES[, cor.idx] + SC.COR.UNI$dx.rho) } else { # MU GRAD[pstar.idx, th.idx_i] <- -1 * sum(SC.COR.UNI$dx.mu.y1, na.rm = TRUE) # TH GRAD[pstar.idx, th.idx_j] <- colSums(SC.COR.UNI$dx.th.y2, na.rm = TRUE) # VAR GRAD[pstar.idx, var.idx_i] <- sum(SC.COR.UNI$dx.var.y1, na.rm = TRUE) # COR GRAD[pstar.idx, cor.idx] <- sum(SC.COR.UNI$dx.rho, na.rm = TRUE) } # grad only } else if (ov.types[j] == "numeric" && ov.types[i] == "ordered") { # polyserial correlation if (nexo > 1L) { lav_msg_stop(gettext( "mixed + exo in PML not implemented; try optim.gradient = \"numerical\"")) } SC.COR.UNI <- lav_bvmix_cor_scores( Y1 = X[, j], Y2 = X[, i], eXo = NULL, wt = wt, evar.y1 = Sigma.hat[j, j], beta.y1 = Mu.hat[j], th.y2 = TH[th.idx == i], rho = Cor.hat[i, j], sigma.correction = TRUE ) if (scores) { # MU SCORES[, th.idx_j] <- (SCORES[, th.idx_j] + -1 * SC.COR.UNI$dx.mu.y1) # TH SCORES[, th.idx_i] <- (SCORES[, th.idx_i] + SC.COR.UNI$dx.th.y2) # VAR SCORES[, var.idx_j] <- (SCORES[, var.idx_j] + SC.COR.UNI$dx.var.y1) # COR SCORES[, cor.idx] <- (SCORES[, cor.idx] + SC.COR.UNI$dx.rho) } else { # MU GRAD[pstar.idx, th.idx_j] <- -1 * sum(SC.COR.UNI$dx.mu.y1, na.rm = TRUE) # TH GRAD[pstar.idx, th.idx_i] <- colSums(SC.COR.UNI$dx.th.y2, na.rm = TRUE) # VAR GRAD[pstar.idx, var.idx_j] <- sum(SC.COR.UNI$dx.var.y1, na.rm = TRUE) # COR GRAD[pstar.idx, cor.idx] <- sum(SC.COR.UNI$dx.rho, na.rm = TRUE) } # grad only } else if (ov.types[i] == "ordered" && ov.types[j] == "ordered") { # polychoric correlation if (nexo == 0L) { SC.COR.UNI <- lav_bvord_cor_scores( Y1 = X[, i], Y2 = X[, j], eXo = NULL, wt = wt, rho = Sigma.hat[i, j], fit.y1 = NULL, # fixme fit.y2 = NULL, # fixme th.y1 = TH[th.idx == i], th.y2 = TH[th.idx == j], sl.y1 = NULL, sl.y2 = NULL, na.zero = TRUE ) } else { SC.COR.UNI <- pc_cor_scores_PL_with_cov( Y1 = X[, i], Y2 = X[, j], eXo = eXo, Rho = Sigma.hat[i, j], th.y1 = TH[th.idx == i], th.y2 = TH[th.idx == j], sl.y1 = PI[i, ], sl.y2 = PI[j, ], missing.ind = missing ) } if (scores) { # TH SCORES[, th.idx_i] <- SCORES[, th.idx_i] + SC.COR.UNI$dx.th.y1 SCORES[, th.idx_j] <- SCORES[, th.idx_j] + SC.COR.UNI$dx.th.y2 # SL if (nexo > 0L) { SCORES[, sl.idx_i] <- SCORES[, sl.idx_i] + SC.COR.UNI$dx.sl.y1 SCORES[, sl.idx_j] <- SCORES[, sl.idx_j] + SC.COR.UNI$dx.sl.y2 } # NO VAR # RHO SCORES[, cor.idx] <- SCORES[, cor.idx] + SC.COR.UNI$dx.rho } else { # TH if (length(th.idx_i) > 1L) { GRAD[pstar.idx, th.idx_i] <- colSums(SC.COR.UNI$dx.th.y1, na.rm = TRUE) } else { GRAD[pstar.idx, th.idx_i] <- sum(SC.COR.UNI$dx.th.y1, na.rm = TRUE) } if (length(th.idx_j) > 1L) { GRAD[pstar.idx, th.idx_j] <- colSums(SC.COR.UNI$dx.th.y2, na.rm = TRUE) } else { GRAD[pstar.idx, th.idx_j] <- 
            sum(SC.COR.UNI$dx.th.y2, na.rm = TRUE)
        }
        # SL
        if (nexo > 0L) {
          if (length(sl.idx_i) > 1L) {
            GRAD[pstar.idx, sl.idx_i] <-
              colSums(SC.COR.UNI$dx.sl.y1, na.rm = TRUE)
          } else {
            GRAD[pstar.idx, sl.idx_i] <-
              sum(SC.COR.UNI$dx.sl.y1, na.rm = TRUE)
          }
          if (length(sl.idx_j) > 1L) {
            GRAD[pstar.idx, sl.idx_j] <-
              colSums(SC.COR.UNI$dx.sl.y2, na.rm = TRUE)
          } else {
            GRAD[pstar.idx, sl.idx_j] <-
              sum(SC.COR.UNI$dx.sl.y2, na.rm = TRUE)
          }
        }
        # NO VAR
        # RHO
        GRAD[pstar.idx, cor.idx] <- sum(SC.COR.UNI$dx.rho, na.rm = TRUE)
      }
      # GRAD2 <- numDeriv::grad(func = pc_logl_x,
      #                         x = c(Sigma.hat[i,j],
      #                               TH[ th.idx == i ],
      #                               TH[ th.idx == j]),
      #                         Y1 = X[,i],
      #                         Y2 = X[,j],
      #                         eXo = eXo,
      #                         nth.y1 = sum( th.idx == i ),
      #                         nth.y2 = sum( th.idx == j ))
      }
    }
  }
  if (missing == "available.cases" && all(ov.types == "ordered")) {
    if (nexo == 0L) {
      UNI_SCORES <- matrix(0, nrow(X), N.TH)
      for (i in seq_len(nvar)) {
        th.idx_i <- which(th.idx == i)
        derY1 <- uni_scores(
          Y1 = X[, i], th.y1 = TH[th.idx == i],
          eXo = NULL, sl.y1 = NULL,
          weights.casewise = lavcache$uniweights.casewise
        )
        UNI_SCORES[, th.idx_i] <- derY1$dx.th.y1
      }
    } else {
      UNI_SCORES <- matrix(0, nrow(X), ncol = (N.TH + N.SL))
      for (i in seq_len(nvar)) {
        th.idx_i <- which(th.idx == i)
        sl.idx_i <- N.TH + seq(i, by = nvar, length.out = nexo)
        derY1 <- uni_scores(
          Y1 = X[, i], th.y1 = TH[th.idx == i],
          eXo = eXo, sl.y1 = PI[i, ],
          weights.casewise = lavcache$uniweights.casewise
        )
        UNI_SCORES[, th.idx_i] <- derY1$dx.th.y1
        UNI_SCORES[, sl.idx_i] <- derY1$dx.sl.y1
      }
      if (scores) {
        SCORES <- SCORES[, 1:(N.TH + N.SL)] + UNI_SCORES
      } else {
        uni_gradient <- colSums(UNI_SCORES)
      }
    }
  }
  # do we need scores?
  if (scores) {
    return(SCORES)
  }
  # DEBUG
  # print(GRAD)
  ###########
  # gradient is sum over all pairs
  gradient <- colSums(GRAD, na.rm = TRUE)
  # note: if nexo == 0L and scores = FALSE, the ordinal-only shortcut near
  # the top of this function has already returned
  if (missing == "available.cases" && all(ov.types == "ordered")) {
    if (nexo == 0L) {
      gradient[1:N.TH] <- gradient[1:N.TH] + uni_gradient
    } else {
      gradient[1:(N.TH + N.SL)] <- gradient[1:(N.TH + N.SL)] + uni_gradient
    }
  }
  # we multiply by -1 because we minimize
  if (negative) {
    gradient <- -1 * gradient
  }
  gradient
}
### all code below written by Myrsini Katsikatsou
# The function grad_tau_rho
# input:
# no.x - a scalar, the number of ordinal variables
# all.thres - a vector containing the thresholds of all variables in the
#             following order: thres_var1, thres_var2,..., thres_var_p
#             within each variable the thresholds are in ascending order
#             Note that all.thres does NOT contain tau_0=-Inf and
#             tau_last=Inf for any of the variables.
# index.var.of.thres - a vector keeping track of which variable the
#             thresholds in all.thres belong to, it is of the form
#             (1,1,1..., 2,2,2,..., p,p,p,...)
# rho.xixj - the vector of all correlations where j runs faster than i,
#            i.e. the order is rho_12, rho_13, ..., rho_1p, rho_23, ...,
#            rho_2p, etc.
# n.xixj.vec - a vector with the observed frequency for every combination
#              of categories and every pair. The frequencies are given in
#              the same order as the expected probabilities in the
#              pairwiseExpProbVec output
# out.LongVecInd - the output of function LongVecInd
# the output: it gives the elements of der.L.to.tau and der.L.to.rho, in
#             this order. The elements of der.L.to.tau are ordered as
#             follows: the thresholds of each variable with respect to
#             ascending order of the variable index (i.e. thres_var1,
#             thres_var2, etc.) and within each variable the thresholds in
#             ascending order.
#             The elements of vector der.L.to.rho are der.Lxixj.to.rho.xixj
#             where j runs faster than i.
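# Illustrative call (hypothetical values; not executed). With p = 3 ordinal
# variables, each with 2 thresholds (3 categories), there are 3 pairs and
# 3 x 3 = 9 cells per pair, so n.xixj.vec holds 27 observed frequencies:
#   ind <- LongVecInd(no.x = 3,
#                     all.thres = c(-1, 1, -0.5, 0.5, 0, 2),
#                     index.var.of.thres = c(1, 1, 2, 2, 3, 3))
#   g <- grad_tau_rho(no.x = 3,
#                     all.thres = c(-1, 1, -0.5, 0.5, 0, 2),
#                     index.var.of.thres = c(1, 1, 2, 2, 3, 3),
#                     rho.xixj = c(0.3, 0.2, 0.4),
#                     n.xixj.vec = n.obs.vec, # length 27, see above
#                     out.LongVecInd = ind)
# The result g then holds the 6 threshold derivatives followed by the
# 3 correlation derivatives.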
# The function depends on four other functions: LongVecTH.Rho, # pairwiseExpProbVec, derLtoRho, and derLtoTau, all given below. # if n.xixj.ab is either an array or a list the following should be done # n.xixj.vec <- if(is.array(n.xixj.ab)) { # c(n.xixj.ab) # } else if(is.list(n.xixj.ab)){ # unlist(n.xixj.ab) # } grad_tau_rho <- function(no.x, all.thres, index.var.of.thres, rho.xixj, n.xixj.vec, out.LongVecInd) { out.LongVecTH.Rho <- LongVecTH.Rho( no.x = no.x, all.thres = all.thres, index.var.of.thres = index.var.of.thres, rho.xixj = rho.xixj ) pi.xixj <- pairwiseExpProbVec( ind.vec = out.LongVecInd, th.rho.vec = out.LongVecTH.Rho ) out.derLtoRho <- derLtoRho( ind.vec = out.LongVecInd, th.rho.vec = out.LongVecTH.Rho, n.xixj = n.xixj.vec, pi.xixj = pi.xixj, no.x = no.x ) out.derLtoTau <- derLtoTau( ind.vec = out.LongVecInd, th.rho.vec = out.LongVecTH.Rho, n.xixj = n.xixj.vec, pi.xixj = pi.xixj, no.x = no.x ) grad <- c(out.derLtoTau, out.derLtoRho) attr(grad, "pi.xixj") <- pi.xixj grad } ################################################################################ # The input of the function LongVecInd: # no.x is scalar, the number of ordinal variables # all.thres is vector containing the thresholds of all variables in the # following order: thres_var1, thres_var2,..., thres_var_p # within each variable the thresholds are in ascending order # Note that all.thres does NOT contain the first and the last threshold of the # variables, i.e. tau_0=-Inf and tau_last=Inf # index.var.of.thres is a vector keeping track to which variable the thresholds # in all.thres belongs to, it is of the form (1,1,1..., 2,2,2,..., p,p,p,...) # The output of the function: # it is a list of vectors keeping track of the indices # of thresholds, of variables, and of pairs, and two T/F vectors indicating # if the threshold index corresponds to the last threshold of a variable; all # these for all pairs of variables. All are needed for the # computation of expected probabilities, der.L.to.rho, and der.L.to.tau # all duplications of indices are done as follows: within each pair of variables, # xi-xj, if for example we want to duplicate the indices of the thresholds, # tau^xi_a and tau^xj_b, then index a runs faster than b, i.e. for each b we # take all different tau^xi's, and then we proceed to the next b and do the # same. In other words if it was tabulated we fill the table columnwise. # All pairs xi-xj are taken with index j running faster than i. # Note that each variable may have a different number of categories, that's why # for example we take lists below. LongVecInd <- function(no.x, all.thres, index.var.of.thres) { no.thres.of.each.var <- tapply(all.thres, index.var.of.thres, length) index.pairs <- utils::combn(no.x, 2) no.pairs <- ncol(index.pairs) # index.thres.var1.of.pair and index.thres.var2.of.pair contain the indices of # of all thresholds (from tau_0 which is -Inf to tau_last which is Inf) # for any pair of variables appropriately duplicated so that the two vectors # together give all possible combinations of thresholds indices # Since here the threshold indices 0 and "last" are included, the vectors are # longer than the vectors thres.var1.of.pair and thres.var2.of.pair above. index.thres.var1.of.pair <- vector("list", no.pairs) index.thres.var2.of.pair <- vector("list", no.pairs) # index.var1.of.pair and index.var2.of.pair keep track the index of the # variable that the thresholds in index.thres.var1.of.pair and # index.thres.var2.of.pair belong to, respectively. 
So, these two variables # are of same length as that of index.thres.var1.of.pair and # index.thres.var2.of.pair index.var1.of.pair <- vector("list", no.pairs) index.var2.of.pair <- vector("list", no.pairs) # index.pairs.extended gives the index of the pair for each pair of variables # e.g. pair of variables 1-2 has index 1, variables 1-3 has index 2, etc. # The vector is of the same length as index.thres.var1.of.pair, # index.thres.var2.of.pair, index.var1.of.pair, and index.var2.of.pair index.pairs.extended <- vector("list", no.pairs) for (i in 1:no.pairs) { no.thres.var1.of.pair <- no.thres.of.each.var[index.pairs[1, i]] no.thres.var2.of.pair <- no.thres.of.each.var[index.pairs[2, i]] index.thres.var1.of.pair[[i]] <- rep(0:(no.thres.var1.of.pair + 1), times = (no.thres.var2.of.pair + 2) ) index.thres.var2.of.pair[[i]] <- rep(0:(no.thres.var2.of.pair + 1), each = (no.thres.var1.of.pair + 2) ) length.vec <- length(index.thres.var1.of.pair[[i]]) index.var1.of.pair[[i]] <- rep(index.pairs[1, i], length.vec) index.var2.of.pair[[i]] <- rep(index.pairs[2, i], length.vec) index.pairs.extended[[i]] <- rep(i, length.vec) } index.thres.var1.of.pair <- unlist(index.thres.var1.of.pair) index.thres.var2.of.pair <- unlist(index.thres.var2.of.pair) index.var1.of.pair <- unlist(index.var1.of.pair) index.var2.of.pair <- unlist(index.var2.of.pair) index.pairs.extended <- unlist(index.pairs.extended) # indicator vector (T/F) showing which elements of index.thres.var1.of.pair # correspond to the last thresholds of variables. The length is the same as # that of index.thres.var1.of.pair. last.thres.var1.of.pair <- index.var1.of.pair == 1 & index.thres.var1.of.pair == (no.thres.of.each.var[1] + 1) # we consider up to variable (no.x-1) because in pairs xi-xj where j runs # faster than i, the last variable is not included in the column of xi's for (i in 2:(no.x - 1)) { new.condition <- index.var1.of.pair == i & index.thres.var1.of.pair == (no.thres.of.each.var[i] + 1) last.thres.var1.of.pair <- last.thres.var1.of.pair | new.condition } # indicator vector (T/F) showing which elements of index.thres.var2.of.pair # correspond to the last thresholds of variables. Notet that in pairs xi-xj # where j runs faster than i, the first variable is not included in the column # of xj's. That's why we start with variable 2. The length is the same as # that of index.thres.var1.of.pair. last.thres.var2.of.pair <- index.var2.of.pair == 2 & index.thres.var2.of.pair == (no.thres.of.each.var[2] + 1) for (i in 3:no.x) { new.condition <- index.var2.of.pair == i & index.thres.var2.of.pair == (no.thres.of.each.var[i] + 1) last.thres.var2.of.pair <- last.thres.var2.of.pair | new.condition } list( index.thres.var1.of.pair = index.thres.var1.of.pair, index.thres.var2.of.pair = index.thres.var2.of.pair, index.var1.of.pair = index.var1.of.pair, index.var2.of.pair = index.var2.of.pair, index.pairs.extended = index.pairs.extended, last.thres.var1.of.pair = last.thres.var1.of.pair, last.thres.var2.of.pair = last.thres.var2.of.pair ) } ################################################################################ # The input of the function LongVecTH.Rho: # no.x is scalar, the number of ordinal variables # all.thres is vector containing the thresholds of all variables in the # following order: thres_var1, thres_var2,..., thres_var_p # within each variable the thresholds are in ascending order # Note that all.thres does NOT contain the first and the last threshold of the # variables, i.e. 
# tau_0=-Inf and tau_last=Inf
# index.var.of.thres is a vector keeping track of which variable the
# thresholds in all.thres belong to, it is of the form
# (1,1,1..., 2,2,2,..., p,p,p,...)
# rho.xixj is the vector of all correlations where j runs faster than i,
# i.e. the order is rho_12, rho_13, ..., rho_1p, rho_23, ..., rho_2p, etc.
# The output of the function:
# it is a list of vectors with thresholds and rho's duplicated appropriately,
# all needed for the computation of expected probabilities,
# der.L.to.rho, and der.L.to.tau
# all duplications below are done as follows: within each pair of variables,
# xi-xj, if for example we want to duplicate their thresholds, tau^xi_a and
# tau^xj_b, then index a runs faster than b, i.e. for each b we take all
# different tau^xi's, and then we proceed to the next b and do the same.
# In other words, if it was tabulated, we fill the table columnwise.
# All pairs xi-xj are taken with index j running faster than i.
# Note that each variable may have a different number of categories; that's
# why, for example, we use lists below.
LongVecTH.Rho <- function(no.x, all.thres, index.var.of.thres, rho.xixj) {
  no.thres.of.each.var <- tapply(all.thres, index.var.of.thres, length)
  index.pairs <- utils::combn(no.x, 2)
  no.pairs <- ncol(index.pairs)
  # create the long vectors needed for the computation of expected
  # probabilities for each cell and each pair of variables. The vectors
  # thres.var1.of.pair and thres.var2.of.pair together give all the possible
  # combinations of the thresholds of any two variables. Note that the
  # combinations (-Inf, -Inf), (-Inf, Inf), (Inf, -Inf), (Inf, Inf) are NOT
  # included; only the combinations of the middle thresholds
  # (tau_1 to tau_(last-1)).
  # thres.var1.of.pair and thres.var2.of.pair give the first and the second
  # argument, respectively, in functions pbivnorm and dbinorm
  thres.var1.of.pair <- vector("list", no.pairs)
  thres.var2.of.pair <- vector("list", no.pairs)
  # Extending the rho.vector accordingly so that it will be the third
  # argument in the pbivnorm and dbinorm functions. It is of the same length
  # as thres.var1.of.pair and thres.var2.of.pair.
  rho.vector <- vector("list", no.pairs)
  # thres.var1.for.dnorm.in.der.pi.to.tau.xi and
  # thres.var2.for.dnorm.in.der.pi.to.tau.xj give the thresholds of almost
  # all variables appropriately duplicated so that the vectors can be used
  # as input in dnorm() to compute der.pi.xixj.to.tau.xi and
  # der.pi.xixj.to.tau.xj.
  # thres.var1.for.dnorm.in.der.pi.to.tau.xi does not contain the thresholds
  # of the last variable, and thres.var2.for.dnorm.in.der.pi.to.tau.xj does
  # not contain those of the first variable.
  thres.var1.for.dnorm.in.der.pi.to.tau.xi <- vector("list", no.pairs)
  thres.var2.for.dnorm.in.der.pi.to.tau.xj <- vector("list", no.pairs)
  for (i in 1:no.pairs) {
    single.thres.var1.of.pair <-
      all.thres[index.var.of.thres == index.pairs[1, i]]
    single.thres.var2.of.pair <-
      all.thres[index.var.of.thres == index.pairs[2, i]]
    # remember that the first (-Inf) and last (Inf) thresholds are not
    # included, so no.thres.var1.of.pair is equal to the number of categories
    # of var1 minus 1; similarly for no.thres.var2.of.pair
    no.thres.var1.of.pair <- no.thres.of.each.var[index.pairs[1, i]]
    no.thres.var2.of.pair <- no.thres.of.each.var[index.pairs[2, i]]
    thres.var1.of.pair[[i]] <- rep(single.thres.var1.of.pair,
      times = no.thres.var2.of.pair
    )
    thres.var2.of.pair[[i]] <- rep(single.thres.var2.of.pair,
      each = no.thres.var1.of.pair
    )
    rho.vector[[i]] <- rep(rho.xixj[i], length(thres.var1.of.pair[[i]]))
    thres.var1.for.dnorm.in.der.pi.to.tau.xi[[i]] <-
      rep(single.thres.var1.of.pair, times = (no.thres.var2.of.pair + 1))
    thres.var2.for.dnorm.in.der.pi.to.tau.xj[[i]] <-
      rep(single.thres.var2.of.pair, each = (no.thres.var1.of.pair + 1))
  }
  thres.var1.of.pair <- unlist(thres.var1.of.pair)
  thres.var2.of.pair <- unlist(thres.var2.of.pair)
  rho.vector <- unlist(rho.vector)
  thres.var1.for.dnorm.in.der.pi.to.tau.xi <-
    unlist(thres.var1.for.dnorm.in.der.pi.to.tau.xi)
  thres.var2.for.dnorm.in.der.pi.to.tau.xj <-
    unlist(thres.var2.for.dnorm.in.der.pi.to.tau.xj)
  # thres.var2.for.last.cat.var1 and thres.var1.for.last.cat.var2 are needed
  # for the computation of expected probabilities. In the computation of
  # \Phi_2(tau1, tau2; rho), when either tau1 or tau2 is Inf, it is enough
  # to compute pnorm() with the non-infinite tau as an argument.
  # In particular, when the first variable of the pair has tau_last = Inf
  # and the second a non-infinite threshold, we compute
  # pnorm(thres.var2.for.last.cat.var1). Similarly, when the second variable
  # of the pair has tau_last = Inf and the first a non-infinite threshold,
  # we compute pnorm(thres.var1.for.last.cat.var2).
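  # (For reference, the bivariate-normal identity used here is
  #  Phi_2(tau1, Inf; rho) = pnorm(tau1) and
  #  Phi_2(Inf, tau2; rho) = pnorm(tau2),
  #  so only the finite threshold of the pair enters the computation.)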
thres.var2.for.last.cat.var1 <- vector("list", (no.x - 1)) thres.var1.for.last.cat.var2 <- vector("list", (no.x - 1)) for (i in 1:(no.x - 1)) { thres.var2.for.last.cat.var1[[i]] <- c(all.thres[index.var.of.thres %in% (i + 1):no.x]) thres.var1.for.last.cat.var2[[i]] <- rep(all.thres[index.var.of.thres == i], times = (no.x - i) ) } thres.var2.for.last.cat.var1 <- unlist(thres.var2.for.last.cat.var1) thres.var1.for.last.cat.var2 <- unlist(thres.var1.for.last.cat.var2) list( thres.var1.of.pair = thres.var1.of.pair, # these 3 of same length thres.var2.of.pair = thres.var2.of.pair, rho.vector = rho.vector, # the following of length dependning on the number of categories thres.var1.for.dnorm.in.der.pi.to.tau.xi = thres.var1.for.dnorm.in.der.pi.to.tau.xi, thres.var2.for.dnorm.in.der.pi.to.tau.xj = thres.var2.for.dnorm.in.der.pi.to.tau.xj, thres.var2.for.last.cat.var1 = thres.var2.for.last.cat.var1, thres.var1.for.last.cat.var2 = thres.var1.for.last.cat.var2 ) } ######################################################### ######################################################### # The function pairwiseExpProbVec # input: ind.vec - the output of function LongVecInd # th.rho.vec - the output of function LongVecTH.Rho # output: it gives the elements of pairwiseTablesExpected()$pi.tables # table-wise and column-wise within each table. In other words if # pi^xixj_ab is the expected probability for the pair of variables xi-xj # and categories a and b, then index a runs the fastest of all, followed by b, # then by j, and lastly by i. pairwiseExpProbVec <- function(ind.vec, th.rho.vec) { prob.vec <- rep(NA, length(ind.vec$index.thres.var1.of.pair)) prob.vec[ind.vec$index.thres.var1.of.pair == 0 | ind.vec$index.thres.var2.of.pair == 0] <- 0 prob.vec[ind.vec$last.thres.var1.of.pair & ind.vec$last.thres.var2.of.pair] <- 1 prob.vec[ind.vec$last.thres.var1.of.pair & ind.vec$index.thres.var2.of.pair != 0 & !ind.vec$last.thres.var2.of.pair] <- pnorm(th.rho.vec$thres.var2.for.last.cat.var1) prob.vec[ind.vec$last.thres.var2.of.pair & ind.vec$index.thres.var1.of.pair != 0 & !ind.vec$last.thres.var1.of.pair] <- pnorm(th.rho.vec$thres.var1.for.last.cat.var2) prob.vec[is.na(prob.vec)] <- pbivnorm( th.rho.vec$thres.var1.of.pair, th.rho.vec$thres.var2.of.pair, th.rho.vec$rho.vector ) cum.term1 <- prob.vec[ind.vec$index.thres.var1.of.pair != 0 & ind.vec$index.thres.var2.of.pair != 0] cum.term2 <- prob.vec[ind.vec$index.thres.var1.of.pair != 0 & !ind.vec$last.thres.var2.of.pair] cum.term3 <- prob.vec[ind.vec$index.thres.var2.of.pair != 0 & !ind.vec$last.thres.var1.of.pair] cum.term4 <- prob.vec[!ind.vec$last.thres.var1.of.pair & !ind.vec$last.thres.var2.of.pair] PI <- cum.term1 - cum.term2 - cum.term3 + cum.term4 # added by YR 11 nov 2012 to avoid Nan/-Inf # log(.Machine$double.eps) = -36.04365 # all elements should be strictly positive PI[PI < .Machine$double.eps] <- .Machine$double.eps PI } # derLtoRho # input: ind.vec - the output of function LongVecInd # th.rho.vec - the output of function LongVecTH.Rho # n.xixj - a vector with the observed frequency for every combination # of categories and every pair. 
#                 The frequencies are given in the same order as the
#                 expected probabilities in the pairwiseExpProbVec output
#        pi.xixj - the output of the pairwiseExpProbVec function
#        no.x - the number of ordinal variables
# output: the vector of der.L.to.rho, each element corresponds to
#         der.Lxixj.to.rho.xixj where j runs faster than i
derLtoRho <- function(ind.vec, th.rho.vec, n.xixj, pi.xixj, no.x) {
  prob.vec <- rep(NA, length(ind.vec$index.thres.var1.of.pair))
  prob.vec[ind.vec$index.thres.var1.of.pair == 0 |
    ind.vec$index.thres.var2.of.pair == 0 |
    ind.vec$last.thres.var1.of.pair |
    ind.vec$last.thres.var2.of.pair] <- 0
  prob.vec[is.na(prob.vec)] <- dbinorm(th.rho.vec$thres.var1.of.pair,
    th.rho.vec$thres.var2.of.pair,
    rho = th.rho.vec$rho.vector
  )
  den.term1 <- prob.vec[ind.vec$index.thres.var1.of.pair != 0 &
    ind.vec$index.thres.var2.of.pair != 0]
  den.term2 <- prob.vec[ind.vec$index.thres.var1.of.pair != 0 &
    !ind.vec$last.thres.var2.of.pair]
  den.term3 <- prob.vec[ind.vec$index.thres.var2.of.pair != 0 &
    !ind.vec$last.thres.var1.of.pair]
  den.term4 <- prob.vec[!ind.vec$last.thres.var1.of.pair &
    !ind.vec$last.thres.var2.of.pair]
  der.pi.xixj.to.rho.xixj <- den.term1 - den.term2 - den.term3 + den.term4
  prod.terms <- (n.xixj / pi.xixj) * der.pi.xixj.to.rho.xixj
  # to get der.Lxixj.to.rho.xixj we should sum all the elements of
  # prod.terms which correspond to the pair xi-xj, to do so:
  xnew <- lapply(
    ind.vec[c("index.pairs.extended")],
    function(y) {
      y[ind.vec$index.thres.var1.of.pair != 0 &
        ind.vec$index.thres.var2.of.pair != 0]
    }
  )
  # der.L.to.rho is:
  tapply(prod.terms, xnew$index.pairs.extended, sum)
}
###########################################################################
# derLtoTau
# input: ind.vec - the output of function LongVecInd
#        th.rho.vec - the output of function LongVecTH.Rho
#        n.xixj - a vector with the observed frequency for every combination
#                 of categories and every pair. The frequencies are given in
#                 the same order as the expected probabilities in the
#                 pairwiseExpProbVec output
#        pi.xixj - the output of the pairwiseExpProbVec function
# output: the vector of der.L.to.tau where the elements are ordered as
#         follows: the thresholds of each variable with respect to ascending
#         order of the variable index (i.e. thres_var1, thres_var2, etc.)
#         and within each variable the thresholds in ascending order.
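# For example (hypothetical): with p = 3 variables and 2 thresholds each,
# der.L.to.tau = (dL/dtau^x1_1, dL/dtau^x1_2,
#                 dL/dtau^x2_1, dL/dtau^x2_2,
#                 dL/dtau^x3_1, dL/dtau^x3_2).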
derLtoTau <- function(ind.vec, th.rho.vec, n.xixj, pi.xixj, no.x = 0L) {
  # to compute der.pi.xixj.to.tau.xi
  xi <- lapply(
    ind.vec[c(
      "index.thres.var2.of.pair",
      "last.thres.var2.of.pair"
    )],
    function(y) {
      y[!(ind.vec$index.thres.var1.of.pair == 0 |
        ind.vec$last.thres.var1.of.pair)]
    }
  )
  cum.prob.vec <- rep(NA, length(xi$index.thres.var2.of.pair))
  cum.prob.vec[xi$index.thres.var2.of.pair == 0] <- 0
  cum.prob.vec[xi$last.thres.var2.of.pair] <- 1
  denom <- sqrt(1 - (th.rho.vec$rho.vector * th.rho.vec$rho.vector))
  cum.prob.vec[is.na(cum.prob.vec)] <-
    pnorm((th.rho.vec$thres.var2.of.pair -
      th.rho.vec$rho.vector * th.rho.vec$thres.var1.of.pair) / denom)
  den.prob.vec <- dnorm(th.rho.vec$thres.var1.for.dnorm.in.der.pi.to.tau.xi)
  der.pi.xixj.to.tau.xi <- den.prob.vec *
    (cum.prob.vec[xi$index.thres.var2.of.pair != 0] -
      cum.prob.vec[!xi$last.thres.var2.of.pair])
  # to compute der.pi.xixj.to.tau.xj
  xj <- lapply(
    ind.vec[c(
      "index.thres.var1.of.pair",
      "last.thres.var1.of.pair"
    )],
    function(y) {
      y[!(ind.vec$index.thres.var2.of.pair == 0 |
        ind.vec$last.thres.var2.of.pair)]
    }
  )
  cum.prob.vec <- rep(NA, length(xj$index.thres.var1.of.pair))
  cum.prob.vec[xj$index.thres.var1.of.pair == 0] <- 0
  cum.prob.vec[xj$last.thres.var1.of.pair] <- 1
  denom <- sqrt(1 - (th.rho.vec$rho.vector * th.rho.vec$rho.vector))
  cum.prob.vec[is.na(cum.prob.vec)] <-
    pnorm((th.rho.vec$thres.var1.of.pair -
      th.rho.vec$rho.vector * th.rho.vec$thres.var2.of.pair) / denom)
  den.prob.vec <- dnorm(th.rho.vec$thres.var2.for.dnorm.in.der.pi.to.tau.xj)
  der.pi.xixj.to.tau.xj <- den.prob.vec *
    (cum.prob.vec[xj$index.thres.var1.of.pair != 0] -
      cum.prob.vec[!xj$last.thres.var1.of.pair])
  # to compute der.Lxixj.to.tau.xi and der.Lxixj.to.tau.xj
  n.over.pi <- n.xixj / pi.xixj
  # get the appropriate differences of n.over.pi for der.Lxixj.to.tau.xi and
  # der.Lxixj.to.tau.xj
  x3a <- lapply(ind.vec, function(y) {
    y[!(ind.vec$index.thres.var1.of.pair == 0 |
      ind.vec$index.thres.var2.of.pair == 0)]
  })
  diff.n.over.pi.to.xi <- n.over.pi[!x3a$last.thres.var1.of.pair] -
    n.over.pi[x3a$index.thres.var1.of.pair != 1]
  diff.n.over.pi.to.xj <- n.over.pi[!x3a$last.thres.var2.of.pair] -
    n.over.pi[x3a$index.thres.var2.of.pair != 1]
  # terms.der.Lxixj.to.tau.xi and terms.der.Lxixj.to.tau.xj
  terms.der.Lxixj.to.tau.xi <- diff.n.over.pi.to.xi * der.pi.xixj.to.tau.xi
  terms.der.Lxixj.to.tau.xj <- diff.n.over.pi.to.xj * der.pi.xixj.to.tau.xj
  # to add appropriately the elements of terms.der.Lxixj.to.tau.xi
  x3b <- lapply(
    ind.vec[c(
      "index.pairs.extended",
      "index.thres.var1.of.pair"
    )],
    function(y) {
      y[!(ind.vec$index.thres.var1.of.pair == 0 |
        ind.vec$last.thres.var1.of.pair |
        ind.vec$index.thres.var2.of.pair == 0)]
    }
  )
  # to add appropriately the elements of terms.der.Lxixj.to.tau.xj
  x4b <- lapply(
    ind.vec[c(
      "index.pairs.extended",
      "index.thres.var2.of.pair"
    )],
    function(y) {
      y[!(ind.vec$index.thres.var2.of.pair == 0 |
        ind.vec$last.thres.var2.of.pair |
        ind.vec$index.thres.var1.of.pair == 0)]
    }
  )
  ind.pairs <- utils::combn(no.x, 2)
  # der.Lxixj.to.tau.xi is a matrix, nrow=no.pairs, ncol=max(no.of.free.thres)
  # thus, there are NA's; similarly for der.Lxixj.to.tau.xj
  der.Lxixj.to.tau.xi <- tapply(
    terms.der.Lxixj.to.tau.xi,
    list(
      x3b$index.pairs.extended,
      x3b$index.thres.var1.of.pair
    ),
    sum
  )
  der.Lxixj.to.tau.xj <- tapply(
    terms.der.Lxixj.to.tau.xj,
    list(
      x4b$index.pairs.extended,
      x4b$index.thres.var2.of.pair
    ),
    sum
  )
  # to add appropriately the terms of der.Lxixj.to.tau.xi and
  # der.Lxixj.to.tau.xj
  split.der.Lxixj.to.tau.xi <- split(
    as.data.frame(der.Lxixj.to.tau.xi),
    ind.pairs[1, ]
  )
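  # e.g. (illustrative): with no.x = 3, ind.pairs[1, ] = c(1, 1, 2), so the
  # split() above groups the rows of der.Lxixj.to.tau.xi by the first
  # variable of each pair: the rows for pairs 1-2 and 1-3 (xi = 1) end up in
  # one group, the row for pair 2-3 (xi = 2) in another; summing column-wise
  # per group (below) accumulates each variable's threshold derivatives over
  # all pairs in which it appears as xi.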
sums.der.Lxixj.to.tau.xi <- lapply( split.der.Lxixj.to.tau.xi, function(x) { y <- apply(x, 2, sum) y[!is.na(y)] } ) # Note: NA exist in the case where the ordinal variables have different # number of response categories split.der.Lxixj.to.tau.xj <- split( as.data.frame(der.Lxixj.to.tau.xj), ind.pairs[2, ] ) sums.der.Lxixj.to.tau.xj <- lapply( split.der.Lxixj.to.tau.xj, function(x) { y <- apply(x, 2, sum) y[!is.na(y)] } ) # to get der.L.to.tau c( sums.der.Lxixj.to.tau.xi[[1]], c(unlist(sums.der.Lxixj.to.tau.xi[2:(no.x - 1)]) + unlist(sums.der.Lxixj.to.tau.xj[1:(no.x - 2)])), sums.der.Lxixj.to.tau.xj[[no.x - 1]] ) } lavaan/R/lav_lavaan_step12_implied.R0000644000176200001440000000241114627656441017062 0ustar liggesuserslav_lavaan_step12_implied <- function(lavoptions = NULL, lavmodel = NULL) { # # # # # # # # # # # # # # 12. lavimplied # # # # # # # # # # # # # # # if lavoptions$implied compute lavimplied via lav_model_implied lavimplied <- list() if (lavoptions$implied) { if (lav_verbose()) { cat("lavimplied ...") } lavimplied <- lav_model_implied(lavmodel) if (lav_verbose()) { cat(" done.\n") } } lavimplied } lav_lavaan_step12_loglik <- function(lavoptions = NULL, lavdata = NULL, lavsamplestats = NULL, lavimplied = NULL, lavmodel = NULL) { # # # # # # # # # # # # # # 12. lavloglik # # # # # # # # # # # # # # # if lavoptions$loglik compute lavloglik via lav_model_loglik lavloglik <- list() if (lavoptions$loglik) { if (lav_verbose()) { cat("lavloglik ...") } lavloglik <- lav_model_loglik( lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, lavmodel = lavmodel, lavoptions = lavoptions ) if (lav_verbose()) { cat(" done.\n") } } lavloglik } lavaan/R/lav_sam_step2.R0000644000176200001440000001322414627656441014620 0ustar liggesusers# SAM step 2: estimate structural part lav_sam_step2 <- function(STEP1 = NULL, FIT = NULL, sam.method = "local", struc.args = list()) { lavoptions <- FIT@Options lavpta <- FIT@pta nlevels <- lavpta$nlevels PT <- STEP1$PT LV.names <- unique(unlist(FIT@pta$vnames$lv.regular)) # adjust options lavoptions.PA <- lavoptions if (lavoptions.PA$se == "naive") { lavoptions.PA$se <- "standard" } else { # twostep or none -> none lavoptions.PA$se <- "none" } # lavoptions.PA$fixed.x <- TRUE # may be false if indicator is predictor lavoptions.PA$fixed.x <- FALSE # until we fix this... 
lavoptions.PA$categorical <- FALSE lavoptions.PA$.categorical <- FALSE lavoptions.PA$rotation <- "none" lavoptions.PA <- modifyList(lavoptions.PA, struc.args) # override, no matter what lavoptions.PA$do.fit <- TRUE if (sam.method %in% c("local", "fsr")) { lavoptions.PA$missing <- "listwise" lavoptions.PA$sample.cov.rescale <- FALSE # lavoptions.PA$baseline <- FALSE lavoptions.PA$h1 <- FALSE # lavoptions.PA$implied <- FALSE lavoptions.PA$loglik <- FALSE } else { lavoptions.PA$h1 <- FALSE # lavoptions.PA$implied <- FALSE lavoptions.PA$loglik <- FALSE } # construct PTS if (sam.method %in% c("local", "fsr")) { # extract structural part PTS <- lav_partable_subset_structural_model(PT, add.idx = TRUE, add.exo.cov = TRUE, fixed.x = lavoptions.PA$fixed.x, meanstructure = lavoptions.PA$meanstructure ) # any 'extra' parameters: not (free) in PT, but free in PTS (user == 3) # - fixed.x in PT, but fixed.x = FALSE is PTS # - fixed-to-zero interceps in PT, but free in PTS # - add.exo.cov: absent/fixed-to-zero in PT, but add/free in PTS extra.id <- which(PTS$user == 3L) # remove est/se/start columns PTS$est <- NULL PTS$se <- NULL PTS$start <- NULL if (nlevels > 1L) { PTS$level <- NULL PTS$group <- NULL PTS$group <- PTS$block NOBS <- FIT@Data@Lp[[1]]$nclusters } else { NOBS <- FIT@Data@nobs } # if meanstructure, 'free' user=0 intercepts? # if (lavoptions.PA$meanstructure) { # extra.int.idx <- which(PTS$op == "~1" & PTS$user == 0L & # PTS$free == 0L & # PTS$exo == 0L) # needed? # if (length(extra.int.idx) > 0L) { # PTS$free[extra.int.idx] <- 1L # PTS$ustart[extra.int.idx] <- as.numeric(NA) # PTS$free[PTS$free > 0L] <- # seq_len(length(PTS$free[PTS$free > 0L])) # PTS$user[extra.int.idx] <- 3L # } # } else { # extra.int.idx <- integer(0L) # } # extra.id <- c(extra.id, extra.int.idx) reg.idx <- attr(PTS, "idx") attr(PTS, "idx") <- NULL } else { # global SAM # the measurement model parameters now become fixed ustart values PT$ustart[PT$free > 0] <- PT$est[PT$free > 0] reg.idx <- lav_partable_subset_structural_model( PT = PT, idx.only = TRUE ) # remove 'exogenous' factor variances (if any) from reg.idx lv.names.x <- LV.names[LV.names %in% unlist(lavpta$vnames$eqs.x) & !LV.names %in% unlist(lavpta$vnames$eqs.y)] if (lavoptions.PA$fixed.x && length(lv.names.x) > 0L) { var.idx <- which(PT$lhs %in% lv.names.x & PT$op == "~~" & PT$lhs == PT$rhs) rm.idx <- which(reg.idx %in% var.idx) if (length(rm.idx) > 0L) { reg.idx <- reg.idx[-rm.idx] } } # adapt parameter table for structural part PTS <- PT # remove constraints we don't need con.idx <- which(PTS$op %in% c("==", "<", ">", ":=")) if (length(con.idx) > 0L) { needed.idx <- which(con.idx %in% reg.idx) if (length(needed.idx) > 0L) { con.idx <- con.idx[-needed.idx] } if (length(con.idx) > 0L) { PTS <- as.data.frame(PTS, stringsAsFactors = FALSE) PTS <- PTS[-con.idx, ] } } PTS$est <- NULL PTS$se <- NULL PTS$free[!PTS$id %in% reg.idx & PTS$free > 0L] <- 0L # set 'ustart' values for free FIT.PA parameter to NA PTS$ustart[PTS$free > 0L] <- as.numeric(NA) PTS <- lav_partable_complete(PTS) extra.id <- extra.int.idx <- integer(0L) } # global # fit structural model if (lav_verbose()) { cat("Fitting the structural part ... 
\n") } if (sam.method %in% c("local", "fsr")) { FIT.PA <- lavaan::lavaan(PTS, sample.cov = STEP1$VETA, sample.mean = STEP1$EETA, sample.nobs = NOBS, slotOptions = lavoptions.PA, verbose = FALSE ) } else { FIT.PA <- lavaan::lavaan( model = PTS, slotData = FIT@Data, slotSampleStats = FIT@SampleStats, slotOptions = lavoptions.PA, verbose = FALSE ) } if (lav_verbose()) { cat("Fitting the structural part ... done.\n") } # which parameters from PTS do we wish to fill in: # - all 'free' parameters # - :=, <, > (if any) # - and NOT element with user=3 (add.exo.cov = TRUE, extra.int.idx) pts.idx <- which((PTS$free > 0L | (PTS$op %in% c(":=", "<", ">"))) & !PTS$user == 3L) # find corresponding rows in PT PTS2 <- as.data.frame(PTS, stringsAsFactors = FALSE) pt.idx <- lav_partable_map_id_p1_in_p2(PTS2[pts.idx, ], PT, exclude.nonpar = FALSE ) # fill in PT$est[pt.idx] <- FIT.PA@ParTable$est[pts.idx] # create step2.free.idx p2.idx <- seq_len(length(PT$lhs)) %in% pt.idx & PT$free > 0 # no def! step2.free.idx <- STEP1$PT.free[p2.idx] # add 'step' column in PT PT$step <- rep(1L, length(PT$lhs)) PT$step[seq_len(length(PT$lhs)) %in% reg.idx] <- 2L STEP2 <- list( FIT.PA = FIT.PA, PT = PT, reg.idx = reg.idx, step2.free.idx = step2.free.idx, extra.id = extra.id ) STEP2 } lavaan/R/lav_partable_merge.R0000644000176200001440000001276414627656440015703 0ustar liggesusers# merge two parameter tables # - but allow different number of columns lav_partable_merge <- function(pt1 = NULL, pt2 = NULL, remove.duplicated = FALSE, fromLast = FALSE, warn = TRUE) { if (!missing(warn)) { current.warn <- lav_warn() if (lav_warn(warn)) on.exit(lav_warn(current.warn), TRUE) } # check for empty pt2 if (is.null(pt2) || length(pt2) == 0L) { return(pt1) } pt1 <- as.data.frame(pt1, stringsAsFactors = FALSE) pt2 <- as.data.frame(pt2, stringsAsFactors = FALSE) # check minimum requirements: lhs, op, rhs stopifnot( !is.null(pt1$lhs), !is.null(pt1$op), !is.null(pt1$rhs), !is.null(pt2$lhs), !is.null(pt2$op), !is.null(pt2$rhs) ) # both should have block (or not) if (is.null(pt1$block) && is.null(pt2$block)) { pt1$block <- rep(1L, length(pt1$lhs)) pt2$block <- rep(1L, length(pt2$lhs)) TMP <- rbind( pt1[, c("lhs", "op", "rhs", "block")], pt2[, c("lhs", "op", "rhs", "block")] ) } else { if (is.null(pt1$block) && !is.null(pt2$block)) { pt1$block <- rep(1L, length(pt1$lhs)) } else if (is.null(pt2$block) && !is.null(pt1$block)) { pt2$block <- rep(1L, length(pt2$lhs)) } TMP <- rbind( pt1[, c("lhs", "op", "rhs", "block")], pt2[, c("lhs", "op", "rhs", "block")] ) } # if missing columns, provide default values of the right type # (numeric/integer/character) # group if (is.null(pt1$group) && !is.null(pt2$group)) { pt1$group <- rep(1L, length(pt1$lhs)) } else if (is.null(pt2$group) && !is.null(pt1$group)) { pt2$group <- rep(1L, length(pt2$lhs)) } # level if (is.null(pt1$level) && !is.null(pt2$level)) { pt1$level <- rep(1L, length(pt1$lhs)) } else if (is.null(pt2$level) && !is.null(pt1$level)) { pt2$level <- rep(1L, length(pt2$lhs)) } # user if (is.null(pt1$user) && !is.null(pt2$user)) { pt1$user <- rep(0L, length(pt1$lhs)) } else if (is.null(pt2$user) && !is.null(pt1$user)) { pt2$user <- rep(0L, length(pt2$lhs)) } # free if (is.null(pt1$free) && !is.null(pt2$free)) { pt1$free <- rep(0L, length(pt1$lhs)) } else if (is.null(pt2$free) && !is.null(pt1$free)) { pt2$free <- rep(0L, length(pt2$lhs)) } # ustart -- set to zero!! 
if (is.null(pt1$ustart) && !is.null(pt2$ustart)) { pt1$ustart <- rep(0, length(pt1$lhs)) } else if (is.null(pt2$ustart) && !is.null(pt1$ustart)) { pt2$ustart <- rep(0, length(pt2$lhs)) } # exo if (is.null(pt1$exo) && !is.null(pt2$exo)) { pt1$exo <- rep(0L, length(pt1$lhs)) } else if (is.null(pt2$exo) && !is.null(pt1$exo)) { pt2$exo <- rep(0L, length(pt2$lhs)) } # label if (is.null(pt1$label) && !is.null(pt2$label)) { pt1$label <- rep("", length(pt1$lhs)) } else if (is.null(pt2$label) && !is.null(pt1$label)) { pt2$label <- rep("", length(pt2$lhs)) } # plabel if (is.null(pt1$plabel) && !is.null(pt2$plabel)) { pt1$plabel <- rep("", length(pt1$lhs)) } else if (is.null(pt2$plabel) && !is.null(pt1$plabel)) { pt2$plabel <- rep("", length(pt2$lhs)) } # efa if (is.null(pt1$efa) && !is.null(pt2$efa)) { pt1$efa <- rep("", length(pt1$lhs)) } else if (is.null(pt2$efa) && !is.null(pt1$efa)) { pt2$efa <- rep("", length(pt2$lhs)) } # start if (is.null(pt1$start) && !is.null(pt2$start)) { pt1$start <- rep(as.numeric(NA), length(pt1$lhs)) } else if (is.null(pt2$start) && !is.null(pt1$start)) { pt2$start <- rep(as.numeric(NA), length(pt2$lhs)) } # est if (is.null(pt1$est) && !is.null(pt2$est)) { pt1$est <- rep(0, length(pt1$lhs)) } else if (is.null(pt2$est) && !is.null(pt1$est)) { pt2$est <- rep(0, length(pt2$lhs)) } # check for duplicated elements if (remove.duplicated) { # if fromLast = TRUE, idx is in pt1 # if fromLast = FALSE, idx is in pt2 idx <- which(duplicated(TMP, fromLast = fromLast)) if (length(idx)) { lav_msg_warn( gettext("duplicated parameters are ignored:"), paste(apply(TMP[idx, c("lhs", "op", "rhs")], 1, paste, collapse = " " ), collapse = "\n") ) if (fromLast) { pt1 <- pt1[-idx, ] } else { idx <- idx - nrow(pt1) pt2 <- pt2[-idx, ] } } } else if (!is.null(pt1$start) && !is.null(pt2$start)) { # copy start values from pt1 to pt2 for (i in 1:length(pt1$lhs)) { idx <- which(pt2$lhs == pt1$lhs[i] & pt2$op == pt1$op[i] & pt2$rhs == pt1$rhs[i] & pt2$block == pt1$block[i]) pt2$start[idx] <- pt1$start[i] } } # nicely merge, using 'id' column (if it comes first) if (is.null(pt1$id) && !is.null(pt2$id)) { nid <- max(pt2$id) pt1$id <- (nid + 1L):(nid + nrow(pt1)) } else if (is.null(pt2$id) && !is.null(pt1$id)) { nid <- max(pt1$id) pt2$id <- (nid + 1L):(nid + nrow(pt2)) } NEW <- base::merge(pt1, pt2, all = TRUE, sort = FALSE) # make sure group/block/level are zero (or "") if # op %in% c("==", "<", ">", ":=") op.idx <- which(NEW$op %in% c("==", "<", ">", ":=")) if (length(op.idx) > 0L) { if (!is.null(NEW$block)) { # ALWAYS integer NEW$block[op.idx] <- 0L } if (!is.null(NEW$group)) { if (is.character(NEW$level)) { NEW$group[op.idx] <- "" } else { NEW$group[op.idx] <- 0L } } if (!is.null(NEW$level)) { if (is.character(NEW$level)) { NEW$level[op.idx] <- "" } else { NEW$level[op.idx] <- 0L } } } NEW } lavaan/R/lav_representation_ram.R0000644000176200001440000002420614627656441016626 0ustar liggesusers# RAM representation # # initial version: YR 2021-10-04 lav_ram <- function(partable = NULL, target = NULL, extra = FALSE, remove.nonexisting = TRUE) { # prepare target list if (is.null(target)) target <- partable stopifnot(!is.null(target$block)) # not for categorical data (yet) if (any(partable$op == "|")) { lav_msg_stop(gettext("RAM representation is not (yet) supported for categorical endogenous variables.")) } # not for conditional.x = TRUE yet conditional.x <- any(partable$exo > 0L & partable$op == "~") if (conditional.x) { lav_msg_stop(gettext("RAM representation is not (yet) supported if conditional.x = 
TRUE")) } # prepare output N <- length(target$lhs) tmp.mat <- character(N) tmp.row <- integer(N) tmp.col <- integer(N) # global settings meanstructure <- any(partable$op == "~1") categorical <- any(partable$op == "|") group.w.free <- any(partable$lhs == "group" & partable$op == "%") # number of blocks nblocks <- lav_partable_nblocks(partable) # always return ov.idx ov.idx <- vector("list", nblocks) ov.dummy.names.nox <- vector("list", nblocks) ov.dummy.names.x <- vector("list", nblocks) if (extra) { REP.mmNames <- vector("list", nblocks) REP.mmNumber <- vector("list", nblocks) REP.mmRows <- vector("list", nblocks) REP.mmCols <- vector("list", nblocks) REP.mmDimNames <- vector("list", nblocks) REP.mmSymmetric <- vector("list", nblocks) } for (g in 1:nblocks) { # info from user model per block ov.names <- vnames(partable, "ov", block = g) nvar <- length(ov.names) ov.idx[[g]] <- seq_len(nvar) ov.dummy.names.nox[[g]] <- character(0) ov.dummy.names.x[[g]] <- character(0) lv.names <- vnames(partable, "lv", block = g) both.names <- c(ov.names, lv.names) nboth <- length(both.names) # 1. "=~" indicators idx <- which(target$block == g & target$op == "=~") tmp.mat[idx] <- "A" tmp.row[idx] <- match(target$rhs[idx], both.names) tmp.col[idx] <- match(target$lhs[idx], both.names) # 2. "~" regressions idx <- which(target$block == g & (target$op == "~" | target$op == "<~")) tmp.mat[idx] <- "A" tmp.row[idx] <- match(target$lhs[idx], both.names) tmp.col[idx] <- match(target$rhs[idx], both.names) # 3. "~~" variances/covariances idx <- which(target$block == g & target$op == "~~") tmp.mat[idx] <- "S" tmp.row[idx] <- match(target$lhs[idx], both.names) tmp.col[idx] <- match(target$rhs[idx], both.names) # catch lower-elements in theta/psi idx.lower <- which(tmp.mat == "S" & tmp.row > tmp.col) if (length(idx.lower) > 0L) { tmp <- tmp.row[idx.lower] tmp.row[idx.lower] <- tmp.col[idx.lower] tmp.col[idx.lower] <- tmp } # 4. "~1" means/intercepts idx <- which(target$block == g & target$op == "~1") tmp.mat[idx] <- "m" tmp.row[idx] <- match(target$lhs[idx], both.names) tmp.col[idx] <- 1L # 5. "|" th # not used yet # 6. "~*~" scales # not used yet # 7. group weights idx <- which(target$block == g & target$lhs == "group" & target$op == "%") tmp.mat[idx] <- "gw" tmp.row[idx] <- 1L tmp.col[idx] <- 1L if (extra) { # mRows mmRows <- list( ov.idx = 1L, A = nboth, S = nboth, m = nboth, gw = 1L ) # mCols mmCols <- list( ov.idx = nvar, A = nboth, S = nboth, m = 1L, gw = 1L ) # dimNames for LISREL model matrices mmDimNames <- list( ov.idx = list("ov.idx", ov.names), A = list(both.names, both.names), S = list(both.names, both.names), m = list(both.names, "intercept"), gw = list("group", "weight") ) # isSymmetric mmSymmetric <- list( ov.idx = FALSE, A = FALSE, S = TRUE, m = FALSE, gw = FALSE ) # which mm's do we need? 
(always include ov.idx, A and S) IDX <- which(target$block == g) mmNames <- c("ov.idx", "A", "S") if (meanstructure) { mmNames <- c(mmNames, "m") } if ("gw" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "gw") } REP.mmNames[[g]] <- mmNames REP.mmNumber[[g]] <- length(mmNames) REP.mmRows[[g]] <- unlist(mmRows[mmNames]) REP.mmCols[[g]] <- unlist(mmCols[mmNames]) REP.mmDimNames[[g]] <- mmDimNames[mmNames] REP.mmSymmetric[[g]] <- unlist(mmSymmetric[mmNames]) } # extra } # nblocks REP <- list( mat = tmp.mat, row = tmp.row, col = tmp.col ) # always return ov.idx attribute attr(REP, "ov.idx") <- ov.idx attr(REP, "ov.dummy.names.nox") <- ov.dummy.names.nox attr(REP, "ov.dummy.names.x") <- ov.dummy.names.x if (extra) { attr(REP, "mmNames") <- REP.mmNames attr(REP, "mmNumber") <- REP.mmNumber attr(REP, "mmRows") <- REP.mmRows attr(REP, "mmCols") <- REP.mmCols attr(REP, "mmDimNames") <- REP.mmDimNames attr(REP, "mmSymmetric") <- REP.mmSymmetric } REP } # the model-implied variance/covariance matrix of the observed variables lav_ram_sigmahat <- function(MLIST = NULL, delta = NULL) { ov.idx <- as.integer(MLIST$ov.idx[1, ]) A <- MLIST$A S <- MLIST$S # get (I-A)^{-1} IA.inv <- lav_matrix_inverse_iminus(A) # compute Sigma for all ov and lv VYeta <- tcrossprod(IA.inv %*% S, IA.inv) # select only observed part VY <- VYeta[ov.idx, ov.idx, drop = FALSE] # if delta, scale if (!is.null(MLIST$delta) && delta) { nvar <- ncol(VY) DELTA <- diag(MLIST$delta[, 1L], nrow = nvar, ncol = nvar) VY <- DELTA %*% VY %*% DELTA } VY } # VETA: the variance/covariance matrix of the latent variables only lav_ram_veta <- function(MLIST = NULL) { ov.idx <- as.integer(MLIST$ov.idx[1, ]) A <- MLIST$A S <- MLIST$S # get (I-A)^{-1} IA.inv <- lav_matrix_inverse_iminus(A) # compute Sigma for all ov and lv VYeta <- tcrossprod(IA.inv %*% S, IA.inv) # select only latent part VETA <- VYeta[-ov.idx, -ov.idx, drop = FALSE] VETA } # MuHat: the model-implied means/intercepts lav_ram_muhat <- function(MLIST = NULL) { ov.idx <- as.integer(MLIST$ov.idx[1, ]) A <- MLIST$A m <- MLIST$m # shortcut if (is.null(m)) { return(matrix(0, nrow = length(ov.idx), 1L)) } # get (I-A)^{-1} IA.inv <- lav_matrix_inverse_iminus(A) # all means/intercepts EYeta <- IA.inv %*% m # select observed only muhat <- EYeta[ov.idx, , drop = FALSE] muhat } # derivative of 'Sigma' wrt the (freel) elements in A and/or S lav_ram_dsigma <- function(m = "A", idx = seq_len(length(MLIST[[m]])), MLIST = NULL, vech = TRUE) { ov.idx <- as.integer(MLIST$ov.idx[1, ]) A <- MLIST$A S <- MLIST$S nvar <- length(ov.idx) nboth <- nrow(A) # shortcut for ov.idx, m, ... if (!m %in% c("A", "S")) { pstar <- nvar * (nvar + 1) / 2 return(matrix(0.0, nrow = pstar, ncol = length(idx))) } # get (I-A)^{-1} IA.inv <- lav_matrix_inverse_iminus(A) if (m == "A") { L1 <- (IA.inv %*% S %*% t(IA.inv))[ov.idx, , drop = FALSE] KOL.idx <- matrix(1:(nboth * nboth), nboth, nboth, byrow = TRUE)[idx] DX <- (L1 %x% IA.inv[ov.idx, , drop = FALSE])[, idx, drop = FALSE] + (IA.inv[ov.idx, , drop = FALSE] %x% L1)[, KOL.idx, drop = FALSE] # this is not really needed (because we select idx=m.el.idx) # but just in case we need all elements of beta... 
DX[, which(idx %in% lav_matrix_diag_idx(nboth))] <- 0.0 } else if (m == "S") { DX <- (IA.inv[ov.idx, , drop = FALSE] %x% IA.inv[ov.idx, , drop = FALSE]) # symmetry correction, but keeping all duplicated elements # since we depend on idx=m.el.idx lower.idx <- lav_matrix_vech_idx(nboth, diagonal = FALSE) upper.idx <- lav_matrix_vechru_idx(nboth, diagonal = FALSE) offdiagSum <- DX[, lower.idx] + DX[, upper.idx] DX[, c(lower.idx, upper.idx)] <- cbind(offdiagSum, offdiagSum) DX <- DX[, idx, drop = FALSE] } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } # vech? if (vech) { v.idx <- lav_matrix_vech_idx(nvar) DX <- DX[v.idx, , drop = FALSE] } DX } # derivative of 'Mu' wrt the (free) elements in A and/or m lav_ram_dmu <- function(m = "A", idx = seq_len(length(MLIST[[m]])), MLIST = NULL, vech = TRUE) { ov.idx <- as.integer(MLIST$ov.idx[1, ]) A <- MLIST$A S <- MLIST$S nvar <- length(ov.idx) nboth <- nrow(A) # shortcut for ov.idx, m, ... if (!m %in% c("A", "m")) { return(matrix(0.0, nrow = nvar, ncol = length(idx))) } # get (I-A)^{-1} IA.inv <- lav_matrix_inverse_iminus(A) if (m == "A") { DX <- (t(IA.inv %*% MLIST$m) %x% IA.inv)[ov.idx, idx, drop = FALSE] } else if (m == "m") { DX <- IA.inv[ov.idx, idx, drop = FALSE] } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } DX } # derivative of ML/GLS objective function F wrt the free parameters lav_ram_df <- function(MLIST = NULL, Omega = NULL, Omega.mu = NULL) { ov.idx <- as.integer(MLIST$ov.idx[1, ]) A <- MLIST$A S <- MLIST$S nvar <- length(ov.idx) nboth <- nrow(A) # get (I-A)^{-1} IA.inv <- lav_matrix_inverse_iminus(A) # meanstructure? meanstructure <- FALSE if (!is.null(Omega.mu)) meanstructure <- TRUE # pre-compute tIA.inv <- t(IA.inv) Omega..IA.inv..S..tIA.inv <- (Omega %*% IA.inv[ov.idx, , drop = FALSE] %*% S %*% t(IA.inv)) # 1. A if (meanstructure) { A.deriv <- -1.0 * ((t(IA.inv)[, ov.idx, drop = FALSE] %*% (Omega.mu %*% t(MLIST$m)) %*% t(IA.inv)) + (tIA.inv[, ov.idx, drop = FALSE] %*% Omega..IA.inv..S..tIA.inv)) } else { A.deriv <- -1.0 * (tIA.inv[, ov.idx, drop = FALSE] %*% Omega..IA.inv..S..tIA.inv) } # 2. S S.deriv <- -1.0 * (tIA.inv[, ov.idx, drop = FALSE] %*% Omega %*% IA.inv[ov.idx, , drop = FALSE]) diag(S.deriv) <- 0.5 * diag(S.deriv) if (meanstructure) { m.deriv <- -1.0 * t(t(Omega.mu) %*% IA.inv[ov.idx, , drop = FALSE]) } else { m.deriv <- NULL } list( A = A.deriv, S = S.deriv, m = m.deriv ) } lavaan/R/lav_fit_aic.R0000644000176200001440000000744214627656441014326 0ustar liggesusers# functions related to AIC and other information criteria # lower-level functions: # - lav_fit_aic # - lav_fit_bic # - lav_fit_sabic # higher-level functions: # - lav_fit_aic_lavobject # Y.R. 
21 July 2022 lav_fit_aic <- function(logl = NULL, npar = NULL) { AIC <- (-2 * logl) + (2 * npar) AIC } lav_fit_bic <- function(logl = NULL, npar = NULL, N = NULL) { BIC <- (-2 * logl) + (npar * log(N)) BIC } lav_fit_sabic <- function(logl = NULL, npar = NULL, N = NULL) { N.star <- (N + 2) / 24 SABIC <- (-2 * logl) + (npar * log(N.star)) SABIC } lav_fit_aic_lavobject <- function(lavobject = NULL, fit.measures = "aic", standard.test = "standard", scaled.test = "none", estimator = "ML") { # check lavobject stopifnot(inherits(lavobject, "lavaan")) # tests TEST <- lavobject@test test.names <- sapply(lavobject@test, "[[", "test") if (test.names[1] == "none" || standard.test == "none") { return(list()) } test.idx <- which(test.names == standard.test)[1] if (length(test.idx) == 0L) { return(list()) } scaled.flag <- FALSE if (!scaled.test %in% c("none", "standard", "default")) { scaled.idx <- which(test.names == scaled.test) if (length(scaled.idx) > 0L) { scaled.idx <- scaled.idx[1] # only the first one scaled.flag <- TRUE } } # estimator? if (missing(estimator)) { estimator <- lavobject@Options$estimator } # supported fit measures in this function if (estimator == "MML") { fit.logl <- c("logl", "aic", "bic", "ntotal", "bic2") } else { fit.logl <- c( "logl", "unrestricted.logl", "aic", "bic", "ntotal", "bic2" ) } if (scaled.flag && scaled.test %in% c("yuan.bentler", "yuan.bentler.mplus")) { fit.logl <- c(fit.logl, "scaling.factor.h1", "scaling.factor.h0") } # which one do we need? if (missing(fit.measures)) { # default set fit.measures <- fit.logl } else { # remove any not-CFI related index from fit.measures rm.idx <- which(!fit.measures %in% fit.logl) if (length(rm.idx) > 0L) { fit.measures <- fit.measures[-rm.idx] } if (length(fit.measures) == 0L) { return(list()) } } # output container indices <- list() # non-ML values indices["logl"] <- as.numeric(NA) indices["unrestricted.logl"] <- as.numeric(NA) indices["aic"] <- as.numeric(NA) indices["bic"] <- as.numeric(NA) indices["ntotal"] <- lavobject@SampleStats@ntotal indices["bic2"] <- as.numeric(NA) if (estimator %in% c("ML", "MML")) { # do we have a @h1 slot? 
if (.hasSlot(lavobject, "h1") && length(lavobject@h1) > 0L) { indices["unrestricted.logl"] <- lavobject@h1$logl$loglik } else { lavh1 <- lav_h1_implied_logl( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavoptions = lavobject@Options ) indices["unrestricted.logl"] <- lavh1$logl$loglik } # logl H0 if (.hasSlot(lavobject, "loglik")) { loglik <- lavobject@loglik } else { loglik <- lav_model_loglik( lavdata = lavobject@Data, lavsamplestats = lavobject@SampleStats, lavimplied = lavobject@implied, lavmodel = lavobject@Model, lavoptions = lavobject@Options ) } indices["logl"] <- loglik$loglik indices["aic"] <- loglik$AIC indices["bic"] <- loglik$BIC indices["ntotal"] <- loglik$ntotal indices["bic2"] <- loglik$BIC2 # scaling factor for MLR if (scaled.test %in% c("yuan.bentler", "yuan.bentler.mplus")) { indices["scaling.factor.h1"] <- TEST[[scaled.idx]]$scaling.factor.h1 indices["scaling.factor.h0"] <- TEST[[scaled.idx]]$scaling.factor.h0 } } # ML # return only those that were requested indices[fit.measures] } lavaan/R/lav_partable_complete.R0000644000176200001440000001114514627656441016405 0ustar liggesusers# handle bare-minimum partables # add some additional columns lav_partable_complete <- function(partable = NULL, start = TRUE) { # nolint # check if we hava a data.frame # if so, check for columns that are 'factor' and convert them to 'character' ovda <- attr(partable, "ovda") if (is.data.frame(partable)) { fac.idx <- sapply(partable, is.factor) partable[fac.idx] <- lapply(partable[fac.idx], as.character) } # check if we have lhs, op, rhs stopifnot(!is.null(partable$lhs), !is.null(partable$op), !is.null(partable$rhs)) # number of elements tmp.n <- length(partable$lhs) if (!is.data.frame(partable)) { # check for equal column length nel <- sapply(partable, length) short.idx <- which(nel < tmp.n) long.idx <- which(nel > tmp.n) if (length(long.idx) > 0L) { lav_msg_warn(gettext("partable columns have unequal length")) } if (length(short.idx) > 0L) { # try to extend them in a 'natural' way for (i in short.idx) { too.short <- tmp.n - nel[i] if (is.integer(partable[[i]])) { partable[[i]] <- c(partable[[i]], integer(too.short)) } else if (is.numeric(partable[[i]])) { partable[[i]] <- c(partable[[i]], numeric(too.short)) } else { partable[[i]] <- c(partable[[i]], character(too.short)) } } } } # create new id column # if(is.null(partable$id)) { partable$id <- seq_len(tmp.n) # } # add user column if (is.null(partable$user)) { partable$user <- rep(1L, tmp.n) } else { partable$user <- as.integer(partable$user) } # add block column if (is.null(partable$block)) { partable$block <- rep(1L, tmp.n) } else { partable$block <- as.integer(partable$block) } # add group column if (is.null(partable$group)) { partable$group <- rep(1L, tmp.n) } else { # partable$group <- as.integer(partable$group) # maybe labels? } # add free column if (is.null(partable$free)) { partable$free <- seq_len(tmp.n) # 0.6-11: check for simple equality constraints # note: this is perhaps only a subset (eg SAM!) 
of a larger # table, and we have to renumber the 'free' column } else if (is.integer(partable$free) && any(partable$free > 0L) && !any(partable$op == "==") && !is.null(partable$label) && !is.null(partable$plabel) && any(duplicated(partable$free[partable$free > 0L]))) { dup.idx <- which(partable$free > 0L & duplicated(partable$free)) all.idx <- which(partable$free %in% unique(partable$free[dup.idx])) eq.labels <- unique(partable$free[all.idx]) eq.id <- integer(length(partable$lhs)) eq.id[all.idx] <- partable$free[all.idx] partable$free[dup.idx] <- 0L idx.free <- which(partable$free > 0L) partable$free <- rep(0L, tmp.n) partable$free[idx.free] <- seq_along(idx.free) for (eq.label in eq.labels) { all.idx <- which(eq.id == eq.label) ref.idx <- all.idx[1L] other.idx <- all.idx[-1L] partable$free[other.idx] <- partable$free[ref.idx] } } else { # treat non-zero as 'free' free.idx <- which(as.logical(partable$free)) partable$free <- rep(0L, tmp.n) if (length(free.idx) > 0L) { partable$free[free.idx] <- seq_len(length(free.idx)) } } # add ustart column if (is.null(partable$ustart)) { # do we have something else? start? est? if (!is.null(partable$start)) { partable$ustart <- as.numeric(partable$start) } else if (!is.null(partable$est)) { partable$ustart <- as.numeric(partable$est) } else { partable$ustart <- rep(as.numeric(NA), tmp.n) non.free <- which(!partable$free) if (length(non.free)) { partable$ustart[non.free] <- 0 } } } else { partable$ustart <- as.numeric(partable$ustart) } # add exo column if (is.null(partable$exo)) { partable$exo <- rep(0, tmp.n) } else { partable$exo <- as.integer(partable$exo) } # add label column if (is.null(partable$label)) { partable$label <- rep("", tmp.n) } else { partable$label <- as.character(partable$label) } # order them nicely: id lhs op rhs group idx <- match(c("id", "lhs", "op", "rhs", "user", "block", "group", "free", "ustart", "exo", "label"), names(partable)) tmp <- partable[idx] partable <- c(tmp, partable[-idx]) # add start column if (start) { if (is.null(partable$start)) { partable$start <- lav_start(start.method = "simple", lavpartable = partable) } } attr(partable, "ovda") <- ovda attr(partable, "vnames") <- lav_partable_vnames(partable, "*") partable } lavaan/R/lav_bootstrap.R0000644000176200001440000004717014627656441014747 0ustar liggesusers# main function used by various bootstrap related functions # this function draws the bootstrap samples, and estimates the # free parameters for each bootstrap sample # # return COEF matrix of size R x npar (R = number of bootstrap samples) # # Ed. 9 mar 2012 # # Notes: - faulty runs are simply ignored (with a warning) # - default R=1000 # # Updates: - now we have a separate @Data slot, we only need to transform once # for the bollen.stine bootstrap (13 dec 2011) # - bug fix: we need to 'update' the fixed.x variances/covariances # for each bootstrap draw!! # # Question: if fixed.x=TRUE, should we not keep X fixed, and bootstrap Y # only, conditional on X?? How to implement the conditional part? 
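# Example usage (illustrative only; the model and data names are made up):
#   fit <- sem("y ~ x", data = mydata)
#   b <- bootstrapLavaan(fit, R = 500L, type = "ordinary", FUN = "coef")
#   apply(b, 2, sd, na.rm = TRUE) # bootstrap SEs; failed runs are NA rows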
# YR 27 Aug: - add keep.idx argument # - always return 'full' set of bootstrap results, including # failed runs (as NAs) # - idx nonadmissible/error solutions as an attribute # - thanks to keep.idx, it is easy to replicate/investigate these # cases if needed bootstrapLavaan <- function(object, R = 1000L, type = "ordinary", verbose = FALSE, FUN = "coef", # return.boot = FALSE, # no use, as boot stores # # sample indices differently keep.idx = FALSE, parallel = c("no", "multicore", "snow"), ncpus = 1L, cl = NULL, iseed = NULL, h0.rmsea = NULL, ...) { # checks type. <- tolower(type) # overwritten if nonparametric stopifnot( inherits(object, "lavaan"), type. %in% c( "nonparametric", "ordinary", "bollen.stine", "parametric", "yuan" ) ) if (!missing(verbose)) { current.verbose <- lav_verbose() if (lav_verbose(verbose)) on.exit(lav_verbose(current.verbose), TRUE) } if (type. == "nonparametric") { type. <- "ordinary" } if (missing(parallel)) { parallel <- "no" } parallel <- match.arg(parallel) # check if options$se is not bootstrap, otherwise, we get an infinite loop if (object@Options$se == "bootstrap") { object@Options$se <- "standard" } # check if options$test is not bollen.stine if ("bollen.stine" %in% object@Options$test) { object@Options$test <- "standard" } # check for conditional.x = TRUE if (object@Model@conditional.x) { lav_msg_stop(gettext( "this function is not (yet) available if conditional.x = TRUE")) } lavoptions. <- list( parallel = parallel, ncpus = ncpus, cl = cl, iseed = iseed ) out <- lav_bootstrap_internal( object = object, lavdata. = NULL, lavmodel. = NULL, lavsamplestats. = NULL, lavoptions. = lavoptions., lavpartable. = NULL, R = R, type = type., FUN = FUN, keep.idx = keep.idx, h0.rmsea = h0.rmsea, ... ) # new in 0.6-12: always warn for failed and nonadmissible runs nfailed <- length(attr(out, "error.idx")) # zero if NULL if (nfailed > 0L) { lav_msg_warn(gettextf( "%s bootstrap runs failed or did not converge.", nfailed)) } notok <- length(attr(out, "nonadmissible")) # zero if NULL if (notok > 0L) { lav_msg_warn(gettextf( "%s bootstrap runs resulted in nonadmissible solutions.", notok)) } out } # we need an internal version to be called from VCOV and lav_model_test # when there is no lavaan object yet! lav_bootstrap_internal <- function(object = NULL, lavdata. = NULL, lavmodel. = NULL, lavsamplestats. = NULL, lavoptions. = NULL, lavpartable. = NULL, R = 1000L, type = "ordinary", FUN = "coef", check.post = TRUE, keep.idx = FALSE, # return.boot = FALSE, h0.rmsea = NULL, ...) { # warning: avoid use of 'options', 'sample' (both are used as functions # below... # options -> opt # sample -> samp mc <- match.call() # object slots FUN.orig <- FUN if (!is.null(object)) { lavdata <- object@Data lavmodel <- object@Model lavsamplestats <- object@SampleStats lavoptions <- object@Options if (!is.null(lavoptions.)) { lavoptions$parallel <- lavoptions.$parallel lavoptions$ncpus <- lavoptions.$ncpus lavoptions$cl <- lavoptions.$cl lavoptions$iseed <- lavoptions.$iseed } lavpartable <- object@ParTable FUN <- match.fun(FUN) t0 <- FUN(object, ...) t.star <- matrix(as.numeric(NA), R, length(t0)) colnames(t.star) <- names(t0) } else { # internal version! lavdata <- lavdata. lavmodel <- lavmodel. lavsamplestats <- lavsamplestats. lavoptions <- lavoptions. lavpartable <- lavpartable. 
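    # note: in the internal version, FUN is a character switch rather than a
    # user-supplied function: "coef" stores the free-parameter vector per
    # draw, "test" the (first) test statistic, and "coeftest" both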
if (FUN == "coef") { t.star <- matrix(as.numeric(NA), R, lavmodel@nx.free) lavoptions$test <- "none" } else if (FUN == "test") { t.star <- matrix(as.numeric(NA), R, 1L) lavoptions$test <- "standard" } else if (FUN == "coeftest") { t.star <- matrix(as.numeric(NA), R, lavmodel@nx.free + 1L) lavoptions$test <- "standard" } } # always shut off some options: current.verbose <- lav_verbose() if (lav_verbose(FALSE)) on.exit(lav_verbose(current.verbose)) lavoptions$check.start <- FALSE lavoptions$check.post <- FALSE lavoptions$optim.attempts <- 1L # can save a lot of time # if internal or FUN == "coef", we can shut off even more if (is.null(object) || (is.character(FUN.orig) && FUN.orig == "coef")) { lavoptions$baseline <- FALSE lavoptions$h1 <- FALSE lavoptions$loglik <- FALSE lavoptions$implied <- FALSE lavoptions$store.vcov <- FALSE lavoptions$se <- "none" if (FUN.orig == "coef") { lavoptions$test <- "none" } } # bollen.stine, yuan, or parametric: we need the Sigma.hat values if (type == "bollen.stine" || type == "parametric" || type == "yuan") { Sigma.hat <- computeSigmaHat(lavmodel = lavmodel) Mu.hat <- computeMuHat(lavmodel = lavmodel) } # can we use the original data, or do we need to transform it first? if (type == "bollen.stine" || type == "yuan") { # check if data is continuous if (lavmodel@categorical) { lav_msg_stop(gettext( "bollen.stine/yuan bootstrap not available for categorical/ordinal data" )) } # check if data is complete if (lavoptions$missing != "listwise") { lav_msg_stop(gettext( "bollen.stine/yuan bootstrap not available for missing data")) } dataX <- vector("list", length = lavdata@ngroups) } else { dataX <- lavdata@X } # if bollen.stine, transform data here if (type == "bollen.stine") { for (g in 1:lavsamplestats@ngroups) { sigma.sqrt <- lav_matrix_symmetric_sqrt(Sigma.hat[[g]]) S.inv.sqrt <- lav_matrix_symmetric_sqrt(lavsamplestats@icov[[g]]) # center (needed???) X <- scale(lavdata@X[[g]], center = TRUE, scale = FALSE) # transform X <- X %*% S.inv.sqrt %*% sigma.sqrt # add model-based mean if (lavmodel@meanstructure) { X <- scale(X, center = (-1 * Mu.hat[[g]]), scale = FALSE) } # transformed data dataX[[g]] <- X } # if yuan, transform data here } else if (type == "yuan") { # page numbers refer to Yuan et al, 2007 # Define a function to find appropriate value of a # (p. 272); code supplied 16 jun 2016 by Cheng & Wu search.a <- function(F0, d, p) { if (F0 == 0) { a0 <- 0 return(a0) } max.a <- 1 / (1 - min(d)) - 1e-3 # starting value; Yuan p. 272 a0 <- min(sqrt(2 * F0 / sum((d - 1)^2)), max.a) # See Yuan p. 280 for (i in 1:50) { dia <- a0 * d + (1 - a0) g1 <- -sum(log(dia)) + sum(dia) - p dif <- g1 - F0 if (abs(dif) < 1e-6) { return(a0) } g2 <- a0 * sum((d - 1)^2 / dia) a0 <- min(max(a0 - dif / g2, 0), max.a) } # if search fails to converge in 50 iterations lav_msg_warn(gettext("yuan bootstrap search for `a` did not converge. h0.rmsea may be too large.")) a0 } # Now use g.a within each group for (g in 1:lavsamplestats@ngroups) { S <- lavsamplestats@cov[[g]] # test is in Fit slot ghat <- object@test[[1]]$stat.group[[g]] df <- object@test[[1]]$df Sigmahat <- Sigma.hat[[g]] nmv <- nrow(Sigmahat) n <- nrow(lavdata@X[[g]]) # Calculate tauhat_1, middle p. 267. # Yuan et al note that tauhat_1 could be negative; # if so, we need to let S.a = Sigmahat. 
      # (see middle p 275)
      ifelse(length(h0.rmsea) == 0,
        tau.hat <- (ghat - df) / (n - 1), # middle p 267
        tau.hat <- df * (h0.rmsea * h0.rmsea)
      ) # middle p 273

      if (tau.hat >= 0) {
        # from Cheng and Wu
        EL <- t(chol(Sigmahat))
        ESE <- forwardsolve(EL, t(forwardsolve(EL, S)))
        d <- eigen(ESE, symmetric = TRUE, only.values = TRUE)$values
        if ("a" %in% names(list(...))) {
          a <- list(...)$a
        } else {
          # Find a to minimize g.a
          a <- search.a(tau.hat, d, nmv)
        }
        # Calculate S_a (p. 267)
        S.a <- a * S + (1 - a) * Sigmahat
      } else {
        S.a <- Sigmahat
      }

      # Transform the data (p. 263)
      S.a.sqrt <- lav_matrix_symmetric_sqrt(S.a)
      S.inv.sqrt <- lav_matrix_symmetric_sqrt(lavsamplestats@icov[[g]])
      X <- lavdata@X[[g]]
      X <- X %*% S.inv.sqrt %*% S.a.sqrt
      # transformed data
      dataX[[g]] <- X
    }
  }

  # run bootstraps
  fn <- function(b) {
    # create bootstrap sample, and generate new 'data' object
    if (type == "bollen.stine" || type == "ordinary" || type == "yuan") {
      # take a bootstrap sample for each group
      BOOT.idx <- vector("list", length = lavdata@ngroups)
      # Note: we generate the bootstrap indices separately for each
      # group, in order to ensure the group sizes do not change!
      for (g in 1:lavdata@ngroups) {
        stopifnot(nrow(lavdata@X[[g]]) > 1L)
        boot.idx <- sample.int(nrow(lavdata@X[[g]]), replace = TRUE)
        BOOT.idx[[g]] <- boot.idx
        dataX[[g]] <- dataX[[g]][boot.idx, , drop = FALSE]
      }
      newData <- lav_data_update(
        lavdata = lavdata, newX = dataX,
        BOOT.idx = BOOT.idx, lavoptions = lavoptions
      )
    } else {
      # parametric!
      for (g in 1:lavdata@ngroups) {
        dataX[[g]] <- MASS::mvrnorm(
          n = lavdata@nobs[[g]],
          Sigma = Sigma.hat[[g]],
          mu = Mu.hat[[g]]
        )
      }
      newData <- lav_data_update(
        lavdata = lavdata, newX = dataX,
        lavoptions = lavoptions
      )
    }

    # verbose
    lav_verbose(current.verbose) # reset if needed
    if (lav_verbose()) cat(" ... bootstrap draw number:", sprintf("%4d", b))

    bootSampleStats <- try(lav_samplestats_from_data(
      lavdata = newData, lavoptions = lavoptions
    ), silent = TRUE)
    if (inherits(bootSampleStats, "try-error")) {
      if (lav_verbose()) {
        cat(" FAILED: creating sample statistics\n")
        cat(bootSampleStats[1])
      }
      out <- as.numeric(NA)
      attr(out, "nonadmissible.flag") <- TRUE
      if (keep.idx) {
        attr(out, "BOOT.idx") <- BOOT.idx
      }
      return(out)
    }

    # do we need to update Model slot? only if we have fixed exogenous
    # covariates, as their variances/covariances are stored in GLIST
    if (lavmodel@fixed.x && length(vnames(lavpartable, "ov.x")) > 0L) {
      model.boot <- NULL
    } else {
      model.boot <- lavmodel
    } # override option

    # fit model on bootstrap sample
    fit.boot <- suppressWarnings(lavaan(
      slotOptions = lavoptions,
      slotParTable = lavpartable,
      slotModel = model.boot,
      slotSampleStats = bootSampleStats,
      slotData = lavdata
    ))
    if (!fit.boot@optim$converged) {
      if (lav_verbose()) cat(" FAILED: no convergence\n")
      out <- as.numeric(NA)
      attr(out, "nonadmissible.flag") <- TRUE
      if (keep.idx) {
        attr(out, "BOOT.idx") <- BOOT.idx
      }
      return(out)
    }

    # extract information we need
    if (is.null(object)) { # internal use only!
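      # (matching the containers allocated earlier: the raw free-parameter
      # vector, the first test statistic, or both)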
if (FUN == "coef") { out <- fit.boot@optim$x } else if (FUN == "test") { out <- fit.boot@test[[1L]]$stat } else if (FUN == "coeftest") { out <- c(fit.boot@optim$x, fit.boot@test[[1L]]$stat) } } else { # general use out <- try(as.numeric(FUN(fit.boot, ...)), silent = TRUE) } if (inherits(out, "try-error")) { if (lav_verbose()) cat(" FAILED: applying FUN to fit.boot\n") out <- as.numeric(NA) attr(out, "nonadmissible.flag") <- TRUE if (keep.idx) { attr(out, "BOOT.idx") <- BOOT.idx } return(out) } # check if the solution is admissible admissible.flag <- suppressWarnings(lavInspect(fit.boot, "post.check")) attr(out, "nonadmissible.flag") <- !admissible.flag if (lav_verbose()) { cat( " OK -- niter = ", sprintf("%3d", fit.boot@optim$iterations), " fx = ", sprintf("%11.9f", fit.boot@optim$fx), if (admissible.flag) " " else "n", "\n" ) } if (keep.idx) { # add BOOT.idx (for all groups) attr(out, "BOOT.idx") <- BOOT.idx } out } # end-of-fn # get parallelization options parallel <- lavoptions$parallel[1] ncpus <- lavoptions$ncpus cl <- lavoptions[["cl"]] # often NULL iseed <- lavoptions[["iseed"]] # often NULL # the next 10 lines are borrowed from the boot package have_mc <- have_snow <- FALSE if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") { have_mc <- .Platform$OS.type != "windows" } else if (parallel == "snow") have_snow <- TRUE if (!have_mc && !have_snow) ncpus <- 1L loadNamespace("parallel") # before recording seed! } # iseed: # this follows a proposal of Shu Fai Cheung (see github issue #240) # - iseed is used for both serial and parallel # - if iseed is not set, iseed is generated + .Random.seed created/updated # -> tmp.seed <- NA # - if iseed is set: don't touch .Random.seed (if it exists) # -> tmp.seed <- .Random.seed (if it exists) # -> tmp.seed <- NULL (if it does not exist) if (is.null(iseed)) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { runif(1) } # identical(temp.seed, NA): Will not change .Random.seed in GlobalEnv temp.seed <- NA iseed <- runif(1, 0, 999999999) } else { if (exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { temp.seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) } else { # is.null(temp.seed): Will remove .Random.seed in GlobalEnv # if serial. # If parallel, .Random.seed will not be touched. temp.seed <- NULL } } if (!(ncpus > 1L && (have_mc || have_snow))) { # Only for serial set.seed(iseed) } # this is adapted from the boot function in package boot RR <- R if (lav_verbose()) { cat("\n") } res <- if (ncpus > 1L && (have_mc || have_snow)) { if (have_mc) { RNGkind_old <- RNGkind() # store current kind RNGkind("L'Ecuyer-CMRG") # to allow for reproducible results set.seed(iseed) parallel::mclapply(seq_len(RR), fn, mc.cores = ncpus) } else if (have_snow) { list(...) 
# evaluate any promises if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) # # No need for # if(RNGkind()[1L] == "L'Ecuyer-CMRG") # clusterSetRNGStream() always calls `RNGkind("L'Ecuyer-CMRG")` parallel::clusterSetRNGStream(cl, iseed = iseed) res <- parallel::parLapply(cl, seq_len(RR), fn) parallel::stopCluster(cl) res } else { parallel::parLapply(cl, seq_len(RR), fn) } } } else { lapply(seq_len(RR), fn) } # restore old RNGkind() if (ncpus > 1L && have_mc) { RNGkind(RNGkind_old[1], RNGkind_old[2], RNGkind_old[3]) } # fill in container t.star[] <- do.call("rbind", res) # handle errors error.idx <- which(sapply(res, function(x) is.na(x[1L]))) attr(t.star, "error.idx") <- error.idx # could be integer(0L) # handle nonadmissible solutions if (check.post) { notok <- which(sapply(res, attr, "nonadmissible.flag")) if (length(error.idx) > 0L) { notok <- notok[-which(notok %in% error.idx)] } attr(t.star, "nonadmissible") <- notok } # store iseed attr(t.star, "seed") <- iseed # handle temp.seed if (!is.null(temp.seed) && !identical(temp.seed, NA)) { assign(".Random.seed", temp.seed, envir = .GlobalEnv) } else if (is.null(temp.seed) && !(ncpus > 1L && (have_mc || have_snow))) { # serial rm(.Random.seed, pos = 1) } else if (is.null(temp.seed) && (ncpus > 1L && have_mc)) { # parallel/multicore only rm(.Random.seed, pos = 1) # because set used set.seed() } # store BOOT.idx per group if (keep.idx) { BOOT.idx <- vector("list", length = lavsamplestats@ngroups) for (g in 1:lavsamplestats@ngroups) { # note that failed runs (NULL) are removed (for now) BOOT.idx[[g]] <- do.call( "rbind", lapply(res, function(x) attr(x, "BOOT.idx")[[g]]) ) } attr(t.star, "boot.idx") <- BOOT.idx } # # No use, as boot package stores the sample indices differently # # See boot:::boot.array() versus lav_utils_bootstrap_indices() # if(return.boot) { # # mimic output boot function # # if(is.null(object)) { # stop("lavaan ERROR: return.boot = TRUE requires a full lavaan object") # } # # # we start with ordinary only for now # stopifnot(type == "ordinary") # # if(! type %in% c("ordinary", "parametric")) { # stop("lavaan ERROR: only ordinary and parametric bootstrap are supported if return.boot = TRUE") # } else { # sim <- type # } # # statistic. <- function(data, idx) { # data.boot <- data[idx,] # fit.boot <- update(object, data = data.boot) # out <- try(FUN(fit.boot, ...), silent = TRUE) # if(inherits(out, "try-error")) { # out <- rep(as.numeric(NA), length(t0)) # } # out # } # attr(t.star, "seed") <- NULL # attr(t.star, "nonadmissible") <- NULL # out <- list(t0 = t0, t = t.star, R = RR, # data = lavInspect(object, "data"), # seed = iseed, statistic = statistic., # sim = sim, call = mc) # # #if(sim == "parametric") { # # ran.gen. <- function() {} # TODO # # out <- c(out, list(ran.gen = ran.gen, mle = mle)) # #} else if(sim == "ordinary") { # stype <- "i" # strata <- rep(1, nobs(object)) # weights <- 1/tabulate(strata)[strata] # out <- c(out, list(stype = stype, strata = strata, # weights = weights)) # #} # # class(out) <- "boot" # return(out) # } # t.star } lavaan/R/lav_model_implied.R0000644000176200001440000000670714627656441015536 0ustar liggesusers# compute model implied statistics # per block # YR 7 May 2022: add cov.x and mean.x if conditional.x (so that we do # no longer depend on SampleStats) lav_model_implied <- function(lavmodel = NULL, GLIST = NULL, delta = TRUE) { stopifnot(inherits(lavmodel, "lavModel")) # state or final? 
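  # (GLIST = NULL means: use the parameter values currently stored in
  # lavmodel@GLIST; a user-supplied GLIST evaluates the implied moments at
  # alternative parameter values instead)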
  if (is.null(GLIST)) GLIST <- lavmodel@GLIST

  # model-implied variance/covariance matrix ('sigma hat')
  Sigma.hat <- computeSigmaHat(
    lavmodel = lavmodel, GLIST = GLIST,
    delta = delta
  )

  # model-implied mean structure ('mu hat')
  if (lavmodel@meanstructure) {
    Mu.hat <- computeMuHat(lavmodel = lavmodel, GLIST = GLIST)
  } else {
    Mu.hat <- vector("list", length = lavmodel@nblocks)
  }

  # if conditional.x, slopes, cov.x, mean.x
  if (lavmodel@conditional.x) {
    SLOPES <- computePI(lavmodel = lavmodel, GLIST = GLIST)
    # per block, because for some blocks, cov.x may not exist
    COV.X <- vector("list", lavmodel@nblocks)
    MEAN.X <- vector("list", lavmodel@nblocks)
    for (b in seq_len(lavmodel@nblocks)) {
      mm.in.block <- (seq_len(lavmodel@nmat[b]) +
        cumsum(c(0, lavmodel@nmat))[b])
      MLIST <- lavmodel@GLIST[mm.in.block]
      cov.x.idx <- which(names(MLIST) == "cov.x")
      if (length(cov.x.idx) > 0L) {
        COV.X[[b]] <- MLIST[[cov.x.idx]]
      } else {
        COV.X[[b]] <- matrix(0, 0L, 0L)
      }
      mean.x.idx <- which(names(MLIST) == "mean.x")
      if (length(mean.x.idx) > 0L) {
        MEAN.X[[b]] <- MLIST[[mean.x.idx]]
      } else {
        MEAN.X[[b]] <- matrix(0, 0L, 1L)
      }
    }
  } else {
    SLOPES <- vector("list", length = lavmodel@nblocks)
  }

  # if categorical, model-implied thresholds
  if (lavmodel@categorical) {
    TH <- computeTH(lavmodel = lavmodel, GLIST = GLIST)
  } else {
    TH <- vector("list", length = lavmodel@nblocks)
  }

  if (lavmodel@group.w.free) {
    w.idx <- which(names(lavmodel@GLIST) == "gw")
    GW <- unname(GLIST[w.idx])
    GW <- lapply(GW, as.numeric)
  } else {
    GW <- vector("list", length = lavmodel@nblocks)
  }

  if (lavmodel@conditional.x) {
    implied <- list(
      res.cov = Sigma.hat, res.int = Mu.hat, res.slopes = SLOPES,
      cov.x = COV.X, mean.x = MEAN.X, res.th = TH, group.w = GW
    )
  } else {
    implied <- list(cov = Sigma.hat, mean = Mu.hat, th = TH, group.w = GW)
  }

  implied
}

# convert 'conditional.x = TRUE' to 'conditional.x = FALSE'
lav_model_implied_cond2uncond <- function(lavimplied) {
  # check for res.cov
  if (is.null(lavimplied$res.cov[[1]])) {
    # already unconditional
    return(lavimplied)
  } else {
    nblocks <- length(lavimplied$res.cov)
  }

  COV <- vector("list", length = nblocks)
  MEAN <- vector("list", length = nblocks)

  # reconstruct COV/MEAN per block
  for (b in seq_len(nblocks)) {
    res.Sigma <- lavimplied$res.cov[[b]]
    res.slopes <- lavimplied$res.slopes[[b]]
    res.int <- lavimplied$res.int[[b]]
    S.xx <- lavimplied$cov.x[[b]]
    M.x <- lavimplied$mean.x[[b]]

    S.yx <- res.slopes %*% S.xx
    S.xy <- t(S.yx)
    S.yy <- res.Sigma + tcrossprod(S.yx, res.slopes)
    COV[[b]] <- rbind(cbind(S.yy, S.yx), cbind(S.xy, S.xx))

    Mu.y <- as.vector(res.int + res.slopes %*% M.x)
    Mu.x <- as.vector(M.x)
    MEAN[[b]] <- matrix(c(Mu.y, Mu.x), ncol = 1L)
  }

  # we ignore res.th for now, as we do not support categorical data
  # in the two-level setting anyway
  implied <- list(
    cov = COV, mean = MEAN,
    th = lavimplied$res.th, group.w = lavimplied$group.w
  )
  implied
}
lavaan/R/lav_representation_lisrel.R
# and matrix-representation specific functions:
# - computeSigmaHat
# - computeMuHat
# - derivative.F

# initial version: YR 2011-01-21: LISREL stuff
# updates: YR 2011-12-01: group specific extraction
#          YR 2012-05-17: thresholds
#          YR 2021-10-04: rename representation.LISREL -> lav_lisrel
lav_lisrel <- function(lavpartable = NULL, target = NULL,
                       extra = FALSE, remove.nonexisting = TRUE) {
  # prepare target list
  if (is.null(target)) target <- lavpartable
  stopifnot(!is.null(target$block))

  # prepare output
  N <- length(target$lhs)
  tmp.mat <- character(N)
  tmp.row <- integer(N)
  tmp.col <-
integer(N) # global settings meanstructure <- any(lavpartable$op == "~1") categorical <- any(lavpartable$op == "|") group.w.free <- any(lavpartable$lhs == "group" & lavpartable$op == "%") # gamma?only if conditional.x if (any(lavpartable$op %in% c("~", "<~") & lavpartable$exo == 1L)) { gamma <- TRUE } else { gamma <- FALSE } # number of blocks nblocks <- lav_partable_nblocks(lavpartable) # multilevel? nlevels <- lav_partable_nlevels(lavpartable) ngroups <- lav_partable_ngroups(lavpartable) ov.dummy.names.nox <- vector("list", nblocks) ov.dummy.names.x <- vector("list", nblocks) if (extra) { REP.mmNames <- vector("list", nblocks) REP.mmNumber <- vector("list", nblocks) REP.mmRows <- vector("list", nblocks) REP.mmCols <- vector("list", nblocks) REP.mmDimNames <- vector("list", nblocks) REP.mmSymmetric <- vector("list", nblocks) } for (g in 1:nblocks) { # info from user model per block if (gamma) { ov.names <- vnames(lavpartable, "ov.nox", block = g) } else { ov.names <- vnames(lavpartable, "ov", block = g) } nvar <- length(ov.names) lv.names <- vnames(lavpartable, "lv", block = g) nfac <- length(lv.names) ov.th <- vnames(lavpartable, "th", block = g) nth <- length(ov.th) ov.names.x <- vnames(lavpartable, "ov.x", block = g) nexo <- length(ov.names.x) ov.names.nox <- vnames(lavpartable, "ov.nox", block = g) # in this representation, we need to create 'phantom/dummy' latent # variables for all `x' and `y' variables not in lv.names # (only y if conditional.x = TRUE) # regression dummys if (gamma) { tmp.names <- unique(lavpartable$lhs[(lavpartable$op == "~" | lavpartable$op == "<~") & lavpartable$block == g]) # new in 0.6-12: fix for multilevel + conditional.x: splitted ov.x # are removed from ov.x if (nlevels > 1L) { if (ngroups == 1L) { OTHER.BLOCK.NAMES <- lav_partable_vnames(lavpartable, "ov", block = seq_len(nblocks)[-g] ) } else { # TEST ME this.group <- ceiling(g / nlevels) blocks.within.group <- (this.group - 1L) * nlevels + seq_len(nlevels) OTHER.BLOCK.NAMES <- lav_partable_vnames(lavpartable, "ov", block = blocks.within.group[-g] ) } if (length(ov.names.x) > 0L) { idx <- which(ov.names.x %in% OTHER.BLOCK.NAMES) if (length(idx) > 0L) { tmp.names <- unique(c(tmp.names, ov.names.x[idx])) ov.names.nox <- unique(c(ov.names.nox, ov.names.x[idx])) ov.names.x <- ov.names.x[-idx] nexo <- length(ov.names.x) ov.names <- ov.names.nox nvar <- length(ov.names) } } } } else { tmp.names <- unique(c( lavpartable$lhs[(lavpartable$op == "~" | lavpartable$op == "<~") & lavpartable$block == g], lavpartable$rhs[(lavpartable$op == "~" | lavpartable$op == "<~") & lavpartable$block == g] )) } dummy.names1 <- tmp.names[!tmp.names %in% lv.names] # covariances involving dummys dummy.cov.idx <- which(lavpartable$op == "~~" & lavpartable$block == g & (lavpartable$lhs %in% dummy.names1 | lavpartable$rhs %in% dummy.names1)) # new in 0.5-21: also include covariances involving these covariances... 
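    # (a second pass: any '~~' row whose lhs (rhs) appears among the lhs
    # (rhs) of the covariances found above is pulled in as well, so that
    # variables linked to a dummy through a covariance also receive a
    # phantom latent)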
dummy.cov.idx1 <- which(lavpartable$op == "~~" & lavpartable$block == g & (lavpartable$lhs %in% lavpartable$lhs[dummy.cov.idx] | lavpartable$rhs %in% lavpartable$rhs[dummy.cov.idx])) dummy.cov.idx <- unique(c(dummy.cov.idx, dummy.cov.idx1)) dummy.names2 <- unique(c( lavpartable$lhs[dummy.cov.idx], lavpartable$rhs[dummy.cov.idx] )) # new in 0.6-7: ~~ between latent and observed dummy.cov.ov.lv.idx1 <- which(lavpartable$op == "~~" & lavpartable$block == g & lavpartable$lhs %in% ov.names & lavpartable$rhs %in% lv.names) dummy.cov.ov.lv.idx2 <- which(lavpartable$op == "~~" & lavpartable$block == g & lavpartable$lhs %in% lv.names & lavpartable$rhs %in% ov.names) dummy.names3 <- unique(c( lavpartable$lhs[dummy.cov.ov.lv.idx1], lavpartable$rhs[dummy.cov.ov.lv.idx2] )) # new in 0.6-10: ~~ between observed and observed, but not in ~ dummy.orphan.idx <- which(lavpartable$op == "~~" & lavpartable$block == g & lavpartable$lhs %in% ov.names & lavpartable$rhs %in% ov.names & (!lavpartable$lhs %in% c( dummy.names1, dummy.names2 ) | !lavpartable$rhs %in% c( dummy.names1, dummy.names2 ))) # collect all dummy variables dummy.names <- unique(c(dummy.names1, dummy.names2, dummy.names3)) if (length(dummy.names)) { # make sure order is the same as ov.names ov.dummy.names.nox[[g]] <- ov.names.nox[ov.names.nox %in% dummy.names] ov.dummy.names.x[[g]] <- ov.names.x[ov.names.x %in% dummy.names] # combine them, make sure order is identical to ov.names tmp <- ov.names[ov.names %in% dummy.names] # same for ov.names.x (if they are not in ov.names) (conditional.x) if (length(ov.names.x) > 0L) { tmp.x <- ov.names.x[ov.names.x %in% dummy.names] tmp <- unique(c(tmp, tmp.x)) } # extend lv.names lv.names <- c(lv.names, tmp) nfac <- length(lv.names) # add 'dummy' =~ entries dummy.mat <- rep("lambda", length(dummy.names)) } else { ov.dummy.names.nox[[g]] <- character(0) ov.dummy.names.x[[g]] <- character(0) } # 1a. "=~" regular indicators idx <- which(target$block == g & target$op == "=~" & !(target$rhs %in% lv.names)) tmp.mat[idx] <- "lambda" tmp.row[idx] <- match(target$rhs[idx], ov.names) tmp.col[idx] <- match(target$lhs[idx], lv.names) # 1b. "=~" regular higher-order lv indicators idx <- which(target$block == g & target$op == "=~" & !(target$rhs %in% ov.names)) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$rhs[idx], lv.names) tmp.col[idx] <- match(target$lhs[idx], lv.names) # 1c. "=~" indicators that are both in ov and lv idx <- which(target$block == g & target$op == "=~" & target$rhs %in% ov.names & target$rhs %in% lv.names) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$rhs[idx], lv.names) tmp.col[idx] <- match(target$lhs[idx], lv.names) # 2. "~" regressions if (gamma) { # gamma idx <- which(target$rhs %in% ov.names.x & target$block == g & (target$op == "~" | target$op == "<~")) tmp.mat[idx] <- "gamma" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- match(target$rhs[idx], ov.names.x) # beta idx <- which(!target$rhs %in% ov.names.x & target$block == g & (target$op == "~" | target$op == "<~")) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- match(target$rhs[idx], lv.names) } else { idx <- which(target$block == g & (target$op == "~" | target$op == "<~")) tmp.mat[idx] <- "beta" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- match(target$rhs[idx], lv.names) } # 3a. 
"~~" ov idx <- which(target$block == g & target$op == "~~" & !(target$lhs %in% lv.names)) tmp.mat[idx] <- "theta" tmp.row[idx] <- match(target$lhs[idx], ov.names) tmp.col[idx] <- match(target$rhs[idx], ov.names) # 3aa. "~~" ov.x if (gamma) { idx <- which(target$block == g & target$op == "~~" & (target$lhs %in% ov.names.x)) tmp.mat[idx] <- "cov.x" tmp.row[idx] <- match(target$lhs[idx], ov.names.x) tmp.col[idx] <- match(target$rhs[idx], ov.names.x) } # 3b. "~~" lv idx <- which(target$block == g & target$op == "~~" & target$rhs %in% lv.names) tmp.mat[idx] <- "psi" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- match(target$rhs[idx], lv.names) # 4a. "~1" ov idx <- which(target$block == g & target$op == "~1" & !(target$lhs %in% lv.names)) tmp.mat[idx] <- "nu" tmp.row[idx] <- match(target$lhs[idx], ov.names) tmp.col[idx] <- 1L # 4aa, "~1" ov.x if (gamma) { idx <- which(target$block == g & target$op == "~1" & (target$lhs %in% ov.names.x)) tmp.mat[idx] <- "mean.x" tmp.row[idx] <- match(target$lhs[idx], ov.names.x) tmp.col[idx] <- 1L } # 4b. "~1" lv idx <- which(target$block == g & target$op == "~1" & target$lhs %in% lv.names) tmp.mat[idx] <- "alpha" tmp.row[idx] <- match(target$lhs[idx], lv.names) tmp.col[idx] <- 1L # 5. "|" th LABEL <- paste(target$lhs, target$op, target$rhs, sep = "") idx <- which(target$block == g & target$op == "|" & LABEL %in% ov.th) TH <- paste(target$lhs[idx], "|", target$rhs[idx], sep = "") tmp.mat[idx] <- "tau" tmp.row[idx] <- match(TH, ov.th) tmp.col[idx] <- 1L # 6. "~*~" scales idx <- which(target$block == g & target$op == "~*~") tmp.mat[idx] <- "delta" tmp.row[idx] <- match(target$lhs[idx], ov.names) tmp.col[idx] <- 1L # new 0.5-12: catch lower-elements in theta/psi idx.lower <- which(tmp.mat %in% c("theta", "psi") & tmp.row > tmp.col) if (length(idx.lower) > 0L) { tmp <- tmp.row[idx.lower] tmp.row[idx.lower] <- tmp.col[idx.lower] tmp.col[idx.lower] <- tmp } # new 0.5-16: group weights idx <- which(target$block == g & target$lhs == "group" & target$op == "%") tmp.mat[idx] <- "gw" tmp.row[idx] <- 1L tmp.col[idx] <- 1L if (extra) { # mRows mmRows <- list( tau = nth, delta = nvar, nu = nvar, lambda = nvar, theta = nvar, alpha = nfac, beta = nfac, gamma = nfac, cov.x = nexo, mean.x = nexo, gw = 1L, psi = nfac ) # mCols mmCols <- list( tau = 1L, delta = 1L, nu = 1L, lambda = nfac, theta = nvar, alpha = 1L, beta = nfac, gamma = nexo, cov.x = nexo, mean.x = 1L, gw = 1L, psi = nfac ) # dimNames for LISREL model matrices mmDimNames <- list( tau = list(ov.th, "threshold"), delta = list(ov.names, "scales"), nu = list(ov.names, "intercept"), lambda = list(ov.names, lv.names), theta = list(ov.names, ov.names), alpha = list(lv.names, "intercept"), beta = list(lv.names, lv.names), gamma = list(lv.names, ov.names.x), cov.x = list(ov.names.x, ov.names.x), mean.x = list(ov.names.x, "intercepts"), gw = list("group", "weight"), psi = list(lv.names, lv.names) ) # isSymmetric mmSymmetric <- list( tau = FALSE, delta = FALSE, nu = FALSE, lambda = FALSE, theta = TRUE, alpha = FALSE, beta = FALSE, gamma = FALSE, cov.x = TRUE, mean.x = FALSE, gw = FALSE, psi = TRUE ) # which mm's do we need? (always include lambda, theta and psi) # new: 0.6 this block only!! 
IDX <- which(target$block == g) mmNames <- c("lambda", "theta", "psi") if ("beta" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "beta") } if (meanstructure) { mmNames <- c(mmNames, "nu", "alpha") } if ("tau" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "tau") } if ("delta" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "delta") } if ("gamma" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "gamma") } if ("gw" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "gw") } if ("cov.x" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "cov.x") } if ("mean.x" %in% tmp.mat[IDX]) { mmNames <- c(mmNames, "mean.x") } REP.mmNames[[g]] <- mmNames REP.mmNumber[[g]] <- length(mmNames) REP.mmRows[[g]] <- unlist(mmRows[mmNames]) REP.mmCols[[g]] <- unlist(mmCols[mmNames]) REP.mmDimNames[[g]] <- mmDimNames[mmNames] REP.mmSymmetric[[g]] <- unlist(mmSymmetric[mmNames]) } # extra } # nblocks REP <- list( mat = tmp.mat, row = tmp.row, col = tmp.col ) # remove non-existing (NAs)? # here we remove `non-existing' parameters; this depends on the matrix # representation (eg in LISREL rep, there is no ~~ between lv and ov) # if(remove.nonexisting) { # idx <- which( nchar(REP$mat) > 0L & # !is.na(REP$row) & REP$row > 0L & # !is.na(REP$col) & REP$col > 0L ) # # but keep ==, :=, etc. # idx <- c(idx, which(lavpartable$op %in% c("==", ":=", "<", ">"))) # REP$mat <- REP$mat[idx] # REP$row <- REP$row[idx] # REP$col <- REP$col[idx] # # always add 'ov.dummy.*.names' attributes attr(REP, "ov.dummy.names.nox") <- ov.dummy.names.nox attr(REP, "ov.dummy.names.x") <- ov.dummy.names.x if (extra) { attr(REP, "mmNames") <- REP.mmNames attr(REP, "mmNumber") <- REP.mmNumber attr(REP, "mmRows") <- REP.mmRows attr(REP, "mmCols") <- REP.mmCols attr(REP, "mmDimNames") <- REP.mmDimNames attr(REP, "mmSymmetric") <- REP.mmSymmetric } REP } # ETA: # 1) EETA # 2) EETAx # 3) VETA # 4) VETAx # 1) EETA # compute E(ETA): expected value of latent variables (marginal over x) # - if no eXo (and GAMMA): # E(ETA) = (I-B)^-1 ALPHA # - if eXo and GAMMA: # E(ETA) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA mean.x computeEETA.LISREL <- function(MLIST = NULL, mean.x = NULL, sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL) { BETA <- MLIST$beta GAMMA <- MLIST$gamma # ALPHA? (reconstruct, but no 'fix') ALPHA <- .internal_get_ALPHA( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # BETA? if (!is.null(BETA)) { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) # GAMMA? if (!is.null(GAMMA)) { eeta <- as.vector(IB.inv %*% ALPHA + IB.inv %*% GAMMA %*% mean.x) } else { eeta <- as.vector(IB.inv %*% ALPHA) } } else { # GAMMA? 
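    # (without BETA, E(eta) reduces to ALPHA, shifted by GAMMA %*% mean.x
    # when exogenous covariates are present)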
if (!is.null(GAMMA)) { eeta <- as.vector(ALPHA + GAMMA %*% mean.x) } else { eeta <- as.vector(ALPHA) } } eeta } # 2) EETAx # compute E(ETA|x_i): conditional expected value of latent variable, # given specific value of x_i # - if no eXo (and GAMMA): # E(ETA) = (I-B)^-1 ALPHA # we return a matrix of size [nobs x nfac] replicating E(ETA) # - if eXo and GAMMA: # E(ETA|x_i) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i # we return a matrix of size [nobs x nfac] # computeEETAx.LISREL <- function(MLIST = NULL, eXo = NULL, N = nrow(eXo), sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL) { LAMBDA <- MLIST$lambda BETA <- MLIST$beta GAMMA <- MLIST$gamma nfac <- ncol(LAMBDA) # if eXo, N must be nrow(eXo) if (!is.null(eXo)) { N <- nrow(eXo) } # ALPHA? ALPHA <- .internal_get_ALPHA( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # construct [nobs x nfac] matrix (repeating ALPHA) EETA <- matrix(ALPHA, N, nfac, byrow = TRUE) # put back eXo values if dummy if (length(ov.x.dummy.lv.idx) > 0L) { EETA[, ov.x.dummy.lv.idx] <- eXo } # BETA? if (!is.null(BETA)) { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) EETA <- EETA %*% t(IB.inv) } # GAMMA? if (!is.null(GAMMA)) { if (!is.null(BETA)) { EETA <- EETA + eXo %*% t(IB.inv %*% GAMMA) } else { EETA <- EETA + eXo %*% t(GAMMA) } } EETA } # 3) VETA # compute V(ETA): variances/covariances of latent variables # - if no eXo (and GAMMA) # V(ETA) = (I-B)^-1 PSI (I-B)^-T # - if eXo and GAMMA: (cfr lisrel submodel 3a with ksi=x) # V(ETA) = (I-B)^-1 [ GAMMA cov.x t(GAMMA) + PSI] (I-B)^-T computeVETA.LISREL <- function(MLIST = NULL) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) PSI <- MLIST$psi THETA <- MLIST$theta BETA <- MLIST$beta GAMMA <- MLIST$gamma if (!is.null(GAMMA)) { COV.X <- MLIST$cov.x # we treat 'x' as 'ksi' in the LISREL model; cov.x is PHI PSI <- tcrossprod(GAMMA %*% COV.X, GAMMA) + PSI } # beta? if (is.null(BETA)) { VETA <- PSI } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) VETA <- tcrossprod(IB.inv %*% PSI, IB.inv) } VETA } # 4) VETAx # compute V(ETA|x_i): variances/covariances of latent variables # V(ETA) = (I-B)^-1 PSI (I-B)^-T + remove dummies computeVETAx.LISREL <- function(MLIST = NULL, lv.dummy.idx = NULL) { PSI <- MLIST$psi BETA <- MLIST$beta # beta? if (is.null(BETA)) { VETA <- PSI } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) VETA <- tcrossprod(IB.inv %*% PSI, IB.inv) } # remove dummy lv? 
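  # (drop the phantom latents that merely stand in for observed variables,
  # so that the result covers the substantive factors only)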
if (!is.null(lv.dummy.idx)) { VETA <- VETA[-lv.dummy.idx, -lv.dummy.idx, drop = FALSE] } VETA } # Y # 1) EY # 2) EYx # 3) EYetax # 4) VY # 5) VYx # 6) VYetax # 1) EY # compute E(Y): expected value of observed # E(Y) = NU + LAMBDA %*% E(eta) # = NU + LAMBDA %*% (IB.inv %*% ALPHA) # no exo, no GAMMA # = NU + LAMBDA %*% (IB.inv %*% ALPHA + IB.inv %*% GAMMA %*% mean.x) # eXo # if DELTA -> E(Y) = delta * E(Y) # # this is similar to computeMuHat but: # - we ALWAYS compute NU+ALPHA, even if meanstructure=FALSE # - never used if GAMMA, since we then have categorical variables, and the # 'part 1' structure contains the (thresholds +) intercepts, not # the means computeEY.LISREL <- function(MLIST = NULL, mean.x = NULL, sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda # get NU, but do not 'fix' NU <- .internal_get_NU( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # compute E(ETA) EETA <- computeEETA.LISREL( MLIST = MLIST, sample.mean = sample.mean, mean.x = mean.x, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # EY EY <- as.vector(NU) + as.vector(LAMBDA %*% EETA) # if delta, scale if (delta && !is.null(MLIST$delta)) { EY <- EY * as.vector(MLIST$delta) } EY } # 2) EYx # compute E(Y|x_i): expected value of observed, conditional on x_i # E(Y|x_i) = NU + LAMBDA %*% E(eta|x_i) # - if no eXo (and GAMMA): # E(ETA|x_i) = (I-B)^-1 ALPHA # we return a matrix of size [nobs x nfac] replicating E(ETA) # - if eXo and GAMMA: # E(ETA|x_i) = (I-B)^-1 ALPHA + (I-B)^-1 GAMMA x_i # we return a matrix of size [nobs x nfac] # # - we ALWAYS compute NU+ALPHA, even if meanstructure=FALSE # - never used if GAMMA, since we then have categorical variables, and the # 'part 1' structure contains the (thresholds +) intercepts, not # the means computeEYx.LISREL <- function(MLIST = NULL, eXo = NULL, N = nrow(eXo), sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda # get NU, but do not 'fix' NU <- .internal_get_NU( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # compute E(ETA|x_i) EETAx <- computeEETAx.LISREL( MLIST = MLIST, eXo = eXo, N = N, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # EYx EYx <- sweep(tcrossprod(EETAx, LAMBDA), 2L, STATS = NU, FUN = "+") # if delta, scale if (delta && !is.null(MLIST$delta)) { EYx <- sweep(EYx, 2L, STATS = MLIST$delta, FUN = "*") } EYx } # 3) EYetax # compute E(Y|eta_i,x_i): conditional expected value of observed variable # given specific value of eta_i AND x_i # # E(y*_i|eta_i, x_i) = NU + LAMBDA eta_i + KAPPA x_i # # where eta_i = predict(fit) = factor scores OR specific values for eta_i # (as in GH integration) # # if nexo = 0, and eta_i is single row, YHAT is the same for each observation # in this case, we return a single row, unless Nobs > 1L, in which case # we return Nobs identical rows # # NOTE: we assume that any effect 
# of x_i on eta_i has already been taken care of

# categorical version
computeEYetax.LISREL <- function(MLIST = NULL, eXo = NULL, ETA = NULL,
                                 N = nrow(eXo),
                                 sample.mean = NULL,
                                 ov.y.dummy.ov.idx = NULL,
                                 ov.x.dummy.ov.idx = NULL,
                                 ov.y.dummy.lv.idx = NULL,
                                 ov.x.dummy.lv.idx = NULL,
                                 delta = TRUE) {
  LAMBDA <- MLIST$lambda
  BETA <- MLIST$beta

  if (!is.null(eXo)) {
    N <- nrow(eXo)
  } else if (!is.null(N)) {
    # nothing to do
  } else {
    N <- 1L
  }

  # create ETA matrix
  if (nrow(ETA) == 1L) {
    ETA <- matrix(ETA, N, ncol(ETA), byrow = TRUE)
  }

  # always augment ETA with 'dummy values' (0 for ov.y, eXo for ov.x)
  # ndummy <- length(c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx))
  # if(ndummy > 0L) {
  #     ETA2 <- cbind(ETA, matrix(0, N, ndummy))
  # } else {
  ETA2 <- ETA
  # }

  # only if we have dummy ov.y, we need to compute the 'yhat' values
  # beforehand
  if (length(ov.y.dummy.lv.idx) > 0L) {
    # insert eXo values
    if (length(ov.x.dummy.lv.idx) > 0L) {
      ETA2[, ov.x.dummy.lv.idx] <- eXo
    }
    # zero ov.y values
    if (length(ov.y.dummy.lv.idx) > 0L) {
      ETA2[, ov.y.dummy.lv.idx] <- 0
    }
    # ALPHA? (reconstruct, but no 'fix')
    ALPHA <- .internal_get_ALPHA(
      MLIST = MLIST, sample.mean = sample.mean,
      ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
      ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
      ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
      ov.x.dummy.lv.idx = ov.x.dummy.lv.idx
    )
    # BETA?
    if (!is.null(BETA)) {
      ETA2 <- sweep(tcrossprod(ETA2, BETA), 2L, STATS = ALPHA, FUN = "+")
    } else {
      ETA2 <- sweep(ETA2, 2L, STATS = ALPHA, FUN = "+")
    }
    # put back eXo values
    if (length(ov.x.dummy.lv.idx) > 0L) {
      ETA2[, ov.x.dummy.lv.idx] <- eXo
    }
    # put back ETA values for the 'real' latent variables
    dummy.idx <- c(ov.x.dummy.lv.idx, ov.y.dummy.lv.idx)
    if (length(dummy.idx) > 0L) {
      lv.regular.idx <- seq_len(min(dummy.idx) - 1L)
      ETA2[, lv.regular.idx] <- ETA[, lv.regular.idx, drop = FALSE]
    }
  }

  # get NU, but do not 'fix'
  NU <- .internal_get_NU(
    MLIST = MLIST, sample.mean = sample.mean,
    ov.y.dummy.ov.idx = ov.y.dummy.ov.idx,
    ov.x.dummy.ov.idx = ov.x.dummy.ov.idx,
    ov.y.dummy.lv.idx = ov.y.dummy.lv.idx,
    ov.x.dummy.lv.idx = ov.x.dummy.lv.idx
  )

  # EYetax
  EYetax <- sweep(tcrossprod(ETA2, LAMBDA), 2L, STATS = NU, FUN = "+")

  # if delta, scale
  if (delta && !is.null(MLIST$delta)) {
    EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*")
  }

  EYetax
}

# unconditional version
computeEYetax2.LISREL <- function(MLIST = NULL, ETA = NULL,
                                  sample.mean = NULL,
                                  ov.y.dummy.ov.idx = NULL,
                                  ov.x.dummy.ov.idx = NULL,
                                  ov.y.dummy.lv.idx = NULL,
                                  ov.x.dummy.lv.idx = NULL,
                                  delta = TRUE) {
  LAMBDA <- MLIST$lambda
  BETA <- MLIST$beta

  # only if we have dummy ov.y, we need to compute the 'yhat' values
  # beforehand, and impute them in ETA[,ov.y]
  if (length(ov.y.dummy.lv.idx) > 0L) {
    # ALPHA?
(reconstruct, but no 'fix') ALPHA <- .internal_get_ALPHA( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # keep all, but ov.y values OV.NOY <- ETA[, -ov.y.dummy.lv.idx, drop = FALSE] # ov.y rows, non-ov.y cols BETAY <- BETA[ov.y.dummy.lv.idx, -ov.y.dummy.lv.idx, drop = FALSE] # ov.y intercepts ALPHAY <- ALPHA[ov.y.dummy.lv.idx, , drop = FALSE] # impute ov.y values in ETA ETA[, ov.y.dummy.lv.idx] <- sweep(tcrossprod(OV.NOY, BETAY), 2L, STATS = ALPHAY, FUN = "+") } # get NU, but do not 'fix' NU <- .internal_get_NU( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # EYetax EYetax <- sweep(tcrossprod(ETA, LAMBDA), 2L, STATS = NU, FUN = "+") # if delta, scale if (delta && !is.null(MLIST$delta)) { EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*") } EYetax } # unconditional version computeEYetax3.LISREL <- function(MLIST = NULL, ETA = NULL, sample.mean = NULL, mean.x = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda # special case: empty lambda if (ncol(LAMBDA) == 0L) { return(matrix(sample.mean, nrow(ETA), length(sample.mean), byrow = TRUE )) } # lv idx dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) if (length(dummy.idx) > 0L) { nondummy.idx <- seq_len(min(dummy.idx) - 1L) } else { nondummy.idx <- seq_len(ncol(MLIST$lambda)) } # beta? if (is.null(MLIST$beta) || length(ov.y.dummy.lv.idx) == 0L || length(nondummy.idx) == 0L) { LAMBDA..IB.inv <- LAMBDA } else { # only keep those columns of BETA that correspond to the # the `regular' latent variables # (ie. 
ignore the structural part altogether) MLIST2 <- MLIST MLIST2$beta[, dummy.idx] <- 0 IB.inv <- .internal_get_IB.inv(MLIST = MLIST2) LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # compute model-implied means EY <- computeEY.LISREL( MLIST = MLIST, mean.x = mean.x, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) EETA <- computeEETA.LISREL( MLIST = MLIST, sample.mean = sample.mean, mean.x = mean.x, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # center regular lv only ETA[, nondummy.idx] <- sweep(ETA[, nondummy.idx, drop = FALSE], 2L, STATS = EETA[nondummy.idx], FUN = "-" ) # project from lv to ov, if we have any lv if (length(nondummy.idx) > 0) { EYetax <- sweep( tcrossprod( ETA[, nondummy.idx, drop = FALSE], LAMBDA..IB.inv[, nondummy.idx, drop = FALSE] ), 2L, STATS = EY, FUN = "+" ) } else { EYetax <- ETA } # put back eXo variables if (length(ov.x.dummy.lv.idx) > 0L) { EYetax[, ov.x.dummy.ov.idx] <- ETA[, ov.x.dummy.lv.idx, drop = FALSE] } # if delta, scale if (delta && !is.null(MLIST$delta)) { EYetax <- sweep(EYetax, 2L, STATS = MLIST$delta, FUN = "*") } EYetax } # 4) VY # compute the *un*conditional variance/covariance of y: V(Y) or V(Y*) # 'unconditional' model-implied (co)variances # - same as Sigma.hat if all Y are continuous # - diagonal is 1.0 (or delta^2) if categorical # - if also Gamma, cov.x is used (only if conditional.x) # only in THIS case, VY is different from diag(VYx) # # V(Y) = LAMBDA V(ETA) t(LAMBDA) + THETA computeVY.LISREL <- function(MLIST = NULL) { LAMBDA <- MLIST$lambda THETA <- MLIST$theta VETA <- computeVETA.LISREL(MLIST = MLIST) VY <- tcrossprod(LAMBDA %*% VETA, LAMBDA) + THETA VY } # 5) VYx # compute V(Y*|x_i) == model-implied covariance matrix # this equals V(Y*) if no (explicit) eXo no GAMMA computeVYx.LISREL <- computeSigmaHat.LISREL <- function(MLIST = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) PSI <- MLIST$psi THETA <- MLIST$theta BETA <- MLIST$beta # beta? if (is.null(BETA)) { LAMBDA..IB.inv <- LAMBDA } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # compute V(Y*|x_i) VYx <- tcrossprod(LAMBDA..IB.inv %*% PSI, LAMBDA..IB.inv) + THETA # if delta, scale if (delta && !is.null(MLIST$delta)) { DELTA <- diag(MLIST$delta[, 1L], nrow = nvar, ncol = nvar) VYx <- DELTA %*% VYx %*% DELTA } VYx } # 6) VYetax # V(Y | eta_i, x_i) = THETA computeVYetax.LISREL <- function(MLIST = NULL, delta = TRUE) { VYetax <- MLIST$theta nvar <- nrow(MLIST$theta) # if delta, scale if (delta && !is.null(MLIST$delta)) { DELTA <- diag(MLIST$delta[, 1L], nrow = nvar, ncol = nvar) VYetax <- DELTA %*% VYetax %*% DELTA } VYetax } ### compute model-implied sample statistics # # 1) MuHat (similar to EY, but continuous only) # 2) TH # 3) PI # 4) SigmaHat == VYx # compute MuHat for a single block/group; only for the continuous case (no eXo) # # this is a special case of E(Y) where # - we have no (explicit) eXogenous variables # - only continuous computeMuHat.LISREL <- function(MLIST = NULL) { NU <- MLIST$nu ALPHA <- MLIST$alpha LAMBDA <- MLIST$lambda BETA <- MLIST$beta # shortcut if (is.null(ALPHA) || is.null(NU)) { return(matrix(0, nrow(LAMBDA), 1L)) } # beta? 
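  # (with beta present, total effects run through (I - B)^-1, so lambda is
  # post-multiplied by it first)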
if (is.null(BETA)) { LAMBDA..IB.inv <- LAMBDA } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # compute Mu Hat Mu.hat <- NU + LAMBDA..IB.inv %*% ALPHA Mu.hat } # compute TH for a single block/group computeTH.LISREL <- function(MLIST = NULL, th.idx = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) BETA <- MLIST$beta TAU <- MLIST$tau nth <- nrow(TAU) # missing alpha if (is.null(MLIST$alpha)) { ALPHA <- matrix(0, nfac, 1L) } else { ALPHA <- MLIST$alpha } # missing nu if (is.null(MLIST$nu)) { NU <- matrix(0, nvar, 1L) } else { NU <- MLIST$nu } if (is.null(th.idx)) { th.idx <- seq_len(nth) nlev <- rep(1L, nvar) K_nu <- diag(nvar) } else { nlev <- tabulate(th.idx, nbins = nvar) nlev[nlev == 0L] <- 1L K_nu <- matrix(0, sum(nlev), nvar) K_nu[cbind(seq_len(sum(nlev)), rep(seq_len(nvar), times = nlev))] <- 1.0 } # shortcut if (is.null(TAU)) { return(matrix(0, length(th.idx), 1L)) } # beta? if (is.null(BETA)) { LAMBDA..IB.inv <- LAMBDA } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # compute pi0 pi0 <- NU + LAMBDA..IB.inv %*% ALPHA # interleave th's with zeros where we have numeric variables th <- numeric(length(th.idx)) th[th.idx > 0L] <- TAU[, 1L] # compute TH TH <- th - (K_nu %*% pi0) # if delta, scale if (delta && !is.null(MLIST$delta)) { DELTA.diag <- MLIST$delta[, 1L] DELTA.star.diag <- rep(DELTA.diag, times = nlev) TH <- TH * DELTA.star.diag } as.vector(TH) } # compute PI for a single block/group computePI.LISREL <- function(MLIST = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda BETA <- MLIST$beta GAMMA <- MLIST$gamma # shortcut if (is.null(GAMMA)) { return(matrix(0, nrow(LAMBDA), 0L)) } # beta? if (is.null(BETA)) { LAMBDA..IB.inv <- LAMBDA } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # compute PI PI <- LAMBDA..IB.inv %*% GAMMA # if delta, scale if (delta && !is.null(MLIST$delta)) { DELTA.diag <- MLIST$delta[, 1L] PI <- PI * DELTA.diag } PI } computeLAMBDA.LISREL <- function(MLIST = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL, remove.dummy.lv = FALSE) { lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) # fix LAMBDA LAMBDA <- MLIST$lambda if (length(ov.y.dummy.ov.idx) > 0L) { LAMBDA[ov.y.dummy.ov.idx, ] <- MLIST$beta[ov.y.dummy.lv.idx, ] } # remove dummy lv? if (remove.dummy.lv && length(lv.dummy.idx) > 0L) { LAMBDA <- LAMBDA[, -lv.dummy.idx, drop = FALSE] } LAMBDA } computeTHETA.LISREL <- function(MLIST = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL) { ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx) lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) # fix THETA THETA <- MLIST$theta if (length(ov.dummy.idx) > 0L) { THETA[ov.dummy.idx, ov.dummy.idx] <- MLIST$psi[lv.dummy.idx, lv.dummy.idx] } THETA } computeNU.LISREL <- function(MLIST = NULL, sample.mean = sample.mean, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL) { # get NU, but do not 'fix' NU <- .internal_get_NU( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # ALPHA? 
(reconstruct, but no 'fix') ALPHA <- .internal_get_ALPHA( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx) lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) # fix NU if (length(ov.dummy.idx) > 0L) { NU[ov.dummy.idx, 1] <- ALPHA[lv.dummy.idx, 1] } NU } # compute IB.inv .internal_get_IB.inv <- function(MLIST = NULL) { BETA <- MLIST$beta nr <- nrow(MLIST$psi) if (!is.null(BETA)) { tmp <- -BETA tmp[lav_matrix_diag_idx(nr)] <- 1 IB.inv <- solve(tmp) } else { IB.inv <- diag(nr) } IB.inv } # only if ALPHA=NULL but we need it anyway # we 'reconstruct' ALPHA here (including dummy entries), no fixing # # without any dummy variables, this is just the zero vector # but if we have dummy variables, we need to fill in their values # # .internal_get_ALPHA <- function(MLIST = NULL, sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL) { if (!is.null(MLIST$alpha)) { return(MLIST$alpha) } LAMBDA <- MLIST$lambda nfac <- ncol(LAMBDA) ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx) lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) if (length(ov.dummy.idx) > 0L) { ALPHA <- matrix(0, nfac, 1L) # Note: instead of sample.mean, we need 'intercepts' # sample.mean = NU + LAMBDA..IB.inv %*% ALPHA # so, # solve(LAMBDA..IB.inv) %*% (sample.mean - NU) = ALPHA # where # - LAMBDA..IB.inv only contains 'dummy' variables, and is square # - NU elements are not needed (since not in ov.dummy.idx) IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA %*% IB.inv LAMBDA..IB.inv.dummy <- LAMBDA..IB.inv[ov.dummy.idx, lv.dummy.idx] ALPHA[lv.dummy.idx] <- solve(LAMBDA..IB.inv.dummy) %*% sample.mean[ov.dummy.idx] } else { ALPHA <- matrix(0, nfac, 1L) } ALPHA } # only if NU=NULL but we need it anyway # # since we have no meanstructure, we can assume NU is unrestricted # and contains either: # 1) the sample means (if not eXo) # 2) the intercepts, if we have exogenous covariates # since sample.mean = NU + LAMBDA %*% E(eta) # we have NU = sample.mean - LAMBDA %*% E(eta) .internal_get_NU <- function(MLIST = NULL, sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL) { if (!is.null(MLIST$nu)) { return(MLIST$nu) } # if nexo > 0, substract lambda %*% EETA if (length(ov.x.dummy.ov.idx) > 0L) { EETA <- computeEETA.LISREL(MLIST, mean.x = NULL, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # 'regress' NU on X NU <- sample.mean - MLIST$lambda %*% EETA # just to make sure we have exact zeroes for all dummies NU[c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx)] <- 0 } else { # unrestricted mean NU <- sample.mean } NU } .internal_get_KAPPA <- function(MLIST = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL, nexo = NULL) { nvar <- nrow(MLIST$lambda) if (!is.null(MLIST$gamma)) { this.nexo <- ncol(MLIST$gamma) } else if (!is.null(nexo)) { this.nexo <- nexo } else { lav_msg_stop(gettext("nexo not known")) } # create KAPPA KAPPA <- matrix(0, nvar, this.nexo) if (!is.null(MLIST$gamma)) { KAPPA[ov.y.dummy.ov.idx, ] <- MLIST$gamma[ov.y.dummy.lv.idx, , drop = FALSE] } else if 
(length(ov.x.dummy.ov.idx) > 0L) { KAPPA[ov.y.dummy.ov.idx, ] <- MLIST$beta[ov.y.dummy.lv.idx, ov.x.dummy.lv.idx, drop = FALSE ] } KAPPA } # old version of computeEYetax (using 'fixing') computeYHATetax.LISREL <- function(MLIST = NULL, eXo = NULL, ETA = NULL, sample.mean = NULL, ov.y.dummy.ov.idx = NULL, ov.x.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL, ov.x.dummy.lv.idx = NULL, Nobs = 1L) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) lv.dummy.idx <- c(ov.y.dummy.lv.idx, ov.x.dummy.lv.idx) ov.dummy.idx <- c(ov.y.dummy.ov.idx, ov.x.dummy.ov.idx) # exogenous variables? if (is.null(eXo)) { nexo <- 0L } else { nexo <- ncol(eXo) # check ETA rows if (!(nrow(ETA) == 1L || nrow(ETA) == nrow(eXo))) { lav_msg_stop(gettext("!(nrow(ETA) == 1L || nrow(ETA) == nrow(eXo))")) } } # get NU NU <- .internal_get_NU( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # ALPHA? (reconstruct, but no 'fix') ALPHA <- .internal_get_ALPHA( MLIST = MLIST, sample.mean = sample.mean, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx ) # fix NU if (length(lv.dummy.idx) > 0L) { NU[ov.dummy.idx, 1L] <- ALPHA[lv.dummy.idx, 1L] } # fix LAMBDA (remove dummies) ## FIXME -- needed? LAMBDA <- MLIST$lambda if (length(lv.dummy.idx) > 0L) { LAMBDA <- LAMBDA[, -lv.dummy.idx, drop = FALSE] nfac <- ncol(LAMBDA) LAMBDA[ov.y.dummy.ov.idx, ] <- MLIST$beta[ov.y.dummy.lv.idx, seq_len(nfac), drop = FALSE] } # compute YHAT YHAT <- sweep(ETA %*% t(LAMBDA), MARGIN = 2, NU, "+") # Kappa + eXo? # note: Kappa elements are either in Gamma or in Beta if (nexo > 0L) { # create KAPPA KAPPA <- .internal_get_KAPPA( MLIST = MLIST, ov.y.dummy.ov.idx = ov.y.dummy.ov.idx, ov.x.dummy.ov.idx = ov.x.dummy.ov.idx, ov.y.dummy.lv.idx = ov.y.dummy.lv.idx, ov.x.dummy.lv.idx = ov.x.dummy.lv.idx, nexo = nexo ) # expand YHAT if ETA only has 1 row if (nrow(YHAT) == 1L) { YHAT <- sweep(eXo %*% t(KAPPA), MARGIN = 2, STATS = YHAT, FUN = "+") } else { # add fixed part YHAT <- YHAT + (eXo %*% t(KAPPA)) } # put back eXo if (length(ov.x.dummy.ov.idx) > 0L) { YHAT[, ov.x.dummy.ov.idx] <- eXo } } else { # duplicate? if (is.numeric(Nobs) && Nobs > 1L && nrow(YHAT) == 1L) { YHAT <- matrix(YHAT, Nobs, nvar, byrow = TRUE) # YHAT <- YHAT[ rep(1L, Nobs), ] } } # delta? # FIXME: not used here? 
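  # (note: unlike the computeEYetax*() functions above, this older helper
  # returns YHAT without applying the delta scaling)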
# if(!is.null(DELTA)) { # YHAT <- sweep(YHAT, MARGIN=2, DELTA, "*") # } YHAT } # deal with 'dummy' OV.X latent variables # create additional matrices (eg GAMMA), and resize # remove all ov.x related entries MLIST2MLISTX <- function(MLIST = NULL, ov.x.dummy.ov.idx = NULL, ov.x.dummy.lv.idx = NULL) { lv.idx <- ov.x.dummy.lv.idx ov.idx <- ov.x.dummy.ov.idx if (length(lv.idx) == 0L) { return(MLIST) } if (!is.null(MLIST$gamma)) { nexo <- ncol(MLIST$gamma) } else { nexo <- length(ov.x.dummy.ov.idx) } nvar <- nrow(MLIST$lambda) nfac <- ncol(MLIST$lambda) - length(lv.idx) # copy MLISTX <- MLIST # fix LAMBDA: # - remove all ov.x related columns/rows MLISTX$lambda <- MLIST$lambda[-ov.idx, -lv.idx, drop = FALSE] # fix THETA: # - remove ov.x related columns/rows MLISTX$theta <- MLIST$theta[-ov.idx, -ov.idx, drop = FALSE] # fix PSI: # - remove ov.x related columns/rows MLISTX$psi <- MLIST$psi[-lv.idx, -lv.idx, drop = FALSE] # create GAMMA if (length(ov.x.dummy.lv.idx) > 0L) { MLISTX$gamma <- MLIST$beta[-lv.idx, lv.idx, drop = FALSE] } # fix BETA (remove if empty) if (!is.null(MLIST$beta)) { MLISTX$beta <- MLIST$beta[-lv.idx, -lv.idx, drop = FALSE] if (ncol(MLISTX$beta) == 0L) MLISTX$beta <- NULL } # fix NU if (!is.null(MLIST$nu)) { MLISTX$nu <- MLIST$nu[-ov.idx, 1L, drop = FALSE] } # fix ALPHA if (!is.null(MLIST$alpha)) { MLISTX$alpha <- MLIST$alpha[-lv.idx, 1L, drop = FALSE] } MLISTX } # create MLIST from MLISTX MLISTX2MLIST <- function(MLISTX = NULL, ov.x.dummy.ov.idx = NULL, ov.x.dummy.lv.idx = NULL, mean.x = NULL, cov.x = NULL) { lv.idx <- ov.x.dummy.lv.idx ndum <- length(lv.idx) ov.idx <- ov.x.dummy.ov.idx if (length(lv.idx) == 0L) { return(MLISTX) } stopifnot(!is.null(cov.x), !is.null(mean.x)) nvar <- nrow(MLISTX$lambda) nfac <- ncol(MLISTX$lambda) # copy MLIST <- MLISTX # resize matrices MLIST$lambda <- rbind( cbind(MLISTX$lambda, matrix(0, nvar, ndum)), matrix(0, ndum, nfac + ndum) ) MLIST$psi <- rbind( cbind(MLISTX$psi, matrix(0, nfac, ndum)), matrix(0, ndum, nfac + ndum) ) MLIST$theta <- rbind( cbind(MLISTX$theta, matrix(0, nvar, ndum)), matrix(0, ndum, nvar + ndum) ) if (!is.null(MLISTX$beta)) { MLIST$beta <- rbind( cbind(MLISTX$beta, matrix(0, nfac, ndum)), matrix(0, ndum, nfac + ndum) ) } if (!is.null(MLISTX$alpha)) { MLIST$alpha <- rbind(MLISTX$alpha, matrix(0, ndum, 1)) } if (!is.null(MLISTX$nu)) { MLIST$nu <- rbind(MLISTX$nu, matrix(0, ndum, 1)) } # fix LAMBDA: # - add columns for all dummy latent variables MLIST$lambda[cbind(ov.idx, lv.idx)] <- 1 # fix PSI # - move cov.x elements to PSI MLIST$psi[lv.idx, lv.idx] <- cov.x # move (ov.x.dummy elements of) GAMMA to BETA MLIST$beta[seq_len(nfac), ov.x.dummy.lv.idx] <- MLISTX$gamma MLIST$gamma <- NULL # fix ALPHA if (!is.null(MLIST$alpha)) { MLIST$alpha[lv.idx] <- mean.x } MLIST } # if DELTA parameterization, compute residual elements (in theta, or psi) # of observed categorical variables, as a function of other model parameters setResidualElements.LISREL <- function(MLIST = NULL, num.idx = NULL, ov.y.dummy.ov.idx = NULL, ov.y.dummy.lv.idx = NULL) { # remove num.idx from ov.y.dummy.* if (length(num.idx) > 0L && length(ov.y.dummy.ov.idx) > 0L) { n.idx <- which(ov.y.dummy.ov.idx %in% num.idx) if (length(n.idx) > 0L) { ov.y.dummy.ov.idx <- ov.y.dummy.ov.idx[-n.idx] ov.y.dummy.lv.idx <- ov.y.dummy.lv.idx[-n.idx] } } # force non-numeric theta elements to be zero if (length(num.idx) > 0L) { diag(MLIST$theta)[-num.idx] <- 0.0 } else { diag(MLIST$theta) <- 0.0 } if (length(ov.y.dummy.ov.idx) > 0L) { MLIST$psi[cbind(ov.y.dummy.lv.idx, 
ov.y.dummy.lv.idx)] <- 0.0 } # special case: PSI=0, and lambda=I (eg ex3.12) if (ncol(MLIST$psi) > 0L && sum(diag(MLIST$psi)) == 0.0 && all(diag(MLIST$lambda) == 1)) { ### FIXME: more elegant/general solution?? diag(MLIST$psi) <- 1 Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE) diag.Sigma <- diag(Sigma.hat) - 1.0 } else if (ncol(MLIST$psi) == 0L) { diag.Sigma <- rep(0, ncol(MLIST$theta)) } else { Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE) diag.Sigma <- diag(Sigma.hat) } if (is.null(MLIST$delta)) { delta <- rep(1, length(diag.Sigma)) } else { delta <- MLIST$delta } # theta = DELTA^(-2) - diag( LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) ) RESIDUAL <- as.vector(1 / (delta * delta) - diag.Sigma) if (length(num.idx) > 0L) { diag(MLIST$theta)[-num.idx] <- RESIDUAL[-num.idx] } else { diag(MLIST$theta) <- RESIDUAL } # move ov.y.dummy 'RESIDUAL' elements from THETA to PSI if (length(ov.y.dummy.ov.idx) > 0L) { MLIST$psi[cbind(ov.y.dummy.lv.idx, ov.y.dummy.lv.idx)] <- MLIST$theta[cbind(ov.y.dummy.ov.idx, ov.y.dummy.ov.idx)] MLIST$theta[cbind(ov.y.dummy.ov.idx, ov.y.dummy.ov.idx)] <- 0.0 } MLIST } # if THETA parameterization, compute delta elements # of observed categorical variables, as a function of other model parameters setDeltaElements.LISREL <- function(MLIST = NULL, num.idx = NULL) { Sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE) diag.Sigma <- diag(Sigma.hat) # (1/delta^2) = diag( LAMBDA (I-B)^-1 PSI (I-B)^-T t(LAMBDA) ) + THETA # tmp <- diag.Sigma + THETA tmp <- diag.Sigma tmp[tmp < 0] <- as.numeric(NA) MLIST$delta[, 1L] <- sqrt(1 / tmp) # numeric deltas stay 1.0 if (length(num.idx) > 0L) { MLIST$delta[num.idx] <- 1.0 } MLIST } # compute Sigma/ETA: variances/covariances of BOTH observed and latent variables computeCOV.LISREL <- function(MLIST = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) PSI <- MLIST$psi nlat <- nrow(PSI) THETA <- MLIST$theta BETA <- MLIST$beta # 'extend' matrices LAMBDA2 <- rbind(LAMBDA, diag(nlat)) THETA2 <- lav_matrix_bdiag(THETA, matrix(0, nlat, nlat)) # beta? if (is.null(BETA)) { LAMBDA..IB.inv <- LAMBDA2 } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA2 %*% IB.inv } # compute the augmented COV matrix COV <- tcrossprod(LAMBDA..IB.inv %*% PSI, LAMBDA..IB.inv) + THETA2 # if delta, scale if (delta && !is.null(MLIST$delta)) { DELTA <- diag(MLIST$delta[, 1L], nrow = nvar, ncol = nvar) COV[seq_len(nvar), seq_len(nvar)] <- DELTA %*% COV[seq_len(nvar), seq_len(nvar)] %*% DELTA } # if GAMMA, also x part GAMMA <- MLIST$gamma if (!is.null(GAMMA)) { COV.X <- MLIST$cov.x if (is.null(BETA)) { SX <- tcrossprod(GAMMA %*% COV.X, GAMMA) } else { IB.inv..GAMMA <- IB.inv %*% GAMMA SX <- tcrossprod(IB.inv..GAMMA %*% COV.X, IB.inv..GAMMA) } COV[(nvar + 1):(nvar + nlat), (nvar + 1):(nvar + nlat)] <- COV[(nvar + 1):(nvar + nlat), (nvar + 1):(nvar + nlat)] + SX } COV } # derivative of the objective function derivative.F.LISREL <- function(MLIST = NULL, Omega = NULL, Omega.mu = NULL) { LAMBDA <- MLIST$lambda PSI <- MLIST$psi BETA <- MLIST$beta ALPHA <- MLIST$alpha # beta? if (is.null(BETA)) { LAMBDA..IB.inv <- LAMBDA } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # meanstructure? meanstructure <- FALSE if (!is.null(Omega.mu)) meanstructure <- TRUE # group weight?
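# (if MLIST$gw is present, the group weight is a free parameter; its
# derivative below is just the scalar placeholder 0.0)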
group.w.free <- FALSE if (!is.null(MLIST$gw)) group.w.free <- TRUE # pre-compute some values tLAMBDA..IB.inv <- t(LAMBDA..IB.inv) if (!is.null(BETA)) { Omega..LAMBDA..IB.inv..PSI..tIB.inv <- (Omega %*% LAMBDA..IB.inv %*% PSI %*% t(IB.inv)) } else { Omega..LAMBDA <- Omega %*% LAMBDA } # 1. LAMBDA if (!is.null(BETA)) { if (meanstructure) { LAMBDA.deriv <- -1.0 * (Omega.mu %*% t(ALPHA) %*% t(IB.inv) + Omega..LAMBDA..IB.inv..PSI..tIB.inv) } else { LAMBDA.deriv <- -1.0 * Omega..LAMBDA..IB.inv..PSI..tIB.inv } } else { # no BETA if (meanstructure) { LAMBDA.deriv <- -1.0 * (Omega.mu %*% t(ALPHA) + Omega..LAMBDA %*% PSI) } else { LAMBDA.deriv <- -1.0 * (Omega..LAMBDA %*% PSI) } } # 2. BETA if (!is.null(BETA)) { if (meanstructure) { BETA.deriv <- -1.0 * ((t(IB.inv) %*% (t(LAMBDA) %*% Omega.mu %*% t(ALPHA)) %*% t(IB.inv)) + (tLAMBDA..IB.inv %*% Omega..LAMBDA..IB.inv..PSI..tIB.inv)) } else { BETA.deriv <- -1.0 * (tLAMBDA..IB.inv %*% Omega..LAMBDA..IB.inv..PSI..tIB.inv) } } else { BETA.deriv <- NULL } # 3. PSI PSI.deriv <- -1.0 * (tLAMBDA..IB.inv %*% Omega %*% LAMBDA..IB.inv) diag(PSI.deriv) <- 0.5 * diag(PSI.deriv) # 4. THETA THETA.deriv <- -1.0 * Omega diag(THETA.deriv) <- 0.5 * diag(THETA.deriv) if (meanstructure) { # 5. NU NU.deriv <- -1.0 * Omega.mu # 6. ALPHA ALPHA.deriv <- -1.0 * t(t(Omega.mu) %*% LAMBDA..IB.inv) } else { NU.deriv <- NULL ALPHA.deriv <- NULL } if (group.w.free) { GROUP.W.deriv <- 0.0 } else { GROUP.W.deriv <- NULL } list( lambda = LAMBDA.deriv, beta = BETA.deriv, theta = THETA.deriv, psi = PSI.deriv, nu = NU.deriv, alpha = ALPHA.deriv, gw = GROUP.W.deriv ) } # dSigma/dx -- per model matrix # note: # we avoid using the duplication and elimination matrices # for now (perhaps until we use the Matrix package) derivative.sigma.LISREL_OLD <- function(m = "lambda", # all model matrix elements, or only a few? # NOTE: for symmetric matrices, # we assume that they have full size # (nvar*nvar) (but already correct for # symmetry) idx = seq_len(length(MLIST[[m]])), MLIST = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) PSI <- MLIST$psi # only lower.tri part of sigma (not same order as elimination matrix?) v.idx <- lav_matrix_vech_idx(nvar) pstar <- nvar * (nvar + 1) / 2 # shortcut for gamma, nu, alpha and tau: empty matrix if (m == "nu" || m == "alpha" || m == "tau" || m == "gamma" || m == "gw" || m == "cov.x" || m == "mean.x") { return(matrix(0.0, nrow = pstar, ncol = length(idx))) } # Delta? delta.flag <- FALSE if (delta && !is.null(MLIST$delta)) { DELTA <- MLIST$delta delta.flag <- TRUE } else if (m == "delta") { # modindices? return(matrix(0.0, nrow = pstar, ncol = length(idx))) } # beta? if (!is.null(MLIST$ibeta.inv)) { IB.inv <- MLIST$ibeta.inv } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) } # pre if (m == "lambda" || m == "beta") { IK <- diag(nvar * nvar) + lav_matrix_commutation(nvar, nvar) } if (m == "lambda" || m == "beta") { IB.inv..PSI..tIB.inv..tLAMBDA <- IB.inv %*% PSI %*% t(IB.inv) %*% t(LAMBDA) } if (m == "beta" || m == "psi") { LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # here we go: if (m == "lambda") { DX <- IK %*% t(IB.inv..PSI..tIB.inv..tLAMBDA %x% diag(nvar)) if (delta.flag) { DX <- DX * as.vector(DELTA %x% DELTA) } } else if (m == "beta") { DX <- IK %*% (t(IB.inv..PSI..tIB.inv..tLAMBDA) %x% LAMBDA..IB.inv) # this is not really needed (because we select idx=m.el.idx) # but just in case we need all elements of beta...
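# (the diagonal of BETA is fixed to zero -- a variable never regresses on
# itself -- so the corresponding derivative columns are zeroed out below)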
DX[, lav_matrix_diag_idx(nfac)] <- 0.0 if (delta.flag) { DX <- DX * as.vector(DELTA %x% DELTA) } } else if (m == "psi") { DX <- (LAMBDA..IB.inv %x% LAMBDA..IB.inv) # symmetry correction, but keeping all duplicated elements # since we depend on idx=m.el.idx # otherwise, we could simply postmultiply with the duplicationMatrix # we sum up lower.tri + upper.tri (but not the diagonal elements!) # imatrix <- matrix(1:nfac^2,nfac,nfac) # lower.idx <- imatrix[lower.tri(imatrix, diag=FALSE)] # upper.idx <- imatrix[upper.tri(imatrix, diag=FALSE)] lower.idx <- lav_matrix_vech_idx(nfac, diagonal = FALSE) upper.idx <- lav_matrix_vechru_idx(nfac, diagonal = FALSE) # NOTE YR: upper.idx (see 3 lines up) is wrong in MH patch! # fixed again 13/06/2012 after bug report of Mijke Rhemtulla. offdiagSum <- DX[, lower.idx] + DX[, upper.idx] DX[, c(lower.idx, upper.idx)] <- cbind(offdiagSum, offdiagSum) if (delta.flag) { DX <- DX * as.vector(DELTA %x% DELTA) } } else if (m == "theta") { DX <- diag(nvar * nvar) # very sparse... # symmetry correction not needed, since all off-diagonal elements # are zero? if (delta.flag) { DX <- DX * as.vector(DELTA %x% DELTA) } } else if (m == "delta") { Omega <- computeSigmaHat.LISREL(MLIST, delta = FALSE) DD <- diag(DELTA[, 1], nvar, nvar) DD.Omega <- (DD %*% Omega) A <- DD.Omega %x% diag(nvar) B <- diag(nvar) %x% DD.Omega DX <- A[, lav_matrix_diag_idx(nvar), drop = FALSE] + B[, lav_matrix_diag_idx(nvar), drop = FALSE] } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } DX <- DX[v.idx, idx, drop = FALSE] DX } # dSigma/dx -- per model matrix derivative.sigma.LISREL <- function(m = "lambda", # all model matrix elements, or only a few? # NOTE: for symmetric matrices, # we assume that they have full size # (nvar*nvar) (but already correct for # symmetry) idx = seq_len(length(MLIST[[m]])), MLIST = NULL, vech = TRUE, delta = TRUE) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) PSI <- MLIST$psi # only lower.tri part of sigma (not same order as elimination matrix?) v.idx <- lav_matrix_vech_idx(nvar) pstar <- nvar * (nvar + 1) / 2 # shortcut for gamma, nu, alpha, tau,.... : empty matrix if (m == "nu" || m == "alpha" || m == "tau" || m == "gamma" || m == "gw" || m == "cov.x" || m == "mean.x") { return(matrix(0.0, nrow = pstar, ncol = length(idx))) } # Delta? delta.flag <- FALSE if (delta && !is.null(MLIST$delta)) { DELTA <- MLIST$delta delta.flag <- TRUE } else if (m == "delta") { # modindices? return(matrix(0.0, nrow = pstar, ncol = length(idx))) } # beta? if (!is.null(MLIST$ibeta.inv)) { IB.inv <- MLIST$ibeta.inv } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) } # pre # if(m == "lambda" || m == "beta") # IK <- diag(nvar*nvar) + lav_matrix_commutation(nvar, nvar) if (m == "lambda" || m == "beta") { L1 <- LAMBDA %*% IB.inv %*% PSI %*% t(IB.inv) } if (m == "beta" || m == "psi") { LAMBDA..IB.inv <- LAMBDA %*% IB.inv } # here we go: if (m == "lambda") { KOL.idx <- matrix(1:(nvar * nfac), nvar, nfac, byrow = TRUE)[idx] DX <- (L1 %x% diag(nvar))[, idx, drop = FALSE] + (diag(nvar) %x% L1)[, KOL.idx, drop = FALSE] } else if (m == "beta") { KOL.idx <- matrix(1:(nfac * nfac), nfac, nfac, byrow = TRUE)[idx] DX <- (L1 %x% LAMBDA..IB.inv)[, idx, drop = FALSE] + (LAMBDA..IB.inv %x% L1)[, KOL.idx, drop = FALSE] # this is not really needed (because we select idx=m.el.idx) # but just in case we need all elements of beta...
DX[, which(idx %in% lav_matrix_diag_idx(nfac))] <- 0.0 } else if (m == "psi") { DX <- (LAMBDA..IB.inv %x% LAMBDA..IB.inv) # symmetry correction, but keeping all duplicated elements # since we depend on idx=m.el.idx lower.idx <- lav_matrix_vech_idx(nfac, diagonal = FALSE) upper.idx <- lav_matrix_vechru_idx(nfac, diagonal = FALSE) offdiagSum <- DX[, lower.idx] + DX[, upper.idx] DX[, c(lower.idx, upper.idx)] <- cbind(offdiagSum, offdiagSum) DX <- DX[, idx, drop = FALSE] } else if (m == "theta") { # DX <- diag(nvar*nvar) # very sparse... DX <- matrix(0, nvar * nvar, length(idx)) DX[cbind(idx, seq_along(idx))] <- 1 # symmetry correction not needed, since all off-diagonal elements # are zero? } else if (m == "delta") { Omega <- computeSigmaHat.LISREL(MLIST, delta = FALSE) DD <- diag(DELTA[, 1], nvar, nvar) DD.Omega <- (DD %*% Omega) A <- DD.Omega %x% diag(nvar) B <- diag(nvar) %x% DD.Omega DX <- A[, lav_matrix_diag_idx(nvar), drop = FALSE] + B[, lav_matrix_diag_idx(nvar), drop = FALSE] DX <- DX[, idx, drop = FALSE] } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } if (delta.flag && !m == "delta") { DX <- DX * as.vector(DELTA %x% DELTA) } # vech? if (vech) { DX <- DX[v.idx, , drop = FALSE] } DX } # dMu/dx -- per model matrix derivative.mu.LISREL <- function(m = "alpha", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) # shortcut for empty matrices if (m == "gamma" || m == "psi" || m == "theta" || m == "tau" || m == "delta" || m == "gw" || m == "cov.x" || m == "mean.x") { return(matrix(0.0, nrow = nvar, ncol = length(idx))) } # missing alpha if (is.null(MLIST$alpha)) { ALPHA <- matrix(0, nfac, 1L) } else { ALPHA <- MLIST$alpha } # beta? if (!is.null(MLIST$ibeta.inv)) { IB.inv <- MLIST$ibeta.inv } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) } if (m == "nu") { DX <- diag(nvar) } else if (m == "lambda") { DX <- t(IB.inv %*% ALPHA) %x% diag(nvar) } else if (m == "beta") { DX <- t(IB.inv %*% ALPHA) %x% (LAMBDA %*% IB.inv) # this is not really needed (because we select idx=m.el.idx) DX[, lav_matrix_diag_idx(nfac)] <- 0.0 } else if (m == "alpha") { DX <- LAMBDA %*% IB.inv } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } DX <- DX[, idx, drop = FALSE] DX } # dTh/dx -- per model matrix derivative.th.LISREL <- function(m = "tau", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), th.idx = NULL, MLIST = NULL, delta = TRUE) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) TAU <- MLIST$tau nth <- nrow(TAU) # missing alpha if (is.null(MLIST$alpha)) { ALPHA <- matrix(0, nfac, 1L) } else { ALPHA <- MLIST$alpha } # missing nu if (is.null(MLIST$nu)) { NU <- matrix(0, nvar, 1L) } else { NU <- MLIST$nu } # Delta? delta.flag <- FALSE if (delta && !is.null(MLIST$delta)) { DELTA <- MLIST$delta delta.flag <- TRUE } if (is.null(th.idx)) { th.idx <- seq_len(nth) nlev <- rep(1L, nvar) K_nu <- diag(nvar) } else { nlev <- tabulate(th.idx, nbins = nvar) nlev[nlev == 0L] <- 1L K_nu <- matrix(0, sum(nlev), nvar) K_nu[cbind(seq_len(sum(nlev)), rep(seq_len(nvar), times = nlev))] <- 1.0 } # shortcut for empty matrices if (m == "gamma" || m == "psi" || m == "theta" || m == "gw" || m == "cov.x" || m == "mean.x") { return(matrix(0.0, nrow = length(th.idx), ncol = length(idx))) } # beta? 
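# (reuse a cached (I - BETA)^-1 when the caller stored one in
# MLIST$ibeta.inv; otherwise compute it via .internal_get_IB.inv())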
if (!is.null(MLIST$ibeta.inv)) { IB.inv <- MLIST$ibeta.inv } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) } if (m == "tau") { DX <- matrix(0, nrow = length(th.idx), ncol = nth) DX[th.idx > 0L, ] <- diag(nth) if (delta.flag) { DX <- DX * as.vector(K_nu %*% DELTA) } } else if (m == "nu") { DX <- (-1) * K_nu if (delta.flag) { DX <- DX * as.vector(K_nu %*% DELTA) } } else if (m == "lambda") { DX <- (-1) * t(IB.inv %*% ALPHA) %x% diag(nvar) DX <- K_nu %*% DX if (delta.flag) { DX <- DX * as.vector(K_nu %*% DELTA) } } else if (m == "beta") { DX <- (-1) * t(IB.inv %*% ALPHA) %x% (LAMBDA %*% IB.inv) # this is not really needed (because we select idx=m.el.idx) DX[, lav_matrix_diag_idx(nfac)] <- 0.0 DX <- K_nu %*% DX if (delta.flag) { DX <- DX * as.vector(K_nu %*% DELTA) } } else if (m == "alpha") { DX <- (-1) * LAMBDA %*% IB.inv DX <- K_nu %*% DX if (delta.flag) { DX <- DX * as.vector(K_nu %*% DELTA) } } else if (m == "delta") { DX1 <- matrix(0, nrow = length(th.idx), ncol = 1) DX1[th.idx > 0L, ] <- TAU DX2 <- NU + LAMBDA %*% IB.inv %*% ALPHA DX2 <- K_nu %*% DX2 DX <- K_nu * as.vector(DX1 - DX2) } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } DX <- DX[, idx, drop = FALSE] DX } # dPi/dx -- per model matrix derivative.pi.LISREL <- function(m = "lambda", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { LAMBDA <- MLIST$lambda nvar <- nrow(LAMBDA) nfac <- ncol(LAMBDA) GAMMA <- MLIST$gamma nexo <- ncol(GAMMA) # Delta? delta.flag <- FALSE if (!is.null(MLIST$delta)) { DELTA.diag <- MLIST$delta[, 1L] delta.flag <- TRUE } # shortcut for empty matrices if (m == "tau" || m == "nu" || m == "alpha" || m == "psi" || m == "theta" || m == "gw" || m == "cov.x" || m == "mean.x") { return(matrix(0.0, nrow = nvar * nexo, ncol = length(idx))) } # beta? if (!is.null(MLIST$ibeta.inv)) { IB.inv <- MLIST$ibeta.inv } else { IB.inv <- .internal_get_IB.inv(MLIST = MLIST) } if (m == "lambda") { DX <- t(IB.inv %*% GAMMA) %x% diag(nvar) if (delta.flag) { DX <- DX * DELTA.diag } } else if (m == "beta") { DX <- t(IB.inv %*% GAMMA) %x% (LAMBDA %*% IB.inv) # this is not really needed (because we select idx=m.el.idx) DX[, lav_matrix_diag_idx(nfac)] <- 0.0 if (delta.flag) { DX <- DX * DELTA.diag } } else if (m == "gamma") { DX <- diag(nexo) %x% (LAMBDA %*% IB.inv) if (delta.flag) { DX <- DX * DELTA.diag } } else if (m == "delta") { PRE <- rep(1, nexo) %x% diag(nvar) DX <- PRE * as.vector(LAMBDA %*% IB.inv %*% GAMMA) } else { lav_msg_stop(gettext("wrong model matrix names:"), m) } DX <- DX[, idx, drop = FALSE] DX } # dGW/dx -- per model matrix derivative.gw.LISREL <- function(m = "gw", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { # shortcut for empty matrices if (m != "gw") { return(matrix(0.0, nrow = 1L, ncol = length(idx))) } else { # m == "gw" DX <- matrix(1.0, 1, 1) } DX <- DX[, idx, drop = FALSE] DX } # dlambda/dx -- per model matrix derivative.lambda.LISREL <- function(m = "lambda", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { LAMBDA <- MLIST$lambda # shortcut for empty matrices if (m != "lambda") { return(matrix(0.0, nrow = length(LAMBDA), ncol = length(idx))) } else { # m == "lambda" DX <- diag(1, nrow = length(LAMBDA), ncol = length(LAMBDA)) } DX <- DX[, idx, drop = FALSE] DX } # dpsi/dx -- per model matrix - FIXME!!!!! derivative.psi.LISREL <- function(m = "psi", # all model matrix elements, or only a few? 
idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { PSI <- MLIST$psi nfac <- nrow(PSI) v.idx <- lav_matrix_vech_idx(nfac) # shortcut for empty matrices if (m != "psi") { DX <- matrix(0.0, nrow = length(PSI), ncol = length(idx)) return(DX[v.idx, , drop = FALSE]) } else { # m == "psi" DX <- diag(1, nrow = length(PSI), ncol = length(PSI)) } DX <- DX[v.idx, idx, drop = FALSE] DX } # dtheta/dx -- per model matrix derivative.theta.LISREL <- function(m = "theta", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { THETA <- MLIST$theta nvar <- nrow(THETA) v.idx <- lav_matrix_vech_idx(nvar) # shortcut for empty matrices if (m != "theta") { DX <- matrix(0.0, nrow = length(THETA), ncol = length(idx)) return(DX[v.idx, , drop = FALSE]) } else { # m == "theta" DX <- diag(1, nrow = length(THETA), ncol = length(THETA)) } DX <- DX[v.idx, idx, drop = FALSE] DX } # dbeta/dx -- per model matrix derivative.beta.LISREL <- function(m = "beta", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { BETA <- MLIST$beta # shortcut for empty matrices if (m != "beta") { return(matrix(0.0, nrow = length(BETA), ncol = length(idx))) } else { # m == "beta" DX <- diag(1, nrow = length(BETA), ncol = length(BETA)) } DX <- DX[, idx, drop = FALSE] DX } # dgamma/dx -- per model matrix derivative.gamma.LISREL <- function(m = "gamma", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { GAMMA <- MLIST$gamma # shortcut for empty matrices if (m != "gamma") { return(matrix(0.0, nrow = length(GAMMA), ncol = length(idx))) } else { # m == "gamma" DX <- diag(1, nrow = length(GAMMA), ncol = length(GAMMA)) } DX <- DX[, idx, drop = FALSE] DX } # dnu/dx -- per model matrix derivative.nu.LISREL <- function(m = "nu", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { NU <- MLIST$nu # shortcut for empty matrices if (m != "nu") { return(matrix(0.0, nrow = length(NU), ncol = length(idx))) } else { # m == "nu" DX <- diag(1, nrow = length(NU), ncol = length(NU)) } DX <- DX[, idx, drop = FALSE] DX } # dtau/dx -- per model matrix derivative.tau.LISREL <- function(m = "tau", # all model matrix elements, or only a few? idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { TAU <- MLIST$tau # shortcut for empty matrices if (m != "tau") { return(matrix(0.0, nrow = length(TAU), ncol = length(idx))) } else { # m == "tau" DX <- diag(1, nrow = length(TAU), ncol = length(TAU)) } DX <- DX[, idx, drop = FALSE] DX } # dalpha/dx -- per model matrix derivative.alpha.LISREL <- function(m = "alpha", # all model matrix elements, or only a few? 
idx = seq_len(length(MLIST[[m]])), MLIST = NULL) { ALPHA <- MLIST$alpha # shortcut for empty matrices if (m != "alpha") { return(matrix(0.0, nrow = length(ALPHA), ncol = length(idx))) } else { # m == "alpha" DX <- diag(1, nrow = length(ALPHA), ncol = length(ALPHA)) } DX <- DX[, idx, drop = FALSE] DX } # MLIST = NULL; meanstructure=TRUE; th=TRUE; delta=TRUE; pi=TRUE; gw=FALSE # lav_matrix_vech_idx <- lavaan:::lav_matrix_vech_idx; lav_matrix_vechru_idx <- lavaan:::lav_matrix_vechru_idx # vec <- lavaan:::vec; lav_func_jacobian_complex <- lavaan:::lav_func_jacobian_complex # computeSigmaHat.LISREL <- lavaan:::computeSigmaHat.LISREL # setDeltaElements.LISREL <- lavaan:::setDeltaElements.LISREL TESTING_derivatives.LISREL <- function(MLIST = NULL, nvar = NULL, nfac = NULL, nexo = NULL, th.idx = NULL, num.idx = NULL, meanstructure = TRUE, th = TRUE, delta = TRUE, pi = TRUE, gw = FALSE, theta = FALSE) { if (is.null(MLIST)) { # create artificial matrices, compare 'numerical' vs 'analytical' # derivatives # nvar <- 12; nfac <- 3; nexo <- 4 # this combination is special? if (is.null(nvar)) { nvar <- 20 } if (is.null(nfac)) { nfac <- 6 } if (is.null(nexo)) { nexo <- 5 } if (is.null(num.idx)) { num.idx <- sort(sample(seq_len(nvar), ceiling(nvar / 2))) } if (is.null(th.idx)) { th.idx <- integer(0L) for (i in seq_len(nvar)) { if (i %in% num.idx) { th.idx <- c(th.idx, 0) } else { th.idx <- c(th.idx, rep(i, sample(c(1, 1, 2, 6), 1L))) } } } nth <- sum(th.idx > 0L) MLIST <- list() MLIST$lambda <- matrix(0, nvar, nfac) MLIST$beta <- matrix(0, nfac, nfac) MLIST$theta <- matrix(0, nvar, nvar) MLIST$psi <- matrix(0, nfac, nfac) if (meanstructure) { MLIST$alpha <- matrix(0, nfac, 1L) MLIST$nu <- matrix(0, nvar, 1L) } if (th) MLIST$tau <- matrix(0, nth, 1L) if (delta) MLIST$delta <- matrix(0, nvar, 1L) MLIST$gamma <- matrix(0, nfac, nexo) if (gw) MLIST$gw <- matrix(0, 1L, 1L) # feed random numbers MLIST <- lapply(MLIST, function(x) { x[, ] <- rnorm(length(x)) x }) # fix diag(MLIST$beta) <- 0.0 diag(MLIST$theta) <- diag(MLIST$theta) * diag(MLIST$theta) * 10 diag(MLIST$psi) <- diag(MLIST$psi) * diag(MLIST$psi) * 10 MLIST$psi[lav_matrix_vechru_idx(nfac)] <- MLIST$psi[lav_matrix_vech_idx(nfac)] MLIST$theta[lav_matrix_vechru_idx(nvar)] <- MLIST$theta[lav_matrix_vech_idx(nvar)] if (delta) MLIST$delta[, ] <- abs(MLIST$delta) * 10 } else { nvar <- nrow(MLIST$lambda) } compute.sigma <- function(x, mm = "lambda", MLIST = NULL) { mlist <- MLIST if (mm %in% c("psi", "theta")) { mlist[[mm]] <- lav_matrix_vech_reverse(x) } else { mlist[[mm]][, ] <- x } if (theta) { mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx) } lav_matrix_vech(computeSigmaHat.LISREL(mlist)) } compute.mu <- function(x, mm = "lambda", MLIST = NULL) { mlist <- MLIST if (mm %in% c("psi", "theta")) { mlist[[mm]] <- lav_matrix_vech_reverse(x) } else { mlist[[mm]][, ] <- x } if (theta) { mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx) } computeMuHat.LISREL(mlist) } compute.th2 <- function(x, mm = "tau", MLIST = NULL, th.idx) { mlist <- MLIST if (mm %in% c("psi", "theta")) { mlist[[mm]] <- lav_matrix_vech_reverse(x) } else { mlist[[mm]][, ] <- x } if (theta) { mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx) } computeTH.LISREL(mlist, th.idx = th.idx) } compute.pi <- function(x, mm = "lambda", MLIST = NULL) { mlist <- MLIST if (mm %in% c("psi", "theta")) { mlist[[mm]] <- lav_matrix_vech_reverse(x) } else { mlist[[mm]][, ] <- x } if (theta) { mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx) } 
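# (the last expression below returns the model-implied PI matrix,
# recomputed for the perturbed parameter vector x)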
computePI.LISREL(mlist) } compute.gw <- function(x, mm = "gw", MLIST = NULL) { mlist <- MLIST if (mm %in% c("psi", "theta")) { mlist[[mm]] <- lav_matrix_vech_reverse(x) } else { mlist[[mm]][, ] <- x } if (theta) { mlist <- setDeltaElements.LISREL(MLIST = mlist, num.idx = num.idx) } mlist$gw[1, 1] } # if theta, set MLIST$delta if (theta) { MLIST <- setDeltaElements.LISREL(MLIST = MLIST, num.idx = num.idx) } for (mm in names(MLIST)) { if (mm %in% c("psi", "theta")) { x <- lav_matrix_vech(MLIST[[mm]]) } else { x <- lav_matrix_vec(MLIST[[mm]]) } if (mm == "delta" && theta) next if (lav_debug()) { cat("### mm = ", mm, "\n") } # 1. sigma DX1 <- lav_func_jacobian_complex(func = compute.sigma, x = x, mm = mm, MLIST = MLIST) DX2 <- derivative.sigma.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST, delta = !theta ) if (mm %in% c("psi", "theta")) { # remove duplicated columns of symmetric matrices idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE) if (length(idx) > 0L) DX2 <- DX2[, -idx] } if (theta) { sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE) R <- lav_deriv_cov2cor(sigma.hat, num.idx = num.idx) DX3 <- DX2 DX2 <- R %*% DX2 } if (lav_debug()) { cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n") print(zapsmall(DX1)) cat("\n") cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n") print(DX2) cat("\n") if (theta) { cat("[SIGMA] mm = ", sprintf("%-8s:", mm), "DX3 (analytical):\n") print(DX3) cat("\n") } } cat( "[SIGMA] mm = ", sprintf("%-8s:", mm), "sum delta = ", sprintf("%12.9f", sum(DX1 - DX2)), " max delta = ", sprintf("%12.9f", max(DX1 - DX2)), "\n" ) # 2. mu DX1 <- lav_func_jacobian_complex(func = compute.mu, x = x, mm = mm, MLIST = MLIST) DX2 <- derivative.mu.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST ) if (mm %in% c("psi", "theta")) { # remove duplicated columns of symmetric matrices idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE) if (length(idx) > 0L) DX2 <- DX2[, -idx] } cat( "[MU ] mm = ", sprintf("%-8s:", mm), "sum delta = ", sprintf("%12.9f", sum(DX1 - DX2)), " max delta = ", sprintf("%12.9f", max(DX1 - DX2)), "\n" ) if (lav_debug()) { cat("[MU ] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n") print(zapsmall(DX1)) cat("\n") cat("[MU ] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n") print(DX2) cat("\n") } # 3. th if (th) { DX1 <- lav_func_jacobian_complex( func = compute.th2, x = x, mm = mm, MLIST = MLIST, th.idx = th.idx ) DX2 <- derivative.th.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST, th.idx = th.idx, delta = TRUE ) if (theta) { # 1. compute dDelta.dx dxSigma <- derivative.sigma.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST, delta = !theta ) var.idx <- which(!lav_matrix_vech_idx(nvar) %in% lav_matrix_vech_idx(nvar, diagonal = FALSE)) sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE) dsigma <- diag(sigma.hat) # dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma)) dDelta.dx <- dxSigma[var.idx, ] * -0.5 / (dsigma * sqrt(dsigma)) # 2. compute dth.dDelta dth.dDelta <- derivative.th.LISREL( m = "delta", idx = seq_len(length(MLIST[["delta"]])), MLIST = MLIST, th.idx = th.idx ) # 3. 
add dth.dDelta %*% dDelta.dx no.num.idx <- which(th.idx > 0) DX2[no.num.idx, ] <- DX2[no.num.idx, , drop = FALSE] + (dth.dDelta %*% dDelta.dx)[no.num.idx, , drop = FALSE] # DX2 <- DX2 + dth.dDelta %*% dDelta.dx } if (mm %in% c("psi", "theta")) { # remove duplicated columns of symmetric matrices idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE) if (length(idx) > 0L) DX2 <- DX2[, -idx] } cat( "[TH ] mm = ", sprintf("%-8s:", mm), "sum delta = ", sprintf("%12.9f", sum(DX1 - DX2)), " max delta = ", sprintf("%12.9f", max(DX1 - DX2)), "\n" ) if (lav_debug()) { cat("[TH ] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n") print(zapsmall(DX1)) cat("\n") cat("[TH ] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n") print(DX2) cat("\n") } } # 4. pi if (pi) { DX1 <- lav_func_jacobian_complex(func = compute.pi, x = x, mm = mm, MLIST = MLIST) DX2 <- derivative.pi.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST ) if (mm %in% c("psi", "theta")) { # remove duplicated columns of symmetric matrices idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE) if (length(idx) > 0L) DX2 <- DX2[, -idx] } if (theta) { # 1. compute dDelta.dx dxSigma <- derivative.sigma.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST, delta = !theta ) if (mm %in% c("psi", "theta")) { # remove duplicated columns of symmetric matrices idx <- lav_matrix_vechru_idx(sqrt(ncol(dxSigma)), diagonal = FALSE) if (length(idx) > 0L) dxSigma <- dxSigma[, -idx] } var.idx <- which(!lav_matrix_vech_idx(nvar) %in% lav_matrix_vech_idx(nvar, diagonal = FALSE)) sigma.hat <- computeSigmaHat.LISREL(MLIST = MLIST, delta = FALSE) dsigma <- diag(sigma.hat) # dy/ddsigma = -0.5/(ddsigma*sqrt(ddsigma)) dDelta.dx <- dxSigma[var.idx, ] * -0.5 / (dsigma * sqrt(dsigma)) # 2. compute dpi.dDelta dpi.dDelta <- derivative.pi.LISREL( m = "delta", idx = seq_len(length(MLIST[["delta"]])), MLIST = MLIST ) # 3. add dpi.dDelta %*% dDelta.dx no.num.idx <- which(!seq.int(1L, nvar) %in% num.idx) no.num.idx <- rep(seq.int(0, nexo - 1) * nvar, each = length(no.num.idx) ) + no.num.idx DX2[no.num.idx, ] <- DX2[no.num.idx, , drop = FALSE] + (dpi.dDelta %*% dDelta.dx)[no.num.idx, , drop = FALSE] } cat( "[PI ] mm = ", sprintf("%-8s:", mm), "sum delta = ", sprintf("%12.9f", sum(DX1 - DX2)), " max delta = ", sprintf("%12.9f", max(DX1 - DX2)), "\n" ) if (lav_debug()) { cat("[PI ] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n") print(zapsmall(DX1)) cat("\n") cat("[PI ] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n") print(DX2) cat("\n") } } # 5. 
gw if (gw) { DX1 <- lav_func_jacobian_complex(func = compute.gw, x = x, mm = mm, MLIST = MLIST) DX2 <- derivative.gw.LISREL( m = mm, idx = seq_len(length(MLIST[[mm]])), MLIST = MLIST ) if (mm %in% c("psi", "theta")) { # remove duplicated columns of symmetric matrices idx <- lav_matrix_vechru_idx(sqrt(ncol(DX2)), diagonal = FALSE) if (length(idx) > 0L) DX2 <- DX2[, -idx] } cat( "[GW ] mm = ", sprintf("%-8s:", mm), "sum delta = ", sprintf("%12.9f", sum(DX1 - DX2)), " max delta = ", sprintf("%12.9f", max(DX1 - DX2)), "\n" ) if (lav_debug()) { cat("[GW ] mm = ", sprintf("%-8s:", mm), "DX1 (numerical):\n") print(DX1) cat("\n\n") cat("[GW ] mm = ", sprintf("%-8s:", mm), "DX2 (analytical):\n") print(DX2) cat("\n\n") } } } MLIST$th.idx <- th.idx MLIST$num.idx <- num.idx MLIST } lavaan/R/lav_options_default.R0000644000176200001440000004450114627656441016124 0ustar liggesusers# LDW 26 Mar 2024: use option settings and store in cache environment lavaan_cache_env <- new.env(parent = emptyenv()) # functions to handle warn/debug/verbose options # (no longer in 'standard' options) # if x not present returns the current value of opt.warn/debug/verbose # if x present # if x different from current value, assign x to current value and return TRUE # else return FALSE lav_warn <- function(x) { optwarn <- get0("opt.warn", lavaan_cache_env, ifnotfound = TRUE) if (missing(x)) { return(optwarn) } else { setwarn <- as.logical(x) if (setwarn != optwarn) { if (setwarn) { # because default TRUE, removing value is the same as setting to TRUE rm("opt.warn", envir = lavaan_cache_env) } else { assign("opt.warn", FALSE, lavaan_cache_env) } return(TRUE) } else { return(FALSE) } } } lav_debug <- function(x) { optdebug <- get0("opt.debug", lavaan_cache_env, ifnotfound = FALSE) if (missing(x)) { return(optdebug) } else { setdebug <- as.logical(x) if (setdebug != optdebug) { if (setdebug) { assign("opt.debug", TRUE, lavaan_cache_env) } else { # because default FALSE, removing value is the same as setting to FALSE rm("opt.debug", envir = lavaan_cache_env) } return(TRUE) } else { return(FALSE) } } } lav_verbose <- function(x) { optverbose <- get0("opt.verbose", lavaan_cache_env, ifnotfound = FALSE) if (missing(x)) { return(optverbose) } else { setverbose <- as.logical(x) if (setverbose != optverbose) { if (setverbose) { assign("opt.verbose", TRUE, lavaan_cache_env) } else { # because default FALSE, removing value is the same as setting to FALSE rm("opt.verbose", envir = lavaan_cache_env) } return(TRUE) } else { return(FALSE) } } } # set the default options (including unspecified values "default") lav_options_default <- function() { if (exists("opt.default", lavaan_cache_env)) { opt <- get("opt.default", lavaan_cache_env) return(opt) } # ---------------- preparation ----------------- opt.default <- list() opt.check <- list() elm <- function( name = NULL, # name of option, if length 2 first is sublist name dflt = NULL, # default value chr = NULL, # valid strings (names) and replacement values nm = NULL, # valid numeric interval bl = FALSE, # logical OK? oklen = c(1L, 1L), # length > 1 OK, second negative to have a warning # and not an error when length greater than abs(oklen[2]) num2int = FALSE # should numerical values be converted to int?
) { stopifnot(any(length(name) == 1:2)) stopifnot(is.null(nm) || (length(nm) == 1 && grepl("^[][].*,.*[][]$", nm))) stopifnot(length(bl) == 1, is.logical(bl)) stopifnot(is.null(chr) || is.character(chr)) stopifnot(length(oklen) == 2, oklen[1] <= abs(oklen[2])) stopifnot(length(num2int) == 1, is.logical(num2int)) # prepare list to store for checking option list2store <- list(oklen = oklen) if (!is.null(chr)) { if (is.null(names(chr))) names(chr) <- chr list2store$chr <- chr } if (!is.null(nm)) { first.in <- grepl("^\\[", nm) last.in <- grepl("\\]$", nm) elems <- as.numeric(strsplit(gsub("[][ ]", "", nm), ",")[[1]]) if (num2int) { elems[elems == -Inf] <- -2e9 elems[elems == Inf] <- 2e9 elems <- as.integer(elems) } list2store$nm <- list(bounds = elems, first.in = first.in, last.in = last.in) } if (bl) list2store$bl <- TRUE if (num2int) list2store$num2int <- TRUE # store default and list for checking if (length(name) == 1) name <- c("", name) if (name[1] != "") { if (is.null(opt.default[[name[1]]])) { # make sure sublists exist opt.default[[name[1]]] <<- list() sublist <- list() attr(sublist, "SUB") <- TRUE # indicate as sublist opt.check[[name[1]]] <<- sublist } opt.default[[name[1]]][[name[2]]] <<- dflt if (is.null(dflt)) opt.default[[name[1]]][name[2]] <<- list(NULL) opt.check[[name[1]]][[name[2]]] <<- list2store } else { opt.default[[name[2]]] <<- dflt if (is.null(dflt)) opt.default[name[2]] <<- list(NULL) opt.check[[name[2]]] <<- list2store } NULL } elmdup <- function( name = NULL, # name(s) of option from = NULL # name(s) of option to duplicate ) { if (length(name) == 1) name <- c("", name) if (length(from) == 1) from <- c("", from) if (from[1] != "") { from.default <- opt.default[[from[1]]][[from[2]]] from.check <- opt.check[[from[1]]][[from[2]]] } else { from.default <- opt.default[[from[2]]] from.check <- opt.check[[from[2]]] } if (name[1] != "") { if (is.null(opt.default[[name[1]]])) { # make sure sublists exist opt.default[[name[1]]] <<- list() sublist <- list() attr(sublist, "SUB") <- TRUE # indicate as sublist opt.check[[name[1]]] <<- sublist } opt.default[[name[1]]][[name[2]]] <<- from.default opt.check[[name[1]]][[name[2]]] <<- from.check } else { opt.default[[name[2]]] <<- from.default opt.check[[name[2]]] <<- from.check } } # ------------------------- store options -------------------------- elm("model.type", "sem", chr = c(lavaan = "lavaan", cfa = "cfa", growth = "growth", sem = "sem", efa = "efa", path = "path", unrestricted = "unrestricted")) # global elm("mimic", "lavaan", chr = c(default = "lavaan", lavaan = "lavaan", regression = "lm", lisrel = "EQS", eqs = "EQS", lm = "lm", mplus = "Mplus" )) # model modifiers elm("meanstructure", "default", chr = "default", bl = TRUE) elm("int.ov.free", FALSE, bl = TRUE) elm("int.lv.free", FALSE, bl = TRUE) elm("marker.int.zero", FALSE, bl = TRUE) # fix marker intercepts # free lv means elm("conditional.x", "default", chr = "default", bl = TRUE) elm("fixed.x", "default", chr = "default", bl = TRUE) elm("orthogonal", FALSE, bl = TRUE) elm("orthogonal.x", FALSE, bl = TRUE) elm("orthogonal.y", FALSE, bl = TRUE) elm("std.lv", FALSE, bl = TRUE) elm("correlation", FALSE, bl = TRUE) # correlation structure elm("effect.coding", FALSE, chr = c("", "loadings", "intercepts", "mg.lv.efa.variances", "mg.lv.variances", "mg.lv.means", "mg.lv.intercepts"), bl = TRUE, oklen = c(0L, 6L)) elm("ceq.simple", FALSE, bl = TRUE) # treat simple eq cons special?
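# illustration of the nm= interval notation (hypothetical option name, not
# part of the registered set): a call such as
#   elm("my.tol", 1e-06, nm = "]0, 0.01]")
# would register a numeric option whose value must lie in (0, 0.01]: a
# leading "]" excludes the lower bound, a trailing "]" includes the upper
# bound (see the first.in/last.in parsing in elm() above)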
elm("parameterization", "default", c( "default", "mml", "delta", "theta")) elm("auto.fix.first", FALSE, bl = TRUE) elm("auto.fix.single", FALSE, bl = TRUE) elm("auto.var", FALSE, bl = TRUE) elm("auto.cov.lv.x", FALSE, bl = TRUE) elm("auto.cov.y", FALSE, bl = TRUE) elm("auto.th", FALSE, bl = TRUE) elm("auto.delta", FALSE, bl = TRUE) elm("auto.efa", FALSE, bl = TRUE) # rotation elm("rotation", "geomin", chr = c(crawfer = "cf", crawford.ferguson = "cf", crawfordferguson = "cf", cf = "cf", varimax = "varimax", quartimax = "quartimax", orthomax = "orthomax", oblimin = "oblimin", quartimin = "quartimin", geomin = "geomin", entropy = "entropy", mccammon = "mccammon", infomax = "infomax", tandem1 = "tandem1", tandem2 = "tandem2", none = "none", promax = "promax", oblimax = "oblimax", bentler = "bentler", simplimax = "simplimax", target = "target", pst = "pst", cf.quartimax = "cf-quartimax", cf.varimax = "cf-varimax", cf.equamax = "cf-equamax", cf.parsimax = "cf-parsimax", cf.facparsim = "cf-facparsim", bi.quartimin = "biquartimin", biquartimin = "biquartimin", bi.geomin = "bigeomin", bigeomin = "bigeomin" )) elm("rotation.se", "bordered", chr = c("delta", "bordered")) # rotation-args sublist elm(c("rotation.args", "orthogonal"), FALSE, bl = TRUE) elm(c("rotation.args", "row.weights"), "default", chr = c( default = "default", kaiser = "kaiser", none = "none", cureton.mulaik = "cm", cm = "cm")) elm(c("rotation.args", "std.ov"), TRUE, bl = TRUE) elm(c("rotation.args", "geomin.epsilon"), 0.001, nm = "]0, 0.01]") # was 0.01 < 0.6-10 elm(c("rotation.args", "orthomax.gamma"), 1, nm = "[0, 1]") elm(c("rotation.args", "cf.gamma"), 0, nm = "[0, 1]") elm(c("rotation.args", "oblimin.gamma"), 0, nm = "[0, 1000]") elm(c("rotation.args", "promax.kappa"), 4, nm = "[0, 1000]") elm(c("rotation.args", "target"), matrix(0, 0, 0), oklen = c(0L, 1000L)) elm(c("rotation.args", "target.mask"), matrix(0, 0, 0), oklen = c(0L, 1000L)) elm(c("rotation.args", "rstarts"), 30L, nm = "[0, 1000000]") elm(c("rotation.args", "algorithm"), "gpa", chr = c("gpa", "pairwise")) elm(c("rotation.args", "reflect"), TRUE, bl = TRUE) elm(c("rotation.args", "order.lv.by"), "index", chr = c("sumofsquares", "index", "none")) elm(c("rotation.args", "gpa.tol"), 1e-05, nm = "]0, 0.01]") elm(c("rotation.args", "tol"), 1e-08, nm = "]0, 0.01]") elm(c("rotation.args", "warn"), FALSE, bl = TRUE) elm(c("rotation.args", "verbose"), FALSE, bl = TRUE) elm(c("rotation.args", "jac.init.rot"), TRUE, bl = TRUE) elm(c("rotation.args", "max.iter"), 10000L, nm = "[0, 1000000]") # full data elm("std.ov", FALSE, bl = TRUE) elm("missing", "default", chr = c( default = "default", ml = "ml", direct = "ml", ml.x = "ml.x", direct.x = "ml.x", fiml.x = "ml.x", fiml = "ml", two.stage = "two.stage", twostage = "two.stage", two.step = "two.stage", twostep = "two.stage", robust.two.stage = "robust.two.stage", robust.twostage = "robust.two.stage", robust.two.step = "robust.two.stage", robust.twostep = "robust.two.stage", two.stage.robust = "robust.two.stage", twostage.robust = "robust.two.stage", two.step.robust = "robust.two.stage", twostep.robust = "robust.two.stage", listwise = "listwise", pairwise = "pairwise", available.cases = "available.cases", doubly.robust = "doubly.robust")) elm("sampling.weights.normalization", "total", chr = c( "total", "group", "none")) elm("samplestats", TRUE, bl = TRUE) # summary data elm("sample.cov.rescale", "default", bl = TRUE) elm("sample.cov.robust", FALSE, bl = TRUE) elm("sample.icov", TRUE, bl = TRUE) elm("ridge", FALSE, bl = TRUE) 
elm("ridge.constant", "default", chr = "default", nm = "[0, Inf[") # multiple groups !!! group.label and group.partial capitals OK !!! elm("group.label", NULL, oklen = c(0L, 100L)) # no checks elm("group.equal", "", chr = c("", "none", "loadings", "intercepts", "means", "composite.loadings", "regressions", "residuals", "residual.covariances", "thresholds", "lv.variances", "lv.covariances"), oklen = c(0L, 100L)) elm("group.partial", "", oklen = c(0L, 100L)) # no checks elm("group.w.free", FALSE, bl = TRUE) # clusters elm("level.label", NULL, oklen = c(0L, 100L)) # no checks # estimation elm("estimator", "default", chr = c( none = "none", default = "default", wlsmv = "wlsmv", ml = "ml", mlr = "mlr", mlf = "mlf", mlm = "mlm", mlmv = "mlmv", mlmvs = "mlmvs", gls = "gls", wls = "wls", wlsm = "wlsm", uls = "uls", ulsm = "ulsm", ulsmv = "ulsmv", pml = "pml", dls = "dls", ntrls = "ntrls", catml = "catml", dwls = "dwls", wlsmvs = "wlsmvs", ulsmvs = "ulsmvs", fml = "fml", umn = "fml", reml = "reml", mml = "mml", fabin = "fabin2", fabin2 = "fabin2", fabin3 = "fabin3", mgm = "mgm", guttman = "mgm", gutman = "mgm", gutmann = "mgm", guttman1952 = "mgm", js = "js", jsa = "jsa", james.stein = "js", james.stein.aggregated = "jsa", bentler = "bentler1982", bentler1982 = "bentler1982", miiv = "miiv", iv = "miiv", miiv.2sls = "miiv" )) elmdup("estimator.orig", "estimator") elm("estimator.args", list(), oklen = c(0L, 100L)) elm("likelihood", "default", chr = c("default", "normal", "wishart")) elm("link", "default", chr = c("default", "logit", "probit")) elm("representation", "default", chr = c( default = "LISREL", lisrel = "LISREL", ram = "RAM")) elm("do.fit", TRUE, bl = TRUE) elm("bounds", "none", chr = c( "none", "default", "standard", "user", "wide", "wide.zerovar", "pos.var", "pos.ov.var", "pos.lv.var")) # new in 0.6-6 elm("rstarts", 0L, nm = "[0, 1000]", num2int = TRUE) # new in 0.6-18 # inference elm("se", "default", chr = c( default = "default", none = "none", standard = "standard", robust.huber.white = "robust.huber.white", robust = "robust", robust.cluster = "robust.cluster", robust.cluster.sem = "robust.cluster.sem", sandwich = "robust.huber.white", robust.sem = "robust.sem", two.stage = "two.stage", robust.two.stage = "robust.two.stage", bootstrap = "bootstrap", boot = "bootstrap", first.order = "first.order", robust.mlm = "robust.sem", robust.mlr = "robust.huber.white", observed = "observed", expected = "expected"), oklen = c(1L, -1L) ) elm("test", "default", oklen = c(1L, 100L)) # checks for 'test' are in lav_test_rename !!! 
# information (se + test) elm("information", c("default", "default"), chr = c( "default", "expected", "observed", "first.order"), oklen = c(1L, 2L)) elm("h1.information", c("structured", "structured"), chr = c( "structured", "unstructured"), oklen = c(1L, 2L)) elm("observed.information", c("hessian", "default"), chr = c( "default", "hessian", "h1"), oklen = c(1L, 2L)) # information se only elm("information.meat", "default", chr = c(default = "first.order", first.order = "first.order")) elm("h1.information.meat", "default", chr = c( "default", "structured", "unstructured")) # information for 'Omega' (Yuan-Bentler test only) elm("omega.information", "default", chr = c( "default", "expected", "observed" )) elm("omega.h1.information", "default", chr = c( "default", "structured", "unstructured" )) elm("omega.information.meat", "default", chr = c( default = "first.order", first.order = "first.order" )) elm("omega.h1.information.meat", "default", chr = c( "default", "structured", "unstructured" )) # test statistic for scaling elm("scaled.test", "standard", oklen = c(1L, 100L)) # old approach trace.UGamma2 elm("ug2.old.approach", FALSE, bl = TRUE) # bootstrap elm("bootstrap", 1000L, nm = "[1, Inf[", num2int = TRUE) # gamma elm("gamma.n.minus.one", FALSE, bl = TRUE) elm("gamma.unbiased", FALSE, bl = TRUE) # optimization elm("control", list(), oklen = c(0L, 100L)) elm("optim.method", "default", chr = c( "nlminb", "gn", "default", "noniter", "none", "em" )) # gn for DLS, nlminb for the rest elm("optim.attempts", 4L, nm = "[1, 4]") elm("optim.force.converged", FALSE, bl = TRUE) elm("optim.gradient", "analytic", chr = c( analytic = "analytic", analytical = "analytic", numeric = "numerical", numerical = "numerical" )) elm("optim.init_nelder_mead", FALSE, bl = TRUE) elm("optim.var.transform", "none", chr = c( "none", "sqrt" )) elm("optim.parscale", "none", chr = c( none = "none", st = "standardized", stand = "standardized", standardize = "standardized", standardized = "standardized" )) elm("optim.partrace", FALSE, bl = TRUE) elm("optim.dx.tol", 1e-03, nm = "]0, 0.01]") # not too strict elm("optim.bounds", list(), oklen = c(0L, 100L)) elm("em.iter.max", 10000L, nm = "[100, 1e8]", num2int = TRUE) elm("em.fx.tol", 1e-08, nm = "]0, 0.01]") elm("em.dx.tol", 1e-04, nm = "]0, 0.01]") elm("em.zerovar.offset", 0.0001, nm = "]0, 0.01]") elm("em.h1.iter.max", 500L, nm = "[10, 1e7]", num2int = TRUE) elm("em.h1.tol", 1e-05, nm = "]0, 0.01]") # was 1e-06 < 0.6-9 elm("em.h1.warn", TRUE, bl = TRUE) elm("optim.gn.iter.max", 200L, nm = "[100, 1e8]", num2int = TRUE) elm("optim.gn.stephalf.max", 10L, nm = "[1, 1e8]", num2int = TRUE) elm("optim.gn.tol.x", 1e-05, nm = "]0, 0.01]") # numerical integration elm("integration.ngh", 21L, nm = "[1, 1000]", num2int = TRUE) # parallel elm("parallel", "no", chr = c( "no", "multicore", "snow" )) maxcpu <- parallel::detectCores() - 1L elm("ncpus", maxcpu, nm = paste0("[1,", maxcpu, "]")) elm("cl", NULL, oklen = c(0L, 1L)) elm("iseed", NULL, oklen = c(0L, 1L)) # categorical elm("zero.add", c(0.5, 0.0), chr = "default", nm = "[0, 1]", oklen = c(1L, -2L)) elm("zero.keep.margins", "default", chr = "default", bl = TRUE) elm("zero.cell.warn", FALSE, bl = TRUE) # since 0.6-1 elm("cat.wls.w", TRUE, bl = TRUE) # since 0.6-18 # starting values (char values checked in lav_options_set()) elm("start", "default", oklen = c(1L, 1000L)) # sanity checks elm("check.start", TRUE, bl = TRUE) elm("check.post", TRUE, bl = TRUE) elm("check.gradient", TRUE, bl = TRUE) elm("check.vcov", TRUE, bl = TRUE)
elm("check.lv.names", TRUE, bl = TRUE) elm("check.lv.interaction", TRUE, bl = TRUE) # more models/info elm("h1", TRUE, bl = TRUE) elm("baseline", TRUE, bl = TRUE) elm("baseline.conditional.x.free.slopes", TRUE, bl = TRUE) elm("implied", TRUE, bl = TRUE) elm("loglik", TRUE, bl = TRUE) # storage of information elm("store.vcov", "default", chr = "default", bl = TRUE) # internal elm("parser", "new", chr = c(old = "old", orig = "old", new = "new", classic = "old")) # categorical elm("categorical", "default", chr = "default", bl = TRUE) # ------------- store info in lavaan environment --------------- assign("opt.default", opt.default, lavaan_cache_env) assign("opt.check", opt.check, lavaan_cache_env) # return defaults return(opt.default) } # public function lavOptions <- function(x = NULL, default = NULL, mimic = "lavaan") { # nolint lavoptions <- lav_options_default() # selection only if (!is.null(x)) { if (is.character(x)) { # lower case only x <- tolower(x) # check if x is in names(lavoptions) not.ok <- which(!x %in% names(lavoptions)) if (length(not.ok) > 0L) { lav_msg_warn(gettextf( "option(s) %s not available", lav_msg_view(x[not.ok])) ) x <- x[-not.ok] } # return requested option(s) if (length(x) == 0L) { return(default) } else { lavoptions[x] } } else { lav_msg_stop(gettext("`x' must be a character string")) } } else { lavoptions } } lavaan/R/lav_samplestats_step2.R0000644000176200001440000000567714627656441016415 0ustar liggesuserslav_samplestats_step2 <- function(UNI = NULL, wt = NULL, ov.names = NULL, # error message only # polychoric and empty cells zero.add = c(0.5, 0.0), zero.keep.margins = TRUE, zero.cell.warn = FALSE, # keep track of tables with zero cells? zero.cell.tables = TRUE) { nvar <- length(UNI) COR <- diag(nvar) if (zero.cell.tables) { zero.var1 <- character(0L) zero.var2 <- character(0L) } # one-by-one (for now) for (j in seq_len(nvar - 1L)) { for (i in (j + 1L):nvar) { if (is.null(UNI[[i]]$th.idx) && is.null(UNI[[j]]$th.idx)) { rho <- lav_bvreg_cor_twostep_fit( fit.y1 = UNI[[i]], # linear fit.y2 = UNI[[j]], # linear wt = wt, Y1.name = ov.names[i], Y2.name = ov.names[j] ) COR[i, j] <- COR[j, i] <- rho } else if (is.null(UNI[[i]]$th.idx) && !is.null(UNI[[j]]$th.idx)) { # polyserial rho <- lav_bvmix_cor_twostep_fit( fit.y1 = UNI[[i]], # linear fit.y2 = UNI[[j]], # ordinal wt = wt, Y1.name = ov.names[i], Y2.name = ov.names[j] ) COR[i, j] <- COR[j, i] <- rho } else if (is.null(UNI[[j]]$th.idx) && !is.null(UNI[[i]]$th.idx)) { # polyserial rho <- lav_bvmix_cor_twostep_fit( fit.y1 = UNI[[j]], # linear fit.y2 = UNI[[i]], # ordinal wt = wt, Y1.name = ov.names[j], Y2.name = ov.names[i] ) COR[i, j] <- COR[j, i] <- rho } else if (!is.null(UNI[[i]]$th.idx) && !is.null(UNI[[j]]$th.idx)) { # polychoric correlation rho <- lav_bvord_cor_twostep_fit( fit.y1 = UNI[[j]], # ordinal fit.y2 = UNI[[i]], # ordinal wt = wt, zero.add = zero.add, zero.keep.margins = zero.keep.margins, zero.cell.warn = zero.cell.warn, zero.cell.flag = zero.cell.tables, Y1.name = ov.names[i], Y2.name = ov.names[j] ) if (zero.cell.tables) { if (attr(rho, "zero.cell.flag")) { zero.var1 <- c(zero.var1, ov.names[j]) zero.var2 <- c(zero.var2, ov.names[i]) } attr(rho, "zero.cell.flag") <- NULL } COR[i, j] <- COR[j, i] <- rho } # check for near 1.0 correlations if (abs(COR[i, j]) > 0.99) { lav_msg_warn(gettextf( "correlation between variables %1$s and %2$s is (nearly) 1.0", ov.names[i], ov.names[j])) } } } # keep track of tables with zero cells if (zero.cell.tables) { zero.cell.tables <- cbind(zero.var1, zero.var2) 
attr(COR, "zero.cell.tables") <- zero.cell.tables } COR } lavaan/R/lav_mvnorm_missing.R0000644000176200001440000013227714627656441016004 0ustar liggesusers# the multivariate normal distribution + missing values # (so-called 'FIML') # 1) loglikelihood (from raw data, or sample statitics) # 2) derivatives with respect to mu, Sigma, vech(Sigma) # 3) casewise scores with respect to mu, vech(Sigma), mu + vech(Sigma) # 4) hessian of mu + vech(Sigma) # 5) (unit) information of mu + vech(Sigma) # 5a: (unit) expected information # 5b: (unit) observed information # 5c: (unit) first.order information # 5d: lav_mvnorm_missing_information_both (both observed + first.order) # 6) inverted information h0 mu + vech(Sigma) # 6a: / # 6b: / # 6c: / # 7) ACOV h0 mu + vech(Sigma) # 7a: 1/N * inverted expected information # 7b: 1/N * inverted observed information # 7c: 1/N * inverted first-order information # 7d: sandwich acov # 10) additional functions # - lav_mvnorm_missing_impute_pattern # - lav_mvnorm_missing_estep # YR 09 Feb 2016: first version # YR 19 Mar 2017: 10) # YR 03 Okt 2018: a few functions gain a wt= argument # YR 01 Jul 2018: first_order functions gain cluster.idx= argument # 1) likelihood # 1a: input is raw data # - two strategies: 1) using missing patterns (pattern = TRUE) # 2) truly case per case (pattern = FALSE) # depending on the sample size, missing patterns, etc... one can be # (much) faster than the other lav_mvnorm_missing_loglik_data <- function(Y = NULL, Mu = NULL, wt = NULL, Sigma = NULL, x.idx = integer(0L), casewise = FALSE, pattern = TRUE, Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE) { if (pattern) { llik <- lav_mvnorm_missing_llik_pattern( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, log2pi = log2pi, minus.two = minus.two ) } else { llik <- lav_mvnorm_missing_llik_casewise( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, log2pi = log2pi, minus.two = minus.two ) } if (casewise) { loglik <- llik } else { loglik <- sum(llik, na.rm = TRUE) } loglik } # 1b: input are sample statistics (mean, cov, N) per pattern lav_mvnorm_missing_loglik_samplestats <- function(Yp = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), x.mean = NULL, x.cov = NULL, Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE) { LOG.2PI <- log(2 * pi) pat.N <- length(Yp) P <- length(Yp[[1]]$var.idx) # global inverse + logdet Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) Sigma.logdet <- attr(Sigma.inv, "logdet") # DIST/logdet per pattern DIST <- logdet <- P.LOG.2PI <- numeric(pat.N) # for each pattern, compute sigma.inv/logdet; compute DIST for all # observations of this pattern for (p in seq_len(pat.N)) { # observed variables for this pattern var.idx <- Yp[[p]]$var.idx # missing values for this pattern na.idx <- which(!var.idx) # constant P.LOG.2PI[p] <- sum(var.idx) * LOG.2PI * Yp[[p]]$freq # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = TRUE, S.logdet = Sigma.logdet ) logdet[p] <- attr(sigma.inv, "logdet") * Yp[[p]]$freq } else { sigma.inv <- Sigma.inv logdet[p] <- Sigma.logdet * Yp[[p]]$freq } TT <- Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx]) DIST[p] <- sum(sigma.inv * TT) * Yp[[p]]$freq } # loglikelihood all data if (log2pi) { loglik <- sum(-(P.LOG.2PI + logdet + DIST) / 2) } else { loglik <- sum(-(logdet + DIST) / 2) } if (minus.two) { loglik <- -2 * loglik } # 
x.idx if (length(x.idx) > 0L) { stopifnot(!is.null(x.cov)) # Note: x.cov should be identical to Sigma[x.idx, x.idx] # so we don't really need x.cov N <- sum(sapply(Yp, "[[", "freq")) loglik.x <- lav_mvnorm_h1_loglik_samplestats( sample.cov = x.cov, sample.nobs = N ) loglik <- loglik - loglik.x } loglik } ## casewise loglikelihoods # casewise Sinv.method lav_mvnorm_missing_llik_casewise <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE) { P <- NCOL(Y) LOG.2PI <- log(2 * pi) Mu <- as.numeric(Mu) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) # global inverse + logdet Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) Sigma.logdet <- attr(Sigma.inv, "logdet") # subtract Mu Yc <- t(t(Y) - Mu) # DIST/logdet per case DIST <- logdet <- P.LOG.2PI <- rep(as.numeric(NA), NY) # missing pattern per case OBS <- !is.na(Y) P.i <- rowSums(OBS) # constant P.LOG.2PI <- P.i * LOG.2PI # complete cases first (only an advantage if we have mostly complete # observations) other.idx <- seq_len(NY) complete.idx <- which(P.i == P) if (length(complete.idx) > 0L) { other.idx <- other.idx[-complete.idx] DIST[complete.idx] <- rowSums(Yc[complete.idx, , drop = FALSE] %*% Sigma.inv * Yc[complete.idx, , drop = FALSE]) logdet[complete.idx] <- Sigma.logdet } # non-complete cases for (i in other.idx) { na.idx <- which(!OBS[i, ]) # catch empty cases if (length(na.idx) == P) next # invert Sigma for this pattern sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = TRUE, S.logdet = Sigma.logdet ) logdet[i] <- attr(sigma.inv, "logdet") # distance for this case DIST[i] <- sum(sigma.inv * crossprod(Yc[i, OBS[i, ], drop = FALSE])) } # compute casewise loglikelihoods if (log2pi) { llik <- -(P.LOG.2PI + logdet + DIST) / 2 } else { llik <- -(logdet + DIST) / 2 } # minus.two if (minus.two) { llik <- -2 * llik } # weights? 
if (!is.null(wt)) { llik <- llik * wt } # x.idx if (length(x.idx) > 0L) { llik.x <- lav_mvnorm_missing_llik_casewise( Y = Y[, x.idx, drop = FALSE], wt = wt, Mu = Mu[x.idx], Sigma = Sigma[x.idx, x.idx, drop = FALSE], x.idx = integer(0L), Sinv.method = Sinv.method, log2pi = log2pi, minus.two = minus.two ) llik <- llik - llik.x } llik } # pattern-based, but casewise loglikelihoods lav_mvnorm_missing_llik_pattern <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", log2pi = TRUE, minus.two = FALSE) { P <- NCOL(Y) LOG.2PI <- log(2 * pi) Mu <- as.numeric(Mu) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) # global inverse + logdet Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = TRUE, Sinv.method = Sinv.method ) Sigma.logdet <- attr(Sigma.inv, "logdet") # subtract Mu Yc <- t(t(Y) - Mu) # DIST/logdet per case DIST <- logdet <- P.LOG.2PI <- rep(as.numeric(NA), NY) # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # for each pattern, compute sigma.inv/logdet; compute DIST for all # observations of this pattern for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # identify cases with this pattern case.idx <- Mp$case.idx[[p]] # constant P.LOG.2PI[case.idx] <- sum(var.idx) * LOG.2PI # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = TRUE, S.logdet = Sigma.logdet ) logdet[case.idx] <- attr(sigma.inv, "logdet") } else { sigma.inv <- Sigma.inv logdet[case.idx] <- Sigma.logdet } if (Mp$freq[p] == 1L) { DIST[case.idx] <- sum(sigma.inv * crossprod(Yc[case.idx, var.idx, drop = FALSE])) } else { DIST[case.idx] <- rowSums(Yc[case.idx, var.idx, drop = FALSE] %*% sigma.inv * Yc[case.idx, var.idx, drop = FALSE]) } } # compute casewise loglikelihoods if (log2pi) { llik <- -(P.LOG.2PI + logdet + DIST) / 2 } else { llik <- -(logdet + DIST) / 2 } # minus.two if (minus.two) { llik <- -2 * llik } # weights? if (!is.null(wt)) { llik <- llik * wt } # x.idx -- using casewise (as patterns for Y may not be the same as # patterns for Y[,-x.idx]) if (length(x.idx) > 0L) { llik.x <- lav_mvnorm_missing_llik_casewise( Y = Y[, x.idx, drop = FALSE], wt = wt, Mu = Mu[x.idx], Sigma = Sigma[x.idx, x.idx, drop = FALSE], x.idx = integer(0L), Sinv.method = Sinv.method, log2pi = log2pi, minus.two = minus.two ) llik <- llik - llik.x } llik } # 2. 
# 2. Derivatives # 2a: derivative logl with respect to mu lav_mvnorm_missing_dlogl_dmu <- function(Y = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { SC <- lav_mvnorm_missing_scores_mu( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sigma.inv = Sigma.inv, Sinv.method = Sinv.method ) colSums(SC, na.rm = TRUE) } # 2abis: using samplestats lav_mvnorm_missing_dlogl_dmu_samplestats <- function(Yp = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { pat.N <- length(Yp) P <- length(Yp[[1]]$var.idx) if (is.null(Sigma.inv)) { Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # dmu dmu <- numeric(P) # for each pattern, compute sigma.inv for (p in seq_len(pat.N)) { # observed variables for this pattern var.idx <- Yp[[p]]$var.idx # missing values for this pattern na.idx <- which(!var.idx) # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } # dmu for this pattern dmu.pattern <- as.numeric(sigma.inv %*% (Yp[[p]]$MY - Mu[var.idx])) # update mu dmu[var.idx] <- dmu[var.idx] + (dmu.pattern * Yp[[p]]$freq) } # fixed.x? if (length(x.idx) > 0L) { dmu[x.idx] <- 0 } dmu } # 2b: derivative logl with respect to Sigma (full matrix, ignoring symmetry) lav_mvnorm_missing_dlogl_dSigma <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { P <- NCOL(Y) Mu <- as.numeric(Mu) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # subtract Mu Yc <- t(t(Y) - Mu) # dvechSigma dSigma <- matrix(0, P, P) # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # for each pattern for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # cases with this pattern case.idx <- Mp$case.idx[[p]] # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } if (!is.null(wt)) { FREQ <- sum(wt[case.idx]) } else { FREQ <- Mp$freq[p] } if (length(case.idx) > 1L) { if (!is.null(wt)) { out <- stats::cov.wt(Y[case.idx, var.idx, drop = FALSE], wt = wt[Mp$case.idx[[p]]], method = "ML" ) SY <- out$cov MY <- out$center W.tilde <- SY + tcrossprod(MY - Mu[var.idx]) } else { W.tilde <- crossprod(Yc[case.idx, var.idx, drop = FALSE]) / FREQ } } else { W.tilde <- tcrossprod(Yc[case.idx, var.idx]) } # dSigma for this pattern dSigma.pattern <- matrix(0, P, P) dSigma.pattern[var.idx, var.idx] <- -(1 / 2) * (sigma.inv - (sigma.inv %*% W.tilde %*% sigma.inv)) # update dSigma dSigma <- dSigma + (dSigma.pattern * FREQ) } # fixed.x?
if (length(x.idx) > 0L) { dSigma[x.idx, x.idx] <- 0 } dSigma } # 2bbis: using samplestats lav_mvnorm_missing_dlogl_dSigma_samplestats <- function(Yp = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { pat.N <- length(Yp) P <- length(Yp[[1]]$var.idx) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # dvechSigma dSigma <- matrix(0, P, P) # for each pattern for (p in seq_len(pat.N)) { # observed variables for this pattern var.idx <- Yp[[p]]$var.idx # missing values for this pattern na.idx <- which(!var.idx) # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } W.tilde <- Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx]) # dSigma for this pattern dSigma.pattern <- matrix(0, P, P) dSigma.pattern[var.idx, var.idx] <- -(1 / 2) * (sigma.inv - (sigma.inv %*% W.tilde %*% sigma.inv)) # update dSigma dSigma <- dSigma + (dSigma.pattern * Yp[[p]]$freq) } # fixed.x? if (length(x.idx) > 0L) { dSigma[x.idx, x.idx] <- 0 } dSigma } # 2c: derivative logl with respect to vech(Sigma) lav_mvnorm_missing_dlogl_dvechSigma <- function(Y = NULL, wt = NULL, Mu = NULL, x.idx = integer(0L), Sigma = NULL, Sigma.inv = NULL, Sinv.method = "eigen") { dSigma <- lav_mvnorm_missing_dlogl_dSigma( Y = Y, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sigma.inv = Sigma.inv, Sinv.method = Sinv.method ) dvechSigma <- as.numeric(lav_matrix_duplication_pre( as.matrix(lav_matrix_vec(dSigma)) )) dvechSigma } # 2cbis: using samplestats lav_mvnorm_missing_dlogl_dvechSigma_samplestats <- function(Yp = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { pat.N <- length(Yp) P <- length(Yp[[1]]$var.idx) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # dvechSigma dvechSigma <- numeric(P * (P + 1) / 2) # for each pattern for (p in seq_len(pat.N)) { # observed variables for this pattern var.idx <- Yp[[p]]$var.idx # missing values for this pattern na.idx <- which(!var.idx) # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } W.tilde <- Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx]) # dSigma for this pattern dSigma.pattern <- matrix(0, P, P) dSigma.pattern[var.idx, var.idx] <- -(1 / 2) * (sigma.inv - (sigma.inv %*% W.tilde %*% sigma.inv)) # fixed.x? if (length(x.idx) > 0L) { dSigma.pattern[x.idx, x.idx] <- 0 } # convert to vechSigma dvechSigma.pattern <- as.numeric(lav_matrix_duplication_pre( as.matrix(lav_matrix_vec(dSigma.pattern)) )) # update dvechSigma dvechSigma <- dvechSigma + (dvechSigma.pattern * Yp[[p]]$freq) } dvechSigma }
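# Example (illustrative sketch, continuing the toy objects above): the
# vech-based gradient of 2c is the duplication-matrix premultiplied vec of
# the full-matrix gradient of 2b, i.e.
#   dS  <- lav_mvnorm_missing_dlogl_dSigma(Y = Y, Mu = Mu, Sigma = S)
#   dvS <- lav_mvnorm_missing_dlogl_dvechSigma(Y = Y, Mu = Mu, Sigma = S)
#   all.equal(dvS, as.numeric(lav_matrix_duplication_pre(
#     as.matrix(lav_matrix_vec(dS)))))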
# 3. Casewise scores # 3a: casewise scores with respect to mu lav_mvnorm_missing_scores_mu <- function(Y = NULL, wt = NULL, Mp = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { P <- NCOL(Y) Mu <- as.numeric(Mu) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # subtract Mu Yc <- t(t(Y) - Mu) # dmu per case dmu <- matrix(as.numeric(NA), NY, P) # for each pattern, compute sigma.inv for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) case.idx <- Mp$case.idx[[p]] # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } # compute dMu for all observations of this pattern dmu[case.idx, var.idx] <- Yc[case.idx, var.idx, drop = FALSE] %*% sigma.inv } # weights if (!is.null(wt)) { dmu <- dmu * wt } # fixed.x? if (length(x.idx) > 0L) { dmu[, x.idx] <- 0 } dmu } # 3b: casewise scores with respect to vech(Sigma) lav_mvnorm_missing_scores_vech_sigma <- function(Y = NULL, wt = NULL, Mp = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { P <- NCOL(Y) Mu <- as.numeric(Mu) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # for the tcrossprod idx1 <- lav_matrix_vech_col_idx(P) idx2 <- lav_matrix_vech_row_idx(P) # vech(Sigma.inv) iSigma <- lav_matrix_vech(Sigma.inv) # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # subtract Mu Yc <- t(t(Y) - Mu) # SC SC <- matrix(as.numeric(NA), nrow = NY, ncol = length(iSigma)) # for each pattern for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # cases with this pattern case.idx <- Mp$case.idx[[p]] # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) tmp <- matrix(0, P, P) tmp[var.idx, var.idx] <- sigma.inv isigma <- lav_matrix_vech(tmp) } else { sigma.inv <- Sigma.inv isigma <- iSigma } # postmultiply these cases with sigma.inv Yc[case.idx, var.idx] <- Yc[case.idx, var.idx] %*% sigma.inv # tcrossprod SC[case.idx, ] <- Yc[case.idx, idx1] * Yc[case.idx, idx2] # subtract isigma from each row SC[case.idx, ] <- t(t(SC[case.idx, , drop = FALSE]) - isigma) } # adjust for vech SC[, lav_matrix_diagh_idx(P)] <- SC[, lav_matrix_diagh_idx(P)] / 2 # weights if (!is.null(wt)) { SC <- SC * wt } # fixed.x? if (length(x.idx) > 0L) { SC[, lav_matrix_vech_which_idx(n = P, idx = x.idx)] <- 0 } SC } # 3c: casewise scores with respect to mu + vech(Sigma) lav_mvnorm_missing_scores_mu_vech_sigma <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { P <- NCOL(Y) Mu <- as.numeric(Mu) if (!is.null(wt)) { N <- sum(wt) } else { N <- NROW(Y) } NY <- NROW(Y) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # for the tcrossprod idx1 <- lav_matrix_vech_col_idx(P) idx2 <- lav_matrix_vech_row_idx(P) # vech(Sigma.inv) iSigma <- lav_matrix_vech(Sigma.inv) # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # subtract Mu Yc <- t(t(Y) - Mu) # dmu per case dmu <- matrix(as.numeric(NA), NY, P) # SC SC <- matrix(as.numeric(NA), nrow = NY, ncol = length(iSigma)) # for each pattern, compute Yc %*% sigma.inv for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # cases with this pattern case.idx <- Mp$case.idx[[p]] # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) tmp <- matrix(0, P, P) tmp[var.idx, var.idx] <- sigma.inv isigma <- lav_matrix_vech(tmp) } else { sigma.inv <- Sigma.inv isigma <- iSigma } # compute dMu for all observations of this pattern dmu[case.idx, var.idx] <- Yc[case.idx, var.idx, drop = FALSE] %*% sigma.inv # postmultiply these cases with sigma.inv Yc[case.idx, var.idx] <- Yc[case.idx, var.idx] %*% sigma.inv # tcrossprod SC[case.idx, ] <- Yc[case.idx, idx1] * Yc[case.idx, idx2] # subtract isigma from each row SC[case.idx, ] <- t(t(SC[case.idx, , drop = FALSE]) - isigma) } # adjust for vech SC[, lav_matrix_diagh_idx(P)] <- SC[, lav_matrix_diagh_idx(P)] / 2 out <- cbind(dmu, SC) # weights if (!is.null(wt)) { out <- out * wt } # fixed.x? if (length(x.idx) > 0L) { not.x <- lav_matrix_vech_which_idx(n = P, idx = x.idx, #diagonal = !correlation, add.idx.at.start = TRUE) out[, not.x] <- 0 } out }
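# Example (illustrative sketch, toy objects as above): the unit first-order
# information of 5c below is, up to the 1/N factor, the crossproduct of
# these casewise scores (assuming no empty cases and no weights):
#   SC <- lav_mvnorm_missing_scores_mu_vech_sigma(Y = Y, Mu = Mu, Sigma = S)
#   B1 <- lav_matrix_crossprod(SC) / NROW(Y)
#   B2 <- lav_mvnorm_missing_information_firstorder(Y = Y, Mu = Mu, Sigma = S)
#   all.equal(B1, B2)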
# 4) Hessian of logl lav_mvnorm_missing_logl_hessian_data <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { # missing patterns Yp <- lav_samplestats_missing_patterns(Y = Y, Mp = Mp, wt = wt) lav_mvnorm_missing_logl_hessian_samplestats( Yp = Yp, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv ) } lav_mvnorm_missing_logl_hessian_samplestats <- function(Yp = NULL, # wt not needed Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { pat.N <- length(Yp) P <- length(Yp[[1]]$var.idx) if (is.null(Sigma.inv)) { Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } H11 <- matrix(0, P, P) H21 <- matrix(0, P * (P + 1) / 2, P) H22 <- matrix(0, P * (P + 1) / 2, P * (P + 1) / 2) # for each pattern, compute sigma.inv for (p in seq_len(pat.N)) { # observed variables var.idx <- Yp[[p]]$var.idx pat.freq <- Yp[[p]]$freq # missing values for this pattern na.idx <- which(!var.idx) # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } S.inv <- matrix(0, P, P) S.inv[var.idx, var.idx] <- sigma.inv tmp21 <- matrix(0, P, 1) tmp21[var.idx, 1] <- sigma.inv %*% (Yp[[p]]$MY - Mu[var.idx]) W.tilde <- Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx]) AAA <- (sigma.inv %*% (2 * W.tilde - Sigma[var.idx, var.idx, drop = FALSE]) %*% sigma.inv) tmp22 <- matrix(0, P, P) tmp22[var.idx, var.idx] <- AAA i11 <- S.inv i21 <- lav_matrix_duplication_pre(tmp21 %x% S.inv) i22 <- (1 / 2) * lav_matrix_duplication_pre_post(S.inv %x% tmp22) H11 <- H11 + pat.freq * i11 H21 <- H21 + pat.freq * i21 H22 <- H22 + pat.freq * i22 } H12 <- t(H21) out <- -1 * rbind( cbind(H11, H12), cbind(H21, H22) ) # fixed.x? if (length(x.idx) > 0L) { not.x <- lav_matrix_vech_which_idx(n = P, idx = x.idx, #diagonal = !correlation, add.idx.at.start = TRUE) out[, not.x] <- 0 out[not.x, ] <- 0 } out } # 5) Information # 5a: expected unit information Mu and vech(Sigma) # (only useful under MCAR) # (old term: Abeta, expected) lav_mvnorm_missing_information_expected <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, # unused Sigma = NULL, x.idx = integer(0L), Sigma.inv = NULL, Sinv.method = "eigen") { P <- NCOL(Y) if (is.null(Sigma.inv)) { Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # N if (!is.null(wt)) { if (length(Mp$empty.idx) > 0L) { N <- sum(wt) - sum(wt[Mp$empty.idx]) } else { N <- sum(wt) } } else { N <- sum(Mp$freq) # removed empty cases!
} I11 <- matrix(0, P, P) I22 <- matrix(0, P * (P + 1) / 2, P * (P + 1) / 2) # for each pattern, compute sigma.inv for (p in seq_len(Mp$npatterns)) { # observed variables var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) } else { sigma.inv <- Sigma.inv } S.inv <- matrix(0, P, P) S.inv[var.idx, var.idx] <- sigma.inv S2.inv <- 0.5 * lav_matrix_duplication_pre_post(S.inv %x% S.inv) if (!is.null(wt)) { FREQ <- sum(wt[Mp$case.idx[[p]]]) } else { FREQ <- Mp$freq[p] } I11 <- I11 + FREQ * S.inv I22 <- I22 + FREQ * S2.inv } out <- lav_matrix_bdiag(I11, I22) / N # fixed.x? if (length(x.idx) > 0L) { not.x <- lav_matrix_vech_which_idx(n = P, idx = x.idx, # diagonal = !correlation, add.idx.at.start = TRUE) out[not.x, ] <- 0 out[, not.x] <- 0 } out } # 5b: unit observed information Mu and vech(Sigma) from raw data # (old term: Abeta, observed) lav_mvnorm_missing_information_observed_data <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # N if (!is.null(wt)) { if (length(Mp$empty.idx) > 0L) { N <- sum(wt) - sum(wt[Mp$empty.idx]) } else { N <- sum(wt) } } else { N <- sum(Mp$freq) # removed empty cases! } # observed information observed <- lav_mvnorm_missing_logl_hessian_data( Y = Y, Mp = Mp, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv ) -observed / N } # 5b-bis: unit observed information Mu and vech(Sigma) from samplestats lav_mvnorm_missing_information_observed_samplestats <- function(Yp = NULL, # wt not needed Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { N <- sum(sapply(Yp, "[[", "freq")) # implicitly: removed empty cases! # observed information observed <- lav_mvnorm_missing_logl_hessian_samplestats( Yp = Yp, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv ) -observed / N } # 5c: unit first-order information Mu and vech(Sigma) from raw data # (old term: Bbeta) lav_mvnorm_missing_information_firstorder <- function(Y = NULL, Mp = NULL, wt = NULL, cluster.idx = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL) { # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } # N if (!is.null(wt)) { if (length(Mp$empty.idx) > 0L) { N <- sum(wt) - sum(wt[Mp$empty.idx]) } else { N <- sum(wt) } } else { N <- sum(Mp$freq) # removed empty cases! 
} SC <- lav_mvnorm_missing_scores_mu_vech_sigma( Y = Y, Mp = Mp, wt = wt, Mu = Mu, Sigma = Sigma, x.idx = x.idx, Sinv.method = Sinv.method, Sigma.inv = Sigma.inv ) # handle clustering if (!is.null(cluster.idx)) { # take the sum within each cluster SC <- rowsum(SC, group = cluster.idx, reorder = FALSE, na.rm = TRUE) # lower bias if the number of clusters is not very high nC <- nrow(SC) correction.factor <- nC / (nC - 1) SC <- SC * sqrt(correction.factor) } lav_matrix_crossprod(SC) / N } # 5d: both unit first-order information and expected/observed information # from raw data, in one go for efficiency lav_mvnorm_missing_information_both <- function(Y = NULL, Mp = NULL, wt = NULL, cluster.idx = NULL, Mu = NULL, Sigma = NULL, x.idx = integer(0L), Sinv.method = "eigen", Sigma.inv = NULL, information = "observed") { P <- NCOL(Y) Mu <- as.numeric(Mu) if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # for the tcrossprod idx1 <- lav_matrix_vech_col_idx(P) idx2 <- lav_matrix_vech_row_idx(P) # vech(Sigma.inv) iSigma <- lav_matrix_vech(Sigma.inv) # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } if (information == "observed") { Yp <- lav_samplestats_missing_patterns(Y = Y, Mp = Mp, wt = wt) } # N if (!is.null(wt)) { if (length(Mp$empty.idx) > 0L) { N <- sum(wt) - sum(wt[Mp$empty.idx]) } else { N <- sum(wt) } } else { N <- sum(Mp$freq) # removed empty cases! } # subtract Mu Yc <- t(t(Y) - Mu) # dmu per case dmu <- matrix(as.numeric(NA), nrow = NROW(Y), ncol = P) # SC SC <- matrix(as.numeric(NA), nrow = NROW(Y), ncol = length(iSigma)) # expected/observed information I11 <- matrix(0, P, P) I22 <- matrix(0, P * (P + 1) / 2, P * (P + 1) / 2) if (information == "observed") { I21 <- matrix(0, P * (P + 1) / 2, P) } # for each pattern, compute Yc %*% sigma.inv for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # missing values for this pattern na.idx <- which(!var.idx) # cases with this pattern case.idx <- Mp$case.idx[[p]] # invert Sigma for this pattern if (length(na.idx) > 0L) { sigma.inv <- lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ) tmp <- matrix(0, P, P) tmp[var.idx, var.idx] <- sigma.inv isigma <- lav_matrix_vech(tmp) } else { sigma.inv <- Sigma.inv isigma <- iSigma } # information S.inv <- matrix(0, P, P) S.inv[var.idx, var.idx] <- sigma.inv if (!is.null(wt)) { FREQ <- sum(wt[case.idx]) } else { FREQ <- Mp$freq[p] } if (information == "expected") { S2.inv <- 0.5 * lav_matrix_duplication_pre_post(S.inv %x% S.inv) I11 <- I11 + FREQ * S.inv I22 <- I22 + FREQ * S2.inv } else { pat.freq <- Yp[[p]]$freq tmp21 <- matrix(0, P, 1) tmp21[var.idx, 1] <- sigma.inv %*% (Yp[[p]]$MY - Mu[var.idx]) W.tilde <- Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx]) AAA <- (sigma.inv %*% (2 * W.tilde - Sigma[var.idx, var.idx, drop = FALSE]) %*% sigma.inv) tmp22 <- matrix(0, P, P) tmp22[var.idx, var.idx] <- AAA i11 <- S.inv i21 <- lav_matrix_duplication_pre(tmp21 %x% S.inv) i22 <- (1 / 2) * lav_matrix_duplication_pre_post(S.inv %x% tmp22) I11 <- I11 + pat.freq * i11 I21 <- I21 + pat.freq * i21 I22 <- I22 + pat.freq * i22 } # compute dMu for all observations of this pattern dmu[case.idx, var.idx] <- Yc[case.idx, var.idx, drop = FALSE] %*% sigma.inv # postmultiply these cases with sigma.inv Yc[case.idx, var.idx] <- Yc[case.idx, var.idx] %*% sigma.inv # tcrossprod SC[case.idx, ] <- Yc[case.idx, idx1] * Yc[case.idx, idx2] # subtract isigma from each row SC[case.idx, ] <- t(t(SC[case.idx, , drop = FALSE]) - isigma) } # adjust for vech SC[, lav_matrix_diagh_idx(P)] <- SC[, lav_matrix_diagh_idx(P)] / 2 # add dmu SC <- cbind(dmu, SC) # weights if (!is.null(wt)) { SC <- SC * wt } # fixed.x? if (length(x.idx) > 0L) { not.x <- lav_matrix_vech_which_idx(n = P, idx = x.idx, #diagonal = !correlation, add.idx.at.start = TRUE) SC[, not.x] <- 0 } # handle clustering if (!is.null(cluster.idx)) { # take the sum within each cluster SC <- rowsum(SC, group = cluster.idx, reorder = FALSE, na.rm = TRUE) # lower bias if the number of clusters is not very high nC <- nrow(SC) correction.factor <- nC / (nC - 1) SC <- SC * sqrt(correction.factor) } # first order information Bbeta <- lav_matrix_crossprod(SC) / N # expected/observed information if (information == "expected") { Abeta <- lav_matrix_bdiag(I11, I22) / N } else { Abeta <- rbind( cbind(I11, t(I21)), cbind(I21, I22) ) / N } # fixed.x? if (length(x.idx) > 0L) { not.x <- lav_matrix_vech_which_idx(n = P, idx = x.idx, # diagonal = !correlation, add.idx.at.start = TRUE) Abeta[not.x, ] <- 0 Abeta[, not.x] <- 0 } list(Abeta = Abeta, Bbeta = Bbeta) } # 6) inverted information h0 mu + vech(Sigma) # 6a: (unit) inverted expected information # NOT USED: is not equal to solve(expected) # (although it does converge to the same solution eventually) # lav_mvnorm_missing_inverted_information_expected <- function(Y = NULL, # Mp = NULL, # Mu = NULL,# unused # Sigma = NULL) { # P <- NCOL(Y) # # # missing patterns # if(is.null(Mp)) { # Mp <- lav_data_missing_patterns(Y) # } # # # N # N <- sum(Mp$freq) # removed empty cases! # # I11 <- matrix(0, P, P) # I22 <- matrix(0, P*(P+1)/2, P*(P+1)/2) # # # for each pattern # for(p in seq_len(Mp$npatterns)) { # # # observed variables # var.idx <- Mp$pat[p,] # # sigma <- matrix(0, P, P) # sigma[var.idx, var.idx] <- Sigma[var.idx, var.idx] # sigma2 <- 2 * lav_matrix_duplication_ginv_pre_post(sigma %x% sigma) # # I11 <- I11 + Mp$freq[p] * sigma # I22 <- I22 + Mp$freq[p] * sigma2 # } # # lav_matrix_bdiag(I11, I22)/N # } # 6b: / # 6c: / # 7) ACOV h0 mu + vech(Sigma) # 7a: 1/N * inverted expected information # 7b: 1/N * inverted observed information # 7c: 1/N * inverted first-order information # 7d: sandwich acov # 10) other stuff # single imputation of missing cells, under the normal model, pattern-based # FIXME: add wt lav_mvnorm_missing_impute_pattern <- function(Y = NULL, Mp = NULL, Mu = NULL, Sigma = NULL, Sigma.inv = NULL, Sinv.method = "eigen") { Mu <- as.numeric(Mu) # complete data Y.complete <- Y # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # subtract Mu Yc <- t(t(Y) - Mu) # fill in data per pattern for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # if complete, nothing to do if (all(var.idx)) { next } # missing values for this pattern na.idx <- which(!var.idx) # extract observed data for these (centered) cases Oc <- Yc[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] # invert Sigma (Sigma_22, observed part only) for this pattern Sigma_22.inv <- try( lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ), silent = TRUE ) if (inherits(Sigma_22.inv, "try-error")) { lav_msg_stop(gettext("Sigma_22.inv cannot be inverted")) } # estimate missing values in this pattern Sigma_12 <- Sigma[!var.idx, var.idx, drop = FALSE] Y.missing <-
t(Sigma_12 %*% Sigma_22.inv %*% t(Oc) + Mu[!var.idx]) # complete data for this pattern Y.complete[Mp$case.idx[[p]], !var.idx] <- Y.missing } Y.complete } # E-step: expectations of sum, sum of squares, sum of crossproducts # plus correction lav_mvnorm_missing_estep <- function(Y = NULL, Mp = NULL, wt = NULL, Mu = NULL, Sigma = NULL, Sigma.inv = NULL, Sinv.method = "eigen") { P <- NCOL(Y) Mu <- as.numeric(Mu) # missing patterns if (is.null(Mp)) { Mp <- lav_data_missing_patterns(Y) } if (is.null(Sigma.inv)) { # invert Sigma Sigma.inv <- lav_matrix_symmetric_inverse( S = Sigma, logdet = FALSE, Sinv.method = Sinv.method ) } # T1, T2 T1 <- numeric(P) T2 <- matrix(0, P, P) # update T1 and T2 per pattern for (p in seq_len(Mp$npatterns)) { # observed values for this pattern var.idx <- Mp$pat[p, ] # extract observed data O <- Y[Mp$case.idx[[p]], Mp$pat[p, ], drop = FALSE] # if complete, just compute first and second moments if (all(var.idx)) { if (!is.null(wt)) { WT <- wt[Mp$case.idx[[p]]] T1 <- T1 + colSums(WT * O) T2 <- T2 + crossprod(sqrt(WT) * O) } else { # complete pattern T1 <- T1 + colSums(O) T2 <- T2 + crossprod(O) } next } # missing values for this pattern na.idx <- which(!var.idx) # partition Sigma (1=missing, 2=complete) Sigma_11 <- Sigma[!var.idx, !var.idx, drop = FALSE] Sigma_12 <- Sigma[!var.idx, var.idx, drop = FALSE] Sigma_21 <- Sigma[var.idx, !var.idx, drop = FALSE] # invert Sigma (Sigma_22, observed part only) for this pattern Sigma_22.inv <- try( lav_matrix_symmetric_inverse_update( S.inv = Sigma.inv, rm.idx = na.idx, logdet = FALSE ), silent = TRUE ) if (inherits(Sigma_22.inv, "try-error")) { lav_msg_stop(gettext("Sigma_22.inv cannot be inverted")) } # estimate missing values in this pattern Oc <- t(t(O) - Mu[var.idx]) Y.missing <- t(Sigma_12 %*% Sigma_22.inv %*% t(Oc) + Mu[!var.idx]) # complete data for this pattern Y.complete <- matrix(0, Mp$freq[[p]], P) Y.complete[, var.idx] <- O Y.complete[, !var.idx] <- Y.missing if (!is.null(wt)) { WT <- wt[Mp$case.idx[[p]]] T1.pat <- colSums(WT * Y.complete) T2.pat <- crossprod(sqrt(WT) * Y.complete) } else { # 1. SUM `completed' pattern T1.pat <- colSums(Y.complete) # 2. 
CROSSPROD `completed' pattern T2.pat <- crossprod(Y.complete) } # correction for missing cells: conditional covariances T2.p11 <- Sigma_11 - (Sigma_12 %*% Sigma_22.inv %*% Sigma_21) if (!is.null(wt)) { T2.pat[!var.idx, !var.idx] <- T2.pat[!var.idx, !var.idx] + (T2.p11 * sum(WT)) } else { T2.pat[!var.idx, !var.idx] <- T2.pat[!var.idx, !var.idx] + (T2.p11 * Mp$freq[[p]]) } # accumulate T1 <- T1 + T1.pat T2 <- T2 + T2.pat } list(T1 = T1, T2 = T2) } lavaan/R/ctr_mplus2lavaan.R0000644000176200001440000014070214627656441015340 0ustar liggesusers# this code is written by Michael Hallquist # First draft of parser to convert Mplus model syntax to lavaan model syntax # idea: build parTable and run model from mplus syntax # then perhaps write export function: parTable2Mplus # and/or parTable2lavaan trimSpace <- function(string) { stringTrim <- sapply(string, function(x) { x <- sub("^\\s*", "", x, perl = TRUE) x <- sub("\\s*$", "", x, perl = TRUE) return(x) }, USE.NAMES = FALSE) return(stringTrim) } # small utility function to join strings in a regexp loop joinRegexExpand <- function(cmd, argExpand, matches, iterator, matchLength = "match.length") { if (iterator == 1 && matches[iterator] > 1) { pre <- substr(cmd, 1, matches[iterator] - 1) } else { pre <- "" } # if this is not the final match, then get sub-string between the end of this match and the beginning of the next # otherwise, match to the end of the command post.end <- ifelse(iterator < length(matches), matches[iterator + 1] - 1, nchar(cmd)) post <- substr(cmd, matches[iterator] + attr(matches, matchLength)[iterator], post.end) cmd.expand <- paste(pre, argExpand, post, sep = "") return(cmd.expand) } # expand Mplus hyphen syntax (will also expand constraints with hyphens) expandCmd <- function(cmd, alphaStart = TRUE) { # use negative lookahead and negative lookbehind to eliminate possibility of hyphen being used as a negative starting value (e.g., x*-1) # also avoid match of anything that includes a decimal point, such as a floating-point starting value -10.5*x1 # if alphaStart==TRUE, then require that the matches before and after hyphens begin with alpha character # this is used for variable names, whereas the more generic expansion works for numeric constraints and such # need to do a better job of this so that u1-u20* is supported... I don't think the regexp below is general enough # if (alphaStart) { # hyphens <- gregexpr("[_A-Za-z]+\\w*\\s*-\\s*[_A-Za-z]+\\w*", cmd, perl=TRUE)[[1]] # } else { # hyphens <- gregexpr("(?!<(\\*|\\.))\\w+(?!(\\*|\\.))\\s*-\\s*(?!<(\\*|\\.))\\w+(?!(\\*|\\.))", cmd, perl=TRUE)[[1]] # } # hyphens <- gregexpr("(?!<(\\*|\\.))\\w+(?!(\\*|\\.))\\s*-\\s*(?!<(\\*|\\.))\\w+(?!(\\*|\\.))", cmd, perl=TRUE)[[1]] # support trailing @XXX. 
Will still fail on Trait1-Trait3*XXX hyphens <- gregexpr("(?!<(\\*|\\.))\\w+(?!(\\*|\\.))\\s*-\\s*(?!<(\\*|\\.))\\w+(?!(\\*|\\.))(@[\\d\\.\\-]+)?", cmd, perl = TRUE)[[1]] # Promising, but this is still failing in the case of x3*1 -4.25*x4 # On either side of a hyphen, require alpha character followed by alphanumeric # This enforces that neither side of the hyphen can be a number # Alternatively, match digits on either side alone # hyphens <- gregexpr("([A-z]+\\w*\\s*-\\s*[A-z]+\\w*(@[\\d\\.-]+)?|\\d+\\s*-\\s*\\d+)", cmd, perl=TRUE)[[1]] if (hyphens[1L] > 0) { cmd.expand <- c() ep <- 1 for (v in 1:length(hyphens)) { # match one keyword before and after hyphen argsplit <- strsplit(substr(cmd, hyphens[v], hyphens[v] + attr(hyphens, "match.length")[v] - 1), "\\s*-\\s*", perl = TRUE)[[1]] v_pre <- argsplit[1] v_post <- argsplit[2] v_post.suffix <- sub("^([^@]+)(@[\\d\\-.]+)?$", "\\2", v_post, perl = TRUE) # will be empty string if not present v_post <- sub("@[\\d\\-.]+$", "", v_post, perl = TRUE) # trim @ suffix # If v_pre and v_post contain leading alpha characters, verify that these prefixes match. # Otherwise, there is nothing to expand, as in the case of MODEL CONSTRAINT: e1e2=e1-e2_n. v_pre.alpha <- sub("\\d+$", "", v_pre, perl = TRUE) v_post.alpha <- sub("\\d+$", "", v_post, perl = TRUE) # only enforce prefix match if we have leading alpha characters (i.e., not simple numeric 1 - 3 syntax) if (length(v_pre.alpha) > 0L && length(v_post.alpha) > 0L) { # if alpha prefixes do not match, assume that the hyphen is not for expansion (e.g., in subtraction case) if (v_pre.alpha != v_post.alpha) { return(cmd) } } # the basic positive lookbehind blows up with pure numeric constraints (1 - 3) because no alpha char precedes digit # can use a non-capturing alternation grouping to allow for digits only or the final digits after alphas (as in v_post.num) v_pre.num <- as.integer(sub("\\w*(?<=[A-Za-z_])(\\d+)$", "\\1", v_pre, perl = TRUE)) # use positive lookbehind to avoid greedy \w+ match -- capture all digits v_post.match <- regexpr("^(?:\\w*(?<=[A-Za-z_])(\\d+)|(\\d+))$", v_post, perl = TRUE) stopifnot(v_post.match[1L] > 0) # match may be under capture[1] or capture[2] because of alternation above whichCapture <- which(attr(v_post.match, "capture.start") > 0) v_post.num <- as.integer(substr(v_post, attr(v_post.match, "capture.start")[whichCapture], attr(v_post.match, "capture.start")[whichCapture] + attr(v_post.match, "capture.length")[whichCapture] - 1)) v_post.prefix <- substr(v_post, 1, attr(v_post.match, "capture.start")[whichCapture] - 1) # just trusting that pre and post match if (is.na(v_pre.num) || is.na(v_post.num)) lav_msg_stop( gettext("Cannot expand variables:"), v_pre, ", ", v_post) v_expand <- paste(v_post.prefix, v_pre.num:v_post.num, v_post.suffix, sep = "", collapse = " ") # for first hyphen, there may be non-hyphenated syntax preceding the initial match cmd.expand[ep] <- joinRegexExpand(cmd, v_expand, hyphens, v) # This won't really work because the cmd.expand element may contain other variables # that are at the beginning or end, prior to hyphen stuff # This is superseded by logic above where @ is included in hyphen match, then trapped as suffix # I don't think it will work yet for this Mplus syntax: y1-y10*5 -- the 5 wouldn't propagate # handle the case of @ fixed values or * starting values used in a list # example: Trait1-Trait3@1 ## if (grepl("@|\\*", cmd.expand[ep], perl=TRUE)) { ## exp_split <- strsplit(cmd.expand[ep], "\\s+", perl=TRUE)[[1]] ## suffixes <-
sub("^([^@\\*]+)([@*][\\d\\.-]+)?$", "\\2", exp_split, perl=TRUE) ## variables <- sub("^([^@\\*]+)([@*][\\d\\.-]+)?$", "\\1", exp_split, perl=TRUE) ## suffixes <- suffixes[suffixes != ""] ## if (length(unique(suffixes)) > 1L) { ## browser() ## #stop("Don't know how to interpret syntax: ", cmd) ## } else { ## variables <- paste0(variables, suffixes[1]) ## cmd.expand[ep] <- paste(variables, collapse=" ") ## } ## } ep <- ep + 1 } return(paste(cmd.expand, collapse = "")) } else { return(cmd) # no hyphens to expand } } # handle starting values and fixed parameters on rhs parseFixStart <- function(cmd) { cmd.parse <- c() ep <- 1L # support ESEM-like syntax: F BY a1* a2* # The easy path: putting in 1s before we proceed on parsing # Mar2023 bugfix: support parenthesis after * in case a parameter constraint comes next cmd <- gsub("([A-z]+\\w*)\\s*\\*(?=\\s+\\(?[A-z]+|\\s*$)", "\\1*1", cmd, perl = TRUE) if ((fixed.starts <- gregexpr("[\\w\\.\\-$]+\\s*([@*])\\s*[\\w\\.\\-]+", cmd, perl = TRUE)[[1]])[1L] > 0) { # shouldn't it be \\*, not * ?! Come back to this. for (f in 1:length(fixed.starts)) { # capture above obtains the fixed/start character (@ or *), whereas match obtains the full regex match opchar <- substr(cmd, attr(fixed.starts, "capture.start")[f], attr(fixed.starts, "capture.start")[f] + attr(fixed.starts, "capture.length")[f] - 1) # match arguments around asterisk/at symbol argsplit <- strsplit(substr(cmd, fixed.starts[f], fixed.starts[f] + attr(fixed.starts, "match.length")[f] - 1), paste0("\\s*", ifelse(opchar == "*", "\\*", opchar), "\\s*"), perl = TRUE)[[1]] v_pre <- argsplit[1] v_post <- argsplit[2] if (suppressWarnings(is.na(as.numeric(v_pre)))) { # fixed.starts value post-multiplier var <- v_pre val <- v_post } else if (suppressWarnings(is.na(as.numeric(v_post)))) { # starting value pre-multiplier var <- v_post val <- v_pre } else { lav_msg_stop( gettext("Cannot parse Mplus fixed/starts values specification:"), v_pre, v_post) } if (opchar == "@") { cmd.parse[ep] <- joinRegexExpand(cmd, paste0(val, "*", var, sep = ""), fixed.starts, f) ep <- ep + 1L } else { cmd.parse[ep] <- joinRegexExpand(cmd, paste0("start(", val, ")*", var, sep = ""), fixed.starts, f) ep <- ep + 1L } } return(paste(cmd.parse, collapse = "")) } else { return(cmd) } } parseConstraints <- function(cmd) { # Allow cmd to have newlines embedded. In this case, split on newlines, and loop over and parse each chunk # Dump leading and trailing newlines, which contain no information about constraints, but may add dummy elements to vector after strsplit # Maybe return LHS and RHS parsed command where constraints only appear on the RHS, whereas the LHS contains only parameters. # Example: LHS is v1 v2 v3 and RHS is con1*v1 con2*v2 con3*v3 cmd.split <- strsplit(cmd, "\n")[[1]] # drop empty lines (especially leading newline) cmd.split <- if (length(emptyPos <- which(cmd.split == "")) > 0L) { cmd.split[-1 * emptyPos] } else { cmd.split } # Create a version of the command with no modifiers (constraints, starting values, etc.) specifications. # This is useful for syntax that uses the params on the LHS and with a modified RHS. Example: v1 ~~ conB*v1 cmd.nomodifiers <- paste0(gsub("(start\\([^\\)]+\\)\\*|[\\d\\-\\.]+\\*)", "", cmd.split, perl = TRUE), collapse = " ") # peel off premultiplication cmd.nomodifiers <- gsub("\\([^\\)]+\\)", "", cmd.nomodifiers, perl = TRUE) cmd.tojoin <- c() # will store all chunks divided by newlines, which will be joined at the end. 
# iterate over each newline segment for (n in 1:length(cmd.split)) { # in principle, now that we respect newlines, parens should only be of length 1, since Mplus syntax dictates newlines for each use of parentheses for constraints if ((parens <- gregexpr("(?<!start)\\(([^\\)]+)\\)", cmd.split[n], perl = TRUE)[[1]])[1L] > 0) { # match parentheses, but not start() # the syntax chunk after all parentheses have been matched cmd.expand <- c() for (p in 1:length(parens)) { # string within the constraint parentheses constraints <- substr(cmd.split[n], attr(parens, "capture.start")[p], attr(parens, "capture.start")[p] + attr(parens, "capture.length")[p] - 1) # Divide constraints on spaces to determine number of constraints to parse. Use trimSpace to avoid problem of user including leading/trailing spaces within parentheses. con.split <- strsplit(trimSpace(constraints), "\\s+", perl = TRUE)[[1]] # if Mplus uses a purely numeric constraint, then add ".con" prefix to be consistent with R naming. con.split <- sapply(con.split, function(x) { if (!suppressWarnings(is.na(as.numeric(x)))) { make.names(paste0(".con", x)) } else { x } }) # determine the parameters that precede the parentheses (either first character for p == 1 or character after preceding parentheses) prestrStart <- ifelse(p > 1, attr(parens, "capture.start")[p - 1] + attr(parens, "capture.length")[p - 1] + 1, 1) # obtain the parameters that precede the parentheses, divide into arguments on spaces # use trimSpace here because first char after prestrStart for p > 1 will probably be a space precmd.split <- strsplit(trimSpace(substr(cmd.split[n], prestrStart, parens[p] - 1)), "\\s+", perl = TRUE)[[1]] # peel off any potential LHS arguments, such as F1 BY precmdLHSOp <- which(tolower(precmd.split) %in% c("by", "with", "on")) if (any(precmdLHSOp)) { lhsop <- paste0(precmd.split[1:precmdLHSOp[1L]], " ", collapse = " ") # join lhs and op as a single string, add trailing space so that paste with expanded RHS is right. rhs <- precmd.split[(precmdLHSOp + 1):length(precmd.split)] } else { lhsop <- "" rhs <- precmd.split } if (length(con.split) > 1L) { # several constraints listed within parentheses.
Example: F1 BY X1 X2 X3 X4 (C2 C3 C4) # thus, backwards match the constraints to parameters # restrict parameters to backwards match to be of the same length as number of constraints rhs.backmatch <- rhs[(length(rhs) - length(con.split) + 1):length(rhs)] rhs.expand <- c() # check that no mean or scale markers are part of the rhs param to expand if ((preMark.match <- regexpr("^\\s*[\\[\\{]", rhs.backmatch[1L], perl = TRUE))[1L] > 0) { preMark <- substr(rhs.backmatch[1L], preMark.match[1L], preMark.match[1L] + attr(preMark.match, "match.length")[1L] - 1) rhs.backmatch[1L] <- substr(rhs.backmatch[1L], preMark.match[1L] + attr(preMark.match, "match.length")[1L], nchar(rhs.backmatch[1L])) } else { preMark <- "" } if ((postMark.match <- regexpr("[\\]\\}]\\s*$", rhs.backmatch[length(rhs.backmatch)], perl = TRUE))[1L] > 0) { postMark <- substr(rhs.backmatch[length(rhs.backmatch)], postMark.match[1L], nchar(rhs.backmatch[length(rhs.backmatch)])) rhs.backmatch[length(rhs.backmatch)] <- substr(rhs.backmatch[length(rhs.backmatch)], 1, postMark.match[1L] - 1) } else { postMark <- "" } # pre-multiply each parameter with each corresponding constraint for (i in 1:length(rhs.backmatch)) { rhs.expand[i] <- paste0(con.split[i], "*", rhs.backmatch[i]) } # join rhs as string and add back in mean/scale operator, if present rhs.expand <- paste0(preMark, paste(rhs.expand, collapse = " "), postMark) # if there were params that preceded the backwards match, then add these back to the syntax # append this syntax to the parsed command, cmd.expand if (length(rhs) - length(con.split) > 0L) { cmd.expand <- c(cmd.expand, paste(lhsop, paste(rhs[1:(length(rhs) - length(con.split))], collapse = " "), rhs.expand)) } else { cmd.expand <- c(cmd.expand, paste0(lhsop, rhs.expand)) } } else { # should be able to reduce redundancy with above # all parameters on the right hand side are to be equated # thus, pre-multiply each parameter by the constraint # check that no mean or scale markers are part of the rhs param to expand # DUPE CODE FROM ABOVE. Make Function?! 
if ((preMark.match <- regexpr("^\\s*[\\[\\{]", rhs[1L], perl = TRUE))[1L] > 0) { preMark <- substr(rhs[1L], preMark.match[1L], preMark.match[1L] + attr(preMark.match, "match.length")[1L] - 1) rhs[1L] <- substr(rhs[1L], preMark.match[1L] + attr(preMark.match, "match.length")[1L], nchar(rhs[1L])) } else { preMark <- "" } if ((postMark.match <- regexpr("[\\]\\}]\\s*$", rhs[length(rhs)], perl = TRUE))[1L] > 0) { postMark <- substr(rhs[length(rhs)], postMark.match[1L], nchar(rhs[length(rhs)])) rhs[length(rhs)] <- substr(rhs[length(rhs)], 1, postMark.match[1L] - 1) } else { postMark <- "" } rhs.expand <- c() for (i in 1:length(rhs)) { rhs.expand[i] <- paste0(con.split[1L], "*", rhs[i]) } # join rhs as string rhs.expand <- paste0(preMark, paste(rhs.expand, collapse = " "), postMark) cmd.expand <- c(cmd.expand, paste0(lhsop, rhs.expand)) } } cmd.tojoin[n] <- paste(cmd.expand, collapse = " ") } else { cmd.tojoin[n] <- cmd.split[n] } # no parens } # eliminate newlines in this function so that they don't mess up \\s+ splits downstream toReturn <- paste(cmd.tojoin, collapse = " ") attr(toReturn, "noModifiers") <- cmd.nomodifiers return(toReturn) } expandGrowthCmd <- function(cmd) { # can assume that any spaces between tscore and variable were stripped by parseFixStart # verify that this is not a random slope if (any(tolower(strsplit(cmd, "\\s+", perl = TRUE)[[1]]) %in% c("on", "at"))) { lav_msg_stop(gettext( "lavaan does not support random slopes or individually varying growth model time scores")) } cmd.split <- strsplit(cmd, "\\s*\\|\\s*", perl = TRUE)[[1]] if (!length(cmd.split) == 2) { lav_msg_stop(gettext("Unknown growth syntax:"), cmd) } lhs <- cmd.split[1] lhs.split <- strsplit(lhs, "\\s+", perl = TRUE)[[1]] rhs <- cmd.split[2] rhs.split <- strsplit(rhs, "(\\*|\\s+)", perl = TRUE)[[1]] if (length(rhs.split) %% 2 != 0) { lav_msg_stop(gettext( "Number of variables and number of tscores does not match:"), rhs) } tscores <- as.numeric(rhs.split[1:length(rhs.split) %% 2 != 0]) # pre-multipliers vars <- rhs.split[1:length(rhs.split) %% 2 == 0] cmd.expand <- c() for (p in 0:(length(lhs.split) - 1)) { if (p == 0) { # intercept cmd.expand <- c(cmd.expand, paste(lhs.split[(p + 1)], "=~", paste("1*", vars, sep = "", collapse = " + "))) } else { cmd.expand <- c(cmd.expand, paste(lhs.split[(p + 1)], "=~", paste(tscores^p, "*", vars, sep = "", collapse = " + "))) } } return(cmd.expand) } # function to wrap long lines at a certain width, splitting on + symbols to be consistent with R syntax wrapAfterPlus <- function(cmd, width = 90, exdent = 5) { result <- lapply(cmd, function(line) { if (nchar(line) > width) { split <- c() spos <- 1L plusMatch <- gregexpr("+", line, fixed = TRUE)[[1]] mpos <- 1L if (plusMatch[1L] > 0L) { # split after plus symbol charsRemain <- nchar(line) while (charsRemain > 0L) { toProcess <- substr(line, nchar(line) - charsRemain + 1, nchar(line)) offset <- nchar(line) - charsRemain + 1 if (nchar(remainder <- substr(line, offset, nchar(line))) <= (width - exdent)) { # remainder of line fits within width -- no need to continue wrapping split[spos] <- remainder charsRemain <- 0 } else { wrapAt <- which(plusMatch < (width + offset - exdent)) wrapAt <- wrapAt[length(wrapAt)] # at the final + split[spos] <- substr(line, offset, plusMatch[wrapAt]) charsRemain <- charsRemain - nchar(split[spos]) spos <- spos + 1 } } # remove leading and trailing chars split <- trimSpace(split) # handle exdent split <- sapply(1:length(split), function(x) { if (x > 1) { paste0(paste(rep(" ", exdent), collapse = 
""), split[x]) } else { split[x] } }) return(split) } else { return(strwrap(line, width = width, exdent = exdent)) # convention strwrap when no + present } } else { return(line) } }) # bind together multi-line expansions into single vector return(unname(do.call(c, result))) } mplus2lavaan.constraintSyntax <- function(syntax) { # should probably pass in model syntax along with some tracking of which parameter labels are defined. # convert MODEL CONSTRAINT section to lavaan model syntax syntax <- paste(lapply(trimSpace(strsplit(syntax, "\n")), function(x) { if (length(x) == 0L && is.character(x)) "" else x }), collapse = "\n") # replace ! with # for comment lines. Also strip newline and replace with semicolon syntax <- gsub("(\\s*)!(.+)\n", "\\1#\\2;", syntax, perl = TRUE) # split into vector of strings # need to peel off leading or trailing newlines -- leads to parsing confusion downstream otherwise syntax.split <- gsub("(^\n|\n$)", "", unlist(strsplit(syntax, ";")), perl = TRUE) constraint.out <- c() # TODO: Handle PLOT and LOOP syntax for model constraints. # TODO: Handle DO loop convention # first parse new parameters defined in MODEL CONSTRAINT into a vector new.parameters <- c() # parameters that are defined in constraint section if (length(new.con.lines <- grep("^\\s*NEW\\s*\\([^\\)]+\\)", syntax.split, perl = TRUE, ignore.case = TRUE)) > 0L) { for (cmd in syntax.split[new.con.lines]) { # process new constraint definition new.con <- regexpr("^\\s*NEW\\s*\\(([^\\)]+)\\)", cmd, perl = TRUE, ignore.case = TRUE) if (new.con[1L] == -1) lav_msg_stop(gettext("Unable to parse names of new contraints")) new.con <- substr(cmd, attr(new.con, "capture.start"), attr(new.con, "capture.start") + attr(new.con, "capture.length") - 1L) new.con <- expandCmd(new.con) # allow for hyphen expansion new.parameters <- c(new.parameters, strsplit(trimSpace(new.con), "\\s+", perl = TRUE)[[1L]]) } syntax.split <- syntax.split[-1L * new.con.lines] # drop out these lines parameters.undefined <- new.parameters # to be used below to handle ambiguity of equation versus definition } for (cmd in syntax.split) { if (grepl("^\\s*#", cmd, perl = TRUE)) { # comment line constraint.out <- c(constraint.out, gsub("\n", "", cmd, fixed = TRUE)) # drop any newlines } else if (grepl("^\\s+$", cmd, perl = TRUE)) { # do nothing, just a space line } else { # constraint proper cmd <- gsub("**", "^", cmd, fixed = TRUE) # handle exponent # lower case the math operations supported by Mplus to be consistent with R # match all math operators, then lower case each and rejoin string maths <- gregexpr("(SQRT|LOG|LOG10|EXP|ABS|SIN|COS|TAN|ASIN|ACOS|ATAN)\\s*\\(", cmd, perl = TRUE)[[1L]] if (maths[1L] > 0) { maths.replace <- c() ep <- 1 for (i in 1:length(maths)) { operator <- tolower(substr(cmd, attr(maths, "capture.start")[i], attr(maths, "capture.start")[i] + attr(maths, "capture.length")[i] - 1)) maths.replace[ep] <- joinRegexExpand(cmd, operator, maths, i, matchLength = "capture.length") # only match operator, not opening ( ep <- ep + 1 } cmd <- paste(maths.replace, collapse = "") } # equating some lhs and rhs: could reflect definition of new parameter if ((equals <- regexpr("=", cmd, fixed = TRUE))[1L] > 0) { lhs <- trimSpace(substr(cmd, 1, equals - 1)) rhs <- trimSpace(substr(cmd, equals + attr(equals, "match.length"), nchar(cmd))) # possibility of lhs or rhs containing the single variable to be equated if (regexpr("\\s+", lhs, perl = TRUE)[1L] > 0L) { def <- rhs body <- lhs } else if (regexpr("\\s+", rhs, perl = TRUE)[1L] > 0L) { def <- 
lhs body <- rhs } else { # warning("Can't figure out which side of constraint defines a parameter") # this would occur for simple rel5 = rel2 sort of syntax def <- lhs body <- rhs } # must decide whether this is a new parameter (:=) or equation of existing labels (==) # alternatively, could be zero, as in 0 = x + y # this is tricky, because Mplus doesn't differentiate definition from equation # consequently, could confuse the issue as in ex5.20 # NEW(rel2 rel5 stan3 stan6); # rel2 = lam2**2*vf1/(lam2**2*vf1 + ve2); # rel5 = lam5**2*vf2/(lam5**2*vf2 + ve5); # rel5 = rel2; # for now, only define a new constraint if it's not already defined # otherwise equate if (def %in% new.parameters && def %in% parameters.undefined) { constraint.out <- c(constraint.out, paste(def, ":=", body)) parameters.undefined <- parameters.undefined[!parameters.undefined == def] } else { constraint.out <- c(constraint.out, paste(def, "==", body)) } } else { # inequality constraints -- paste as is constraint.out <- c(constraint.out, cmd) } } } wrap <- paste(wrapAfterPlus(constraint.out, width = 90, exdent = 5), collapse = "\n") return(wrap) } mplus2lavaan.modelSyntax <- function(syntax) { if (is.character(syntax)) { if (length(syntax) > 1L) { syntax <- paste(syntax, collapse = "\n") } # concatenate into a long string separated by newlines } else { lav_msg_stop(gettext( "mplus2lavaan.modelSyntax accepts a single character string or character vector containing all model syntax")) } # because this is now exposed as a function in the package, handle the case of the user passing in full .inp file as text # we should only be interested in the MODEL and MODEL CONSTRAINT sections by_line <- strsplit(syntax, "\r?\n", perl = TRUE)[[1]] inputHeaders <- grep("^\\s*(title:|data.*:|variable:|define:|analysis:|model.*:|output:|savedata:|plot:|montecarlo:)", by_line, ignore.case = TRUE, perl = TRUE) con_syntax <- c() if (length(inputHeaders) > 0L) { # warning("mplus2lavaan.modelSyntax is intended to accept only the model section, not an entire .inp file. For the .inp file case, use mplus2lavaan") parsed_syntax <- divideInputIntoSections(by_line, "local") # handle model constraint if ("model.constraint" %in% names(parsed_syntax)) { con_syntax <- strsplit(mplus2lavaan.constraintSyntax(parsed_syntax$model.constraint), "\n")[[1]] } # just keep model syntax before continuing syntax <- parsed_syntax$model } # initial strip of leading/trailing whitespace, which can interfere with splitting on spaces # strsplit generates character(0) for empty strings, which causes problems in paste because paste actually includes it as a literal # example: paste(list(character(0), "asdf", character(0)), collapse=" ") # thus, use lapply to convert these to empty strings first syntax <- paste(lapply(trimSpace(strsplit(syntax, "\n")), function(x) { if (length(x) == 0L && is.character(x)) "" else x }), collapse = "\n") # replace ! with # for comment lines. Also strip newline and replace with semicolon syntax <- gsub("(\\s*)!(.+)\n*", "\\1#\\2;", syntax, perl = TRUE) # new direction: retain newlines in parsed syntax until after constraints have been parsed # delete newlines # syntax <- gsub("\n", "", syntax, fixed=TRUE) # replace semicolons with newlines prior to split (divide into commands) # syntax <- gsub(";", "\n", syntax, fixed=TRUE) # split into vector of strings # syntax.split <- unlist( strsplit(syntax, "\n") ) syntax.split <- trimSpace(unlist(strsplit(syntax, ";"))) # format of parTable to mimic. # 'data.frame': 34 obs.
of 12 variables: # $ id : int 1 2 3 4 5 6 7 8 9 10 ... # $ lhs : chr "ind60" "ind60" "ind60" "dem60" ... # $ op : chr "=~" "=~" "=~" "=~" ... # $ rhs : chr "x1" "x2" "x3" "y1" ... # $ user : int 1 1 1 1 1 1 1 1 1 1 ... # $ group : int 1 1 1 1 1 1 1 1 1 1 ... # $ free : int 0 1 2 0 3 4 5 0 6 7 ... # $ ustart: num 1 NA NA 1 NA NA NA 1 NA NA ... # $ exo : int 0 0 0 0 0 0 0 0 0 0 ... # $ label : chr "" "" "" "" ... # $ eq.id : int 0 0 0 0 0 0 0 0 0 0 ... # $ unco : int 0 1 2 0 3 4 5 0 6 7 ... # vector of lavaan syntax lavaan.out <- c() for (cmd in syntax.split) { if (grepl("^\\s*#", cmd, perl = TRUE)) { # comment line lavaan.out <- c(lavaan.out, gsub("\n", "", cmd, fixed = TRUE)) # drop any newlines (otherwise done by parseConstraints) } else if (grepl("^\\s*$", cmd, perl = TRUE)) { # do nothing, just a space or blank line } else { # hyphen expansion cmd <- expandCmd(cmd) # parse fixed parameters and starting values cmd <- parseFixStart(cmd) # parse any constraints here (avoid weird logic below) cmd <- parseConstraints(cmd) if ((op <- regexpr("\\s+(by|on|with|pwith)\\s+", cmd, ignore.case = TRUE, perl = TRUE))[1L] > 0) { # regressions, factors, covariances lhs <- substr(cmd, 1, op - 1) # using op takes match.start which will omit spaces before operator rhs <- substr(cmd, op + attr(op, "match.length"), nchar(cmd)) operator <- tolower(substr(cmd, attr(op, "capture.start"), attr(op, "capture.start") + attr(op, "capture.length") - 1)) if (operator == "by") { lav.operator <- "=~" } else if (operator == "with" || operator == "pwith") { lav.operator <- "~~" } else if (operator == "on") { lav.operator <- "~" } # handle parameter combinations lhs.split <- strsplit(lhs, "\\s+")[[1]] # trimSpace( # handle pwith syntax if (operator == "pwith") { # TODO: Figure out if pwith can be paired with constraints? rhs.split <- strsplit(rhs, "\\s+")[[1]] # trimSpace( if (length(lhs.split) != length(rhs.split)) { browser() lav_msg_stop(gettext( "PWITH command does not have the same number of arguments on the left and right sides.")) } cmd <- sapply(1:length(lhs.split), function(i) paste(lhs.split[i], lav.operator, rhs.split[i])) } else { # insert plus signs on the rhs as long as it isn't preceded or followed by a plus already rhs <- gsub("(?<!\\+)\\s+(?!\\+)", " + ", rhs, perl = TRUE) if (length(lhs.split) > 1L) { # expand using possible combinations cmd <- sapply(lhs.split, function(larg) { pair <- paste(larg, lav.operator, rhs) return(pair) }) } else { cmd <- paste(lhs, lav.operator, rhs) } } } else if ((means.scales <- regexpr("^\\s*([\\[\\{])([^\\]\\}]+)[\\]\\}]\\s*$", cmd, ignore.case = TRUE, perl = TRUE))[1L] > 0) { # intercepts/means or scales # first capture is the operator: [ or { operator <- substr(cmd, attr(means.scales, "capture.start")[1L], attr(means.scales, "capture.start")[1L] + attr(means.scales, "capture.length")[1L] - 1) params <- substr(cmd, attr(means.scales, "capture.start")[2L], attr(means.scales, "capture.start")[2L] + attr(means.scales, "capture.length")[2L] - 1) # obtain parameters with no modifiers specified for LHS params.noModifiers <- sub("^\\s*[\\[\\{]([^\\]\\}]+)[\\]\\}]\\s*$", "\\1", attr(cmd, "noModifiers"), perl = TRUE) means.scales.split <- strsplit(params, "\\s+")[[1]] # trimSpace( means.scales.noModifiers.split <- strsplit(params.noModifiers, "\\s+")[[1]] # trimSpace( if (operator == "[") { # Tricky syntax shift (and corresponding kludge). For means, need to put constraint on RHS as pre-multiplier of 1 (e.g., x1 ~ 5*1).
# But parseConstraints returns constraints multiplied by parameters cmd <- sapply(means.scales.split, function(v) { # shift pre-multiplier if ((premult <- regexpr("([^\\*]+\\*[^\\*]+)\\*([^\\*]+)", v, perl = TRUE))[1L] > 0) { # double modifier: label and constraint modifier <- substr(v, attr(premult, "capture.start")[1L], attr(premult, "capture.start")[1L] + attr(premult, "capture.length")[1L] - 1) paramName <- substr(v, attr(premult, "capture.start")[2L], attr(premult, "capture.start")[2L] + attr(premult, "capture.length")[2L] - 1) paste0(paramName, " ~ ", modifier, "*1") } else if ((premult <- regexpr("([^\\*]+)\\*([^\\*]+)", v, perl = TRUE))[1L] > 0) { modifier <- substr(v, attr(premult, "capture.start")[1L], attr(premult, "capture.start")[1L] + attr(premult, "capture.length")[1L] - 1) paramName <- substr(v, attr(premult, "capture.start")[2L], attr(premult, "capture.start")[2L] + attr(premult, "capture.length")[2L] - 1) paste0(paramName, " ~ ", modifier, "*1") } else { paste(v, "~ 1") } }) } else if (operator == "{") { # only include constraints on RHS cmd <- sapply(1:length(means.scales.split), function(v) paste(means.scales.noModifiers.split[v], "~*~", means.scales.split[v])) } else { lav_msg_stop(gettext("What's the operator?!")) } } else if (grepl("|", cmd, fixed = TRUE)) { # expand growth modeling language cmd <- expandGrowthCmd(cmd) } else { # no operator, no means, must be variance. # cat("assuming vars: ", cmd, "\n") vars.lhs <- strsplit(attr(cmd, "noModifiers"), "\\s+")[[1]] # trimSpace( vars.rhs <- strsplit(cmd, "\\s+")[[1]] # trimSpace( cmd <- sapply(1:length(vars.lhs), function(v) paste(vars.lhs[v], "~~", vars.rhs[v])) } # handle threshold substitution: $ -> | cmd <- gsub("$", "|", cmd, fixed = TRUE) # if we have both starting/fixed values and constraints, these must be handled by separate commands. 
# starting and fixed values are already handled in the pipeline by this point, so should be evident in the command # bfi BY lab1*start(1)*bfi_1 ==> bfi BY lab1*bfi_1 + start(1)*bfi_1 double_asterisks <- grepl("\\s*[\\w\\(\\)\\.]+\\*[\\w\\(\\)\\.]+\\*[\\w\\(\\)\\.]+", cmd, perl = TRUE) if (isTRUE(double_asterisks[1])) { ss <- strsplit(cmd, "*", fixed = TRUE)[[1]] if (length(ss) != 3) { lav_msg_warn(gettext("problem interpreting double asterisk syntax:"), cmd) # sanity check on my logic } else { cmd <- paste0(ss[1], "*", ss[3], " + ", ss[2], "*", ss[3]) } } lavaan.out <- c(lavaan.out, cmd) } } # new threshold syntax shifts things to the form: # VAR | t1 + t2 + t3 (left to right ordering) # Parameter labels, fixed values, and starting values are tacked on in the usual way, like # VAR | 5*t1 + start(1.5)*t2 + par_label*t3 (left to right ordering) thresh_lines <- grep("^\\s*[A-z]+\\w*\\|\\d+", lavaan.out, perl = TRUE) if (length(thresh_lines) > 0L) { thresh_vars <- unname(sub("^\\s*([A-z]+\\w*).*", "\\1", lavaan.out[thresh_lines], perl = TRUE)) thresh_split <- split(thresh_lines, thresh_vars) drop_elements <- c() for (i in seq_along(thresh_split)) { this_set <- lavaan.out[thresh_split[[i]]] tnum <- as.integer(sub("^\\s*[A-z]+\\w*\\|(\\d+)\\s*.*", "\\1", this_set)) this_set <- this_set[order(tnum)] # ensure that threshold numbering matches ascending order this_set <- sub("[^~]+\\s*~\\s*", "", this_set, perl = T) # drop variable and ~ # convert to new t1, t2 syntax by combining modifiers with threshold numbers this_set <- sapply(seq_along(this_set), function(j) { # gsub("[^~]+\\s*~\\s*([\\w\\.\\-]+\\*)*1", paste0("\\1t", j), this_set[j], perl=TRUE) gsub("([\\w\\.\\-]+\\*)*1", paste0("\\1t", j), this_set[j], perl = TRUE) }) new_str <- paste(names(thresh_split)[i], "|", paste(this_set, collapse = " + ")) # replace in model string on the first line having relevant syntax lavaan.out[thresh_split[[i]][1]] <- new_str drop_elements <- c(drop_elements, thresh_split[[i]][-1]) } lavaan.out <- lavaan.out[-drop_elements] } # tack on constraint syntax, if included lavaan.out <- c(lavaan.out, con_syntax) # for now, include a final trimSpace call since some arguments have leading/trailing space stripped. wrap <- paste(wrapAfterPlus(lavaan.out, width = 90, exdent = 5), collapse = "\n") # trimSpace( return(wrap) }
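# Example (illustrative sketch; the Mplus model string is made up):
#   mp <- "f1 BY y1-y3;  f1 ON x1;"
#   cat(mplus2lavaan.modelSyntax(mp))
#   ## f1 =~ y1 + y2 + y3
#   ## f1 ~ x1
# (exact spacing/wrapping of the generated lavaan syntax may differ)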
For now, require a .inp file # if (length(inpfile) == 1L && grepl("\\.inp$", inpfile)) { # if (!file.exists(inpfile)) { stop("Could not find file: ", inpfile) } # inpfile.text <- scan(inpfile, what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE) # } else { # #assume that inpfile itself is syntax (e.g., in a character vector) # inpfile.text <- inpfile # } inpfile.text <- scan(inpfile, what = "character", sep = "\n", strip.white = FALSE, blank.lines.skip = FALSE, quiet = TRUE) sections <- divideInputIntoSections(inpfile.text, inpfile) mplus.inp <- list() mplus.inp$title <- trimSpace(paste(sections$title, collapse = " ")) mplus.inp$data <- divideIntoFields(sections$data, required = "file") mplus.inp$variable <- divideIntoFields(sections$variable, required = "names") mplus.inp$analysis <- divideIntoFields(sections$analysis) meanstructure <- "default" # lavaan default if (!is.null(mplus.inp$analysis$model)) { if (tolower(mplus.inp$analysis$model) == "nomeanstructure") { meanstructure <- FALSE } # explicitly disable mean structure } information <- "default" # lavaan default if (!is.null(mplus.inp$analysis$information)) { information <- tolower(mplus.inp$analysis$information) } estimator <- "default" if (!is.null(est <- mplus.inp$analysis$estimator)) { # no memory of what this is up to.... if (toupper(est) == "MUML") lav_msg_warn(gettext( "Mplus does not support MUML estimator. Using default instead.")) estimator <- est # march 2013: handle case where categorical data are specified, but ML-based estimator requested. # use WLSMV instead if (!is.null(mplus.inp$variable$categorical) && toupper(substr(mplus.inp$analysis$estimator, 1, 2)) == "ML") { lav_msg_warn(gettext( "Lavaan does not yet support ML-based estimation for categorical data. 
Reverting to WLSMV"))
      estimator <- "WLSMV"
    }
  }
  # expand hyphens in variable names and split into vector that will be the names for read.table
  mplus.inp$variable$names <- strsplit(expandCmd(mplus.inp$variable$names), "\\s+", perl = TRUE)[[1]]
  # expand hyphens in categorical declaration
  if (!is.null(mplus.inp$variable$categorical)) mplus.inp$variable$categorical <- strsplit(expandCmd(mplus.inp$variable$categorical), "\\s+", perl = TRUE)[[1]]
  # convert mplus syntax to lavaan syntax
  mplus.inp$model <- mplus2lavaan.modelSyntax(sections$model)
  # handle model constraint
  if ("model.constraint" %in% names(sections)) {
    mplus.inp$model.constraint <- mplus2lavaan.constraintSyntax(sections$model.constraint)
    mplus.inp$model <- paste(mplus.inp$model, mplus.inp$model.constraint, sep = "\n")
  }
  # read mplus data (and handle missing spec)
  mplus.inp$data <- readMplusInputData(mplus.inp, inpfile)
  # handle bootstrapping specification
  se <- "default"
  bootstrap <- 1000L
  test <- "default"
  if (!is.null(mplus.inp$analysis$bootstrap)) {
    boot.type <- "standard"
    # check whether standard versus residual bootstrap is specified
    if ((boot.match <- regexpr("\\((\\w+)\\)", mplus.inp$analysis$bootstrap, perl = TRUE)) > 0L) {
      boot.type <- tolower(substr(mplus.inp$analysis$bootstrap, attr(boot.match, "capture.start"), attr(boot.match, "capture.start") + attr(boot.match, "capture.length") - 1L))
    }
    if (boot.type == "residual") test <- "Bollen.Stine"
    se <- "bootstrap"
    if ((nboot.match <- regexpr("^\\s*(\\d+)", mplus.inp$analysis$bootstrap, perl = TRUE)) > 0L) {
      bootstrap <- as.numeric(substr(mplus.inp$analysis$bootstrap, attr(nboot.match, "capture.start"), attr(nboot.match, "capture.start") + attr(nboot.match, "capture.length") - 1L))
    }
  }
  if (run) {
    fit <- sem(mplus.inp$model, data = mplus.inp$data, meanstructure = meanstructure, mimic = "Mplus", estimator = estimator, test = test, se = se, bootstrap = bootstrap, information = information)
    fit@external <- list(mplus.inp = mplus.inp)
  } else {
    fit <- mplus.inp # just return the syntax outside of a lavaan object
  }
  return(fit)
}

divideIntoFields <- function(section.text, required) {
  if (is.null(section.text)) {
    return(NULL)
  }
  # The parser breaks down when there is a line with a trailing comment because then splitting on semicolon will combine it with the following line
  # Thus, trim off trailing comments before initial split
  section.text <- gsub("\\s*!.*$", "", section.text, perl = TRUE)
  section.split <- strsplit(paste(section.text, collapse = " "), ";", fixed = TRUE)[[1]] # split on semicolons
  section.divide <- list()
  for (cmd in section.split) {
    if (grepl("^\\s*!.*", cmd, perl = TRUE)) next # skip comment lines
    if (grepl("^\\s+$", cmd, perl = TRUE)) next # skip blank lines
    # mplus is apparently tolerant of specifications that don't include IS/ARE/=
    # example: usevariables x1-x10;
    # thus, split on spaces and assume that first element is lhs, drop second element if IS/ARE/=, and assume remainder is rhs
    # but if user uses equals sign, then spaces will not always be present (e.g., usevariables=x1-x10)
    if ((leadingEquals <- regexpr("^\\s*[A-Za-z]+[A-Za-z_-]*\\s*(=)", cmd[1L], perl = TRUE))[1L] > 0) {
      cmdName <- trimSpace(substr(cmd[1L], 1, attr(leadingEquals, "capture.start") - 1))
      cmdArgs <- trimSpace(substr(cmd[1L], attr(leadingEquals, "capture.start") + 1, nchar(cmd[1L])))
    } else {
      cmd.spacesplit <- strsplit(trimSpace(cmd[1L]), "\\s+", perl = TRUE)[[1L]]
      if (length(cmd.spacesplit) < 2L) {
        # for future: make room for this function to parse things like just TECH13 (no rhs)
      } else {
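        # e.g. (illustrative input): for cmd "usevariables are x1 x2 x3",
        # cmd.spacesplit is c("usevariables", "are", "x1", "x2", "x3");
        # the IS/ARE keyword is dropped below and the remainder is
        # rejoined into the argument string "x1 x2 x3"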
cmdName <- trimSpace(cmd.spacesplit[1L]) if (length(cmd.spacesplit) > 2L && tolower(cmd.spacesplit[2L]) %in% c("is", "are")) { cmdArgs <- paste(cmd.spacesplit[3L:length(cmd.spacesplit)], collapse = " ") # remainder, removing is/are } else { cmdArgs <- paste(cmd.spacesplit[2L:length(cmd.spacesplit)], collapse = " ") # is/are not used, so just join rhs } } } section.divide[[make.names(tolower(cmdName))]] <- cmdArgs } if (!missing(required)) { stopifnot(all(required %in% names(section.divide))) } return(section.divide) } # helper function splitFilePath <- function(abspath) { # function to split path into path and filename # code adapted from R.utils filePath command if (!is.character(abspath)) lav_msg_stop(gettext( "Path not a character string")) if (nchar(abspath) < 1 || is.na(abspath)) lav_msg_stop(gettext( "Path is missing or of zero length")) components <- strsplit(abspath, split = "[\\/]")[[1]] lcom <- length(components) stopifnot(lcom > 0) # the file is the last element in the list. In the case of length == 1, this will extract the only element. relFilename <- components[lcom] absolute <- FALSE if (lcom == 1) { dirpart <- NA_character_ } else if (lcom > 1) { # drop the file from the list (the last element) components <- components[-lcom] dirpart <- do.call("file.path", as.list(components)) # if path begins with C:, /, //, or \\, then treat as absolute if (grepl("^([A-Z]{1}:|/|//|\\\\)+.*$", dirpart, perl = TRUE)) absolute <- TRUE } return(list(directory = dirpart, filename = relFilename, absolute = absolute)) } readMplusInputData <- function(mplus.inp, inpfile) { # handle issue of mplus2lavaan being called with an absolute path, whereas mplus has only a local data file inpfile.split <- splitFilePath(inpfile) datfile.split <- splitFilePath(mplus.inp$data$file) # if inp file target directory is non-empty, but mplus data is without directory, then append # inp file directory to mplus data. This ensures that R need not be in the working directory # to read the dat file. But if mplus data has an absolute directory, don't append # if mplus data directory is present and absolute, or if no directory in input file, just use filename as is if (!is.na(datfile.split$directory) && datfile.split$absolute) { datFile <- mplus.inp$data$file } # just use mplus data filename if it has absolute path else if (is.na(inpfile.split$directory)) { datFile <- mplus.inp$data$file } # just use mplus data filename if inp file is missing path (working dir) else { datFile <- file.path(inpfile.split$directory, mplus.inp$data$file) } # dat file path is relative or absent, and inp file directory is present if (!file.exists(datFile)) { lav_msg_warn(gettext("Cannot find data file:"), datFile) return(NULL) } # handle missing is/are: missList <- NULL if (!is.null(missSpec <- mplus.inp$variable$missing)) { expandMissVec <- function(missStr) { # sub-function to obtain a vector of all missing values within a set of parentheses missSplit <- strsplit(missStr, "\\s+")[[1L]] missVals <- c() for (f in missSplit) { if ((hyphenPos <- regexpr("\\d+(-)\\d+", f, perl = TRUE))[1L] > -1L) { # expand hyphen preHyphen <- substr(f, 1, attr(hyphenPos, "capture.start") - 1) postHyphen <- substr(f, attr(hyphenPos, "capture.start") + 1, nchar(f)) missVals <- c(missVals, as.character(seq(preHyphen, postHyphen))) } else { # append to vector missVals <- c(missVals, f) } } return(as.numeric(missVals)) } if (missSpec == "." 
|| missSpec == "*") { # case 1: MISSING ARE|=|IS .; na.strings <- missSpec } else if ((allMatch <- regexpr("\\s*ALL\\s*\\(([^\\)]+)\\)", missSpec, perl = TRUE))[1L] > -1L) { # case 2: use of ALL with parens missStr <- trimSpace(substr(missSpec, attr(allMatch, "capture.start"), attr(allMatch, "capture.start") + attr(allMatch, "capture.length") - 1L)) na.strings <- expandMissVec(missStr) } else { # case 3: specific missing values per variable # process each element missBlocks <- gregexpr("(?:(\\w+)\\s+\\(([^\\)]+)\\))+", missSpec, perl = TRUE)[[1]] missList <- list() if (missBlocks[1L] > -1L) { for (i in 1:length(missBlocks)) { vname <- substr(missSpec, attr(missBlocks, "capture.start")[i, 1L], attr(missBlocks, "capture.start")[i, 1L] + attr(missBlocks, "capture.length")[i, 1L] - 1L) vmiss <- substr(missSpec, attr(missBlocks, "capture.start")[i, 2L], attr(missBlocks, "capture.start")[i, 2L] + attr(missBlocks, "capture.length")[i, 2L] - 1L) vnameHyphen <- regexpr("(\\w+)-(\\w+)", vname, perl = TRUE)[1L] if (vnameHyphen > -1L) { # lookup against variable names vstart <- which(mplus.inp$variable$names == substr(vname, attr(vnameHyphen, "capture.start")[1L], attr(vnameHyphen, "capture.start")[1L] + attr(vnameHyphen, "capture.length")[1L] - 1L)) vend <- which(mplus.inp$variable$names == substr(vname, attr(vnameHyphen, "capture.start")[2L], attr(vnameHyphen, "capture.start")[2L] + attr(vnameHyphen, "capture.length")[2L] - 1L)) if (length(vstart) == 0L || length(vend) == 0L) { lav_msg_stop(gettext("Unable to lookup missing variable list: "), vname) } # I suppose start or finish could be mixed up if (vstart > vend) { vstart.orig <- vstart vstart <- vend vend <- vstart.orig } vname <- mplus.inp$variable$names[vstart:vend] } missVals <- expandMissVec(vmiss) for (j in 1:length(vname)) { missList[[vname[j]]] <- missVals } } } else { lav_msg_stop(gettext("I don't understand this missing specification:"), missSpec) } } } else { na.strings <- "NA" } if (!is.null(missList)) { dat <- read.table(datFile, header = FALSE, col.names = mplus.inp$variable$names, colClasses = "numeric") # loop over variables in missList and set missing values to NA dat[, names(missList)] <- lapply(names(missList), function(vmiss) { dat[which(dat[, vmiss] %in% missList[[vmiss]]), vmiss] <- NA return(dat[, vmiss]) }) names(dat) <- mplus.inp$variable$names # loses these from the lapply } else { dat <- read.table(datFile, header = FALSE, col.names = mplus.inp$variable$names, na.strings = na.strings, colClasses = "numeric") } # TODO: support covariance/mean+cov inputs # store categorical variables as ordered factors if (!is.null(mplus.inp$variable$categorical)) { dat[, c(mplus.inp$variable$categorical)] <- lapply(dat[, c(mplus.inp$variable$categorical), drop = FALSE], ordered) } return(dat) } divideInputIntoSections <- function(inpfile.text, filename) { inputHeaders <- grep("^\\s*(title:|data.*:|variable:|define:|analysis:|model.*:|output:|savedata:|plot:|montecarlo:)", inpfile.text, ignore.case = TRUE, perl = TRUE) stopifnot(length(inputHeaders) > 0L) mplus.sections <- list() for (h in 1:length(inputHeaders)) { sectionEnd <- ifelse(h < length(inputHeaders), inputHeaders[h + 1] - 1, length(inpfile.text)) section <- inpfile.text[inputHeaders[h]:sectionEnd] sectionName <- trimSpace(sub("^([^:]+):.*$", "\\1", section[1L], perl = TRUE)) # obtain text before the colon # dump section name from input syntax section[1L] <- sub("^[^:]+:(.*)$", "\\1", section[1L], perl = TRUE) mplus.sections[[make.names(tolower(sectionName))]] <- section } 
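  # illustrative result (hypothetical input): a file with TITLE:, DATA:,
  # VARIABLE: and MODEL: headers yields
  # list(title = ..., data = ..., variable = ..., model = ...),
  # each element holding the raw lines of its section with the header
  # keyword stripped from the first line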
return(mplus.sections) } lavaan/R/lav_cfa_fabin.R0000644000176200001440000001426414627656441014620 0ustar liggesusers# FABIN = factor analysis by instrumental variables # Hagglund 1982 (efa), 1986 (cfa) lav_cfa_fabin2 <- function(S, marker.idx = NULL, lambda.nonzero.idx = NULL) { nvar <- ncol(S) nfac <- length(marker.idx) # overview of free/fixed LAMBDA <- matrix(0, nvar, nfac) LAMBDA[lambda.nonzero.idx] <- -1L lambda <- matrix(0, nvar, nfac) for (i in 1:nvar) { if (i %in% marker.idx) { lambda[i, marker.idx == i] <- 1.0 next } free.idx <- LAMBDA[i, ] == -1L idx3 <- (1:nvar)[-c(i, marker.idx)] s23 <- S[i, idx3] fac.idx <- marker.idx[free.idx] if (length(fac.idx) == 1L) { # most common scenario in CFA S31 <- S13 <- S[idx3, fac.idx] lambda[i, free.idx] <- sum(s23 * S31) / sum(S13 * S13) } else { S31 <- S[idx3, fac.idx, drop = FALSE] S13 <- S[fac.idx, idx3, drop = FALSE] lambda[i, free.idx] <- solve(S13 %*% S31, drop(s23 %*% S31)) } } lambda } lav_cfa_fabin3 <- function(S, marker.idx = NULL, lambda.nonzero.idx = NULL) { nvar <- ncol(S) nfac <- length(marker.idx) # overview of free/fixed LAMBDA <- matrix(0, nvar, nfac) LAMBDA[lambda.nonzero.idx] <- -1L S33.inv <- try(solve(S[-marker.idx, -marker.idx, drop = FALSE]), silent = TRUE ) if (inherits(S33.inv, "try-error")) { lav_msg_warn(gettext("fabin3 failed; switching to fabin2")) return(lav_cfa_fabin2( S = S, marker.idx = marker.idx, lambda.nonzero.idx = lambda.nonzero.idx )) } lambda <- matrix(0, nvar, nfac) rm3.idx <- 0L for (i in 1:nvar) { if (i %in% marker.idx) { lambda[i, marker.idx == i] <- 1.0 next } free.idx <- LAMBDA[i, ] == -1L idx3 <- (1:nvar)[-c(i, marker.idx)] S33 <- S[idx3, idx3, drop = FALSE] s23 <- S[i, idx3] fac.idx <- marker.idx[free.idx] rm3.idx <- rm3.idx + 1L # update inverse s33.inv <- lav_matrix_symmetric_inverse_update( S.inv = S33.inv, rm.idx = rm3.idx ) if (length(fac.idx) == 1L) { # most common scenario in CFA S31 <- S13 <- S[idx3, fac.idx] tmp <- s33.inv %*% S31 # or colSums(s33.inv * S31) lambda[i, free.idx] <- sum(s23 * tmp) / sum(S13 * tmp) } else { S31 <- S[idx3, fac.idx, drop = FALSE] S13 <- S[fac.idx, idx3, drop = FALSE] tmp <- s33.inv %*% S31 # lambda[i, free.idx] <- ( s23 %*% solve(S33) %*% S31 %*% # solve(S13 %*% solve(S33) %*% S31) ) lambda[i, free.idx] <- solve(S13 %*% tmp, drop(s23 %*% tmp)) } } lambda } # internal function to be used inside lav_optim_noniter # return 'x', the estimated vector of free parameters lav_cfa_fabin_internal <- function(lavmodel = NULL, lavsamplestats = NULL, lavpartable = NULL, lavdata = NULL, lavoptions = NULL) { lavpta <- lav_partable_attributes(lavpartable) lavpartable <- lav_partable_set_cache(lavpartable, lavpta) # no structural part! if (any(lavpartable$op == "~")) { lav_msg_stop(gettext("FABIN estimator only available for CFA models")) } # no BETA matrix! (i.e., no higher-order factors) if (!is.null(lavmodel@GLIST$beta)) { lav_msg_stop(gettext( "FABIN estimator not available for models that require a BETA matrix")) } # no std.lv = TRUE for now if (lavoptions$std.lv) { lav_msg_stop( gettext("FABIN estimator not available if std.lv = TRUE")) } nblocks <- lav_partable_nblocks(lavpartable) stopifnot(nblocks == 1L) # for now b <- 1L sample.cov <- lavsamplestats@cov[[b]] nvar <- nrow(sample.cov) lv.names <- lavpta$vnames$lv.regular[[b]] nfac <- length(lv.names) marker.idx <- lavpta$vidx$lv.marker[[b]] lambda.idx <- which(names(lavmodel@GLIST) == "lambda") lambda.nonzero.idx <- lavmodel@m.free.idx[[lambda.idx]] # only diagonal THETA for now... 
# because if we have correlated residuals, we should remove the # corresponding variables as instruments before we estimate lambda... # (see MIIV) theta.idx <- which(names(lavmodel@GLIST) == "theta") # usually '2' m.theta <- lavmodel@m.free.idx[[theta.idx]] nondiag.idx <- m.theta[!m.theta %in% lav_matrix_diag_idx(nvar)] if (length(nondiag.idx) > 0L) { lav_msg_warn(gettext( "this implementation of FABIN does not handle correlated residuals yet!" )) } # 1. estimate LAMBDA if (lavoptions$estimator == "FABIN2") { LAMBDA <- lav_cfa_fabin2( S = sample.cov, marker.idx = marker.idx, lambda.nonzero.idx = lambda.nonzero.idx ) } else { LAMBDA <- lav_cfa_fabin3( S = sample.cov, marker.idx = marker.idx, lambda.nonzero.idx = lambda.nonzero.idx ) } # 2. simple ULS method to get THETA and PSI (for now) GLS.flag <- FALSE psi.mapping.ML.flag <- FALSE if (!is.null(lavoptions$estimator.args$thetapsi.method) && lavoptions$estimator.args$thetapsi.method %in% c("GLS", "GLS.ML")) { GLS.flag <- TRUE } if (!is.null(lavoptions$estimator.args$thetapsi.method) && lavoptions$estimator.args$thetapsi.method %in% c("ULS.ML", "GLS.ML")) { psi.mapping.ML.flag <- TRUE } out <- lav_cfa_lambda2thetapsi( lambda = LAMBDA, S = sample.cov, S.inv = lavsamplestats@icov[[b]], GLS = GLS.flag, psi.mapping.ML = psi.mapping.ML.flag, nobs = lavsamplestats@ntotal ) THETA <- diag(out$theta) PSI <- out$psi # 3. correlated residuals (if any) are just the difference between # Sigma and S # if(length(nondiag.idx) > 0L) { # Sigma <- LAMBDA %*% PSI %*% t(LAMBDA) + THETA # THETA[nondiag.idx] <- (sample.cov - Sigma)[nondiag.idx] # } # store matrices in lavmodel@GLIST lavmodel@GLIST$lambda <- LAMBDA lavmodel@GLIST$theta <- THETA lavmodel@GLIST$psi <- PSI # extract free parameters only x <- lav_model_get_parameters(lavmodel) # apply bounds (if any) if (!is.null(lavpartable$lower)) { lower.x <- lavpartable$lower[lavpartable$free > 0] too.small.idx <- which(x < lower.x) if (length(too.small.idx) > 0L) { x[too.small.idx] <- lower.x[too.small.idx] } } if (!is.null(lavpartable$upper)) { upper.x <- lavpartable$upper[lavpartable$free > 0] too.large.idx <- which(x > upper.x) if (length(too.large.idx) > 0L) { x[too.large.idx] <- upper.x[too.large.idx] } } x } lavaan/R/lav_model_estimate.R0000644000176200001440000007461614627656441015732 0ustar liggesusers# model estimation lav_model_estimate <- function(lavmodel = NULL, lavpartable = NULL, # for parscale = "stand" lavh1 = NULL, # for multilevel + parsc lavsamplestats = NULL, lavdata = NULL, lavoptions = NULL, lavcache = list(), start = "model", do.fit = TRUE) { lavpartable <- lav_partable_set_cache(lavpartable) estimator <- lavoptions$estimator verbose <- lav_verbose() debug <- lav_debug() ngroups <- lavsamplestats@ngroups if (lavsamplestats@missing.flag || estimator == "PML") { group.weight <- FALSE } else { group.weight <- TRUE } # backwards compatibility < 0.6-11 if (is.null(lavoptions$optim.partrace)) { lavoptions$optim.partrace <- FALSE } if (lavoptions$optim.partrace) { # fx + parameter values PENV <- new.env() PENV$PARTRACE <- matrix(NA, nrow = 0, ncol = lavmodel@nx.free + 1L) } # starting values (ignoring equality constraints) x.unpack <- lav_model_get_parameters(lavmodel) # override? use simple instead? 
(new in 0.6-7) if (start == "simple") { START <- numeric(length(lavpartable$lhs)) # set loadings to 0.7 loadings.idx <- which(lavpartable$free > 0L & lavpartable$op == "=~") if (length(loadings.idx) > 0L) { START[loadings.idx] <- 0.7 } # set (only) variances to 1 var.idx <- which(lavpartable$free > 0L & lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs) if (length(var.idx) > 0L) { START[var.idx] <- 1 } if (lavmodel@ceq.simple.only) { x.unpack <- START[lavpartable$free > 0L & !duplicated(lavpartable$free)] } else { x.unpack <- START[lavpartable$free > 0L] } # override? use random starting values instead? (new in 0.6-18) } else if (start == "random") { START <- lav_partable_random( lavpartable = lavpartable, # needed if we still need to compute bounds: lavh1 = lavh1, lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions ) if (lavmodel@ceq.simple.only) { x.unpack <- START[lavpartable$free > 0L & !duplicated(lavpartable$free)] } else { x.unpack <- START[lavpartable$free > 0L] } } # 1. parameter scaling (to handle data scaling, not parameter scaling) parscale <- rep(1.0, length(x.unpack)) # for < 0.6 compatibility if (is.null(lavoptions$optim.parscale)) { lavoptions$optim.parscale <- "none" } if (lavoptions$optim.parscale == "none") { # do nothing, but still set SCALE, as before # 0.6-17: # only temporarily: 'keep' this mistake, and change it later: # (note the "standarized") # we only do this to avoid breaking a test in semlbci # } else if(lavoptions$optim.parscale %in% c("stand", "st", "standardize", # "standarized", "stand.all")) { # this is what it should be: } else if (lavoptions$optim.parscale %in% c( "stand", "st", "standardize", "standardized", "stand.all" )) { # rescale parameters as if the data was standardized # new in 0.6-2 # # FIXME: this works well, as long as the variances of the # latent variables (which we do not know) are more or less # equal to 1.0 (eg std.lv = TRUE) # # Once we have better estimates of those variances, we could # use them to set the scale # if (lavdata@nlevels > 1L) { if (length(lavh1) > 0L) { OV.VAR <- lapply(lavh1$implied$cov, diag) } else { OV.VAR <- lapply( do.call(c, lapply(lavdata@Lp, "[[", "ov.idx")), function(x) rep(1, length(x)) ) } } else { if (lavoptions$conditional.x) { OV.VAR <- lavsamplestats@res.var } else { OV.VAR <- lavsamplestats@var } } if (lavoptions$std.lv) { parscale <- lav_standardize_all( lavobject = NULL, est = rep(1, length(lavpartable$lhs)), est.std = rep(1, length(lavpartable$lhs)), cov.std = FALSE, ov.var = OV.VAR, lavmodel = lavmodel, lavpartable = lavpartable, cov.x = lavsamplestats@cov.x ) } else { # needs good estimates for lv variances! # if there is a single 'marker' indicator, we could use # its observed variance as an upper bound # for the moment, set them to 1.0 (instead of 0.05) # TODO: USE Bentler's 1982 approach to get an estimate of # VETA; use those diagonal elements... # but only if we have 'marker' indicators for each LV LV.VAR <- vector("list", lavmodel@ngroups) for (g in seq_len(lavmodel@ngroups)) { mm.in.group <- 1:lavmodel@nmat[g] + cumsum(c(0, lavmodel@nmat))[g] MLIST <- lavmodel@GLIST[mm.in.group] LAMBDA <- MLIST$lambda n.lv <- ncol(LAMBDA) LV.VAR[[g]] <- rep(1.0, n.lv) } parscale <- lav_standardize_all( lavobject = NULL, est = rep(1, length(lavpartable$lhs)), # est.std = rep(1, length(lavpartable$lhs)), # here, we use whatever the starting values are # for the latent variances... 
cov.std = FALSE, ov.var = OV.VAR, lv.var = LV.VAR, lavmodel = lavmodel, lavpartable = lavpartable, cov.x = lavsamplestats@cov.x ) } # in addition, take sqrt for variance parameters var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs) if (length(var.idx) > 0L) { parscale[var.idx] <- sqrt(abs(parscale[var.idx])) } if (lavmodel@ceq.simple.only) { parscale <- parscale[lavpartable$free > 0 & !duplicated(lavpartable$free)] } else { parscale <- parscale[lavpartable$free > 0] } } # parscale should obey the equality constraints if (lavmodel@eq.constraints && lavoptions$optim.parscale != "none") { # pack p.pack <- as.numeric((parscale - lavmodel@eq.constraints.k0) %*% lavmodel@eq.constraints.K) # unpack parscale <- as.numeric(lavmodel@eq.constraints.K %*% p.pack) + lavmodel@eq.constraints.k0 } if (debug) { cat("parscale = ", parscale, "\n") } z.unpack <- x.unpack * parscale # 2. pack (apply equality constraints) if (lavmodel@eq.constraints) { z.pack <- as.numeric((z.unpack - lavmodel@eq.constraints.k0) %*% lavmodel@eq.constraints.K) } else { z.pack <- z.unpack } # 3. transform (already constrained) variances to standard deviations? # TODO # if(lavoptions$optim.var.transform == "sqrt" && # length(lavmodel@x.free.var.idx) > 0L) { # # transforming variances using atan (or another sigmoid function?) # # FIXME: better approach? # #start.x[lavmodel@x.free.var.idx] <- # # atan(start.x[lavmodel@x.free.var.idx]) # start.x[lavmodel@x.free.var.idx] <- # sqrt(start.x[lavmodel@x.free.var.idx]) # assuming positive var # } # final starting values for optimizer start.x <- z.pack if (debug) { cat("start.x = ", start.x, "\n") } # user-specified bounds? (new in 0.6-2) if (is.null(lavpartable$lower)) { lower <- -Inf } else { if (lavmodel@ceq.simple.only) { free.idx <- which(lavpartable$free > 0L & !duplicated(lavpartable$free)) lower <- lavpartable$lower[free.idx] } else if (lavmodel@eq.constraints) { # bounds have no effect any longer.... lav_msg_warn(gettext( "bounds have no effect in the presence of linear equality constraints")) lower <- -Inf } else { lower <- lavpartable$lower[lavpartable$free > 0L] } } if (is.null(lavpartable$upper)) { upper <- +Inf } else { if (lavmodel@ceq.simple.only) { free.idx <- which(lavpartable$free > 0L & !duplicated(lavpartable$free)) upper <- lavpartable$upper[free.idx] } else if (lavmodel@eq.constraints) { # bounds have no effect any longer.... if (is.null(lavpartable$lower)) { # bounds have no effect any longer.... lav_msg_warn(gettext( "bounds have no effect in the presence of linear equality constraints")) } upper <- +Inf } else { upper <- lavpartable$upper[lavpartable$free > 0L] } } # check for inconsistent lower/upper bounds # this may happen if we have equality constraints; qr() may switch # the sign... bad.idx <- which(lower > upper) if (length(bad.idx) > 0L) { # switch # tmp <- lower[bad.idx] # lower[bad.idx] <- upper[bad.idx] # upper[bad.idx] <- tmp lower[bad.idx] <- -Inf upper[bad.idx] <- +Inf } # function to be minimized objective_function <- function(x, verbose = FALSE, infToMax = FALSE, debug = FALSE) { # 3. standard deviations to variances # WARNING: x is still packed here! # if(lavoptions$optim.var.transform == "sqrt" && # length(lavmodel@x.free.var.idx) > 0L) { # #x[lavmodel@x.free.var.idx] <- tan(x[lavmodel@x.free.var.idx]) # x.var <- x[lavmodel@x.free.var.idx] # x.var.sign <- sign(x.var) # x[lavmodel@x.free.var.idx] <- x.var.sign * (x.var * x.var) # square! # } # 2. 
unpack if (lavmodel@eq.constraints) {
      x <- as.numeric(lavmodel@eq.constraints.K %*% x) + lavmodel@eq.constraints.k0
    }
    # 1. unscale
    x <- x / parscale
    # update GLIST (change `state') and make a COPY!
    GLIST <- lav_model_x2GLIST(lavmodel, x = x)
    fx <- lav_model_objective(
      lavmodel = lavmodel, GLIST = GLIST,
      lavsamplestats = lavsamplestats, lavdata = lavdata,
      lavcache = lavcache
    )
    # only for PML: divide by N (to speed up convergence)
    if (estimator == "PML") {
      fx <- fx / lavsamplestats@ntotal
    }
    if (debug || verbose) {
      cat(" objective function = ", sprintf("%18.16f", fx), "\n", sep = "")
    }
    if (debug) {
      # cat("Current unconstrained parameter values =\n")
      # tmp.x <- lav_model_get_parameters(lavmodel, GLIST=GLIST, type="unco")
      # print(tmp.x); cat("\n")
      cat("Current free parameter values =\n")
      print(x)
      cat("\n")
    }
    if (lavoptions$optim.partrace) {
      PENV$PARTRACE <- rbind(PENV$PARTRACE, c(fx, x))
    }
    # for L-BFGS-B
    # if(infToMax && is.infinite(fx)) fx <- 1e20
    if (!is.finite(fx)) {
      fx.group <- attr(fx, "fx.group")
      fx <- 1e20
      attr(fx, "fx.group") <- fx.group # only for lav_model_fit()
    }
    fx
  }

  gradient_function <- function(x, verbose = FALSE, infToMax = FALSE, debug = FALSE) {
    # transform variances back
    # if(lavoptions$optim.var.transform == "sqrt" &&
    #    length(lavmodel@x.free.var.idx) > 0L) {
    #   #x[lavmodel@x.free.var.idx] <- tan(x[lavmodel@x.free.var.idx])
    #   x.var <- x[lavmodel@x.free.var.idx]
    #   x.var.sign <- sign(x.var)
    #   x[lavmodel@x.free.var.idx] <- x.var.sign * (x.var * x.var) # square!
    # }
    # 2. unpack
    if (lavmodel@eq.constraints) {
      x <- as.numeric(lavmodel@eq.constraints.K %*% x) + lavmodel@eq.constraints.k0
    }
    # 1. unscale
    x <- x / parscale
    # update GLIST (change `state') and make a COPY!
    GLIST <- lav_model_x2GLIST(lavmodel, x = x)
    dx <- lav_model_gradient(
      lavmodel = lavmodel, GLIST = GLIST,
      lavsamplestats = lavsamplestats, lavdata = lavdata,
      lavcache = lavcache, type = "free",
      group.weight = group.weight, ### check me!!
      ceq.simple = lavmodel@ceq.simple.only
    )
    if (debug) {
      cat("Gradient function (analytical) =\n")
      print(dx)
      cat("\n")
    }
    # 1. scale (note: divide, not multiply!)
    dx <- dx / parscale
    # 2. pack
    if (lavmodel@eq.constraints) {
      dx <- as.numeric(dx %*% lavmodel@eq.constraints.K)
    }
    # 3. transform variances back
    # if(lavoptions$optim.var.transform == "sqrt" &&
    #    length(lavmodel@x.free.var.idx) > 0L) {
    #   x.var <- x[lavmodel@x.free.var.idx] # here in 'var' metric
    #   x.var.sign <- sign(x.var)
    #   x.var <- abs(x.var)
    #   x.sd <- sqrt(x.var)
    #   dx[lavmodel@x.free.var.idx] <-
    #     ( 2 * x.var.sign * dx[lavmodel@x.free.var.idx] * x.sd )
    # }
    # only for PML: divide by N (to speed up convergence)
    if (estimator == "PML") {
      dx <- dx / lavsamplestats@ntotal
    }
    if (debug) {
      cat("Gradient function (analytical, after eq.constraints.K) =\n")
      print(dx)
      cat("\n")
    }
    dx
  }

  gradient_function_numerical <- function(x, verbose = FALSE, debug = FALSE) {
    # NOTE: no need to 'transform' anything here (var/eq)
    # this is done anyway in objective_function
    # numerical approximation using the Richardson method
    npar <- length(x)
    h <- 10e-6
    dx <- numeric(npar)
    ## FIXME: call lav_model_objective directly!!
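    # the loop below implements the fourth-order central difference
    #   f'(x) ~ (f(x - 2h) - 8 f(x - h) + 8 f(x + h) - f(x + 2h)) / (12 h),
    # whose truncation error is O(h^4); e.g. for f(x) = x^2 at x = 1 it
    # recovers f'(1) = 2 up to rounding error in the function evaluations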
for (i in 1:npar) { x.left <- x.left2 <- x.right <- x.right2 <- x x.left[i] <- x[i] - h x.left2[i] <- x[i] - 2 * h x.right[i] <- x[i] + h x.right2[i] <- x[i] + 2 * h fx.left <- objective_function(x.left, verbose = FALSE, debug = FALSE) fx.left2 <- objective_function(x.left2, verbose = FALSE, debug = FALSE) fx.right <- objective_function(x.right, verbose = FALSE, debug = FALSE) fx.right2 <- objective_function(x.right2, verbose = FALSE, debug = FALSE) dx[i] <- (fx.left2 - 8 * fx.left + 8 * fx.right - fx.right2) / (12 * h) } # dx <- lavGradientC(func=objective_function, x=x) # does not work if pnorm is involved... (eg PML) if (debug) { cat("Gradient function (numerical) =\n") print(dx) cat("\n") } dx } gradient_function_numerical_complex <- function(x, verbose = FALSE, debug = FALSE) { dx <- Re(lav_func_gradient_complex( func = objective_function, x = x, h = sqrt(.Machine$double.eps) )) # does not work if pnorm is involved... (eg PML) if (debug) { cat("Gradient function (numerical complex) =\n") print(dx) cat("\n") } dx } # check if the initial values produce a positive definite Sigma # to begin with -- but only for estimator="ML" if (estimator %in% c("ML", "FML", "MML")) { Sigma.hat <- computeSigmaHat(lavmodel, extra = TRUE) for (g in 1:ngroups) { if (!attr(Sigma.hat[[g]], "po")) { group.txt <- if(ngroups > 1) gettextf(" in group %s.", g) else "." if (debug) { print(Sigma.hat[[g]][, ]) } lav_msg_warn(gettext( "initial model-implied matrix (Sigma) is not positive definite; check your model and/or starting parameters"), group.txt) x <- start.x fx <- as.numeric(NA) attr(fx, "fx.group") <- rep(as.numeric(NA), ngroups) attr(x, "converged") <- FALSE attr(x, "iterations") <- 0L attr(x, "control") <- lavoptions@control attr(x, "fx") <- fx return(x) } } } # parameter scaling # FIXME: what is the best way to set the scale?? # current strategy: if startx > 1.0, we rescale by using # 1/startx SCALE <- rep(1.0, length(start.x)) if (lavoptions$optim.parscale == "none") { idx <- which(abs(start.x) > 1.0) if (length(idx) > 0L) { SCALE[idx] <- abs(1.0 / start.x[idx]) } } if (debug) { cat("SCALE = ", SCALE, "\n") } # first try: check if starting values return a finite value fx <- objective_function(start.x, verbose = verbose, debug = debug) if (!is.finite(fx)) { # emergency change of start.x start.x <- start.x / 10 } # first some nelder mead steps? (default = FALSE) INIT_NELDER_MEAD <- lavoptions$optim.init_nelder_mead # gradient: analytic, numerical or NULL? if (is.character(lavoptions$optim.gradient)) { if (lavoptions$optim.gradient %in% c("analytic", "analytical")) { GRADIENT <- gradient_function } else if (lavoptions$optim.gradient %in% c("numerical", "numeric")) { GRADIENT <- gradient_function_numerical } else if (lavoptions$optim.gradient %in% c("numeric.complex", "complex")) { GRADIENT <- gradient_function_numerical_complex } else if (lavoptions$optim.gradient %in% c("NULL", "null")) { GRADIENT <- NULL } else { lav_msg_warn(gettext("gradient should be analytic, numerical or NULL")) } } else if (is.logical(lavoptions$optim.gradient)) { if (lavoptions$optim.gradient) { GRADIENT <- gradient_function } else { GRADIENT <- NULL } } else if (is.null(lavoptions$optim.gradient)) { GRADIENT <- gradient_function } # default optimizer if (length(lavmodel@ceq.nonlinear.idx) == 0L && length(lavmodel@cin.linear.idx) == 0L && length(lavmodel@cin.nonlinear.idx) == 0L) { if (is.null(lavoptions$optim.method)) { OPTIMIZER <- "NLMINB" # OPTIMIZER <- "BFGS" # slightly slower, no bounds; better scaling! 
# OPTIMIZER <- "L-BFGS-B" # trouble with Inf values for fx! } else { OPTIMIZER <- toupper(lavoptions$optim.method) stopifnot(OPTIMIZER %in% c( "NLMINB0", "NLMINB1", "NLMINB2", "NLMINB", "BFGS", "L-BFGS-B", "NONE" )) if (OPTIMIZER == "NLMINB1") { OPTIMIZER <- "NLMINB" } } } else { if (is.null(lavoptions$optim.method)) { OPTIMIZER <- "NLMINB.CONSTR" } else { OPTIMIZER <- toupper(lavoptions$optim.method) stopifnot(OPTIMIZER %in% c("NLMINB.CONSTR", "NLMINB", "NONE")) } if (OPTIMIZER == "NLMINB") { OPTIMIZER <- "NLMINB.CONSTR" } } if (INIT_NELDER_MEAD) { if (verbose) cat(" initial Nelder-Mead step:\n") trace <- 0L if (verbose) trace <- 1L optim.out <- optim( par = start.x, fn = objective_function, method = "Nelder-Mead", # control=list(maxit=10L, # parscale=SCALE, # trace=trace), hessian = FALSE, verbose = verbose, debug = debug ) cat("\n") start.x <- optim.out$par } if (OPTIMIZER == "NLMINB0") { if (verbose) cat(" quasi-Newton steps using NLMINB0 (no analytic gradient):\n") # if(debug) control$trace <- 1L; control.nlminb <- list( eval.max = 20000L, iter.max = 10000L, trace = 0L, # abs.tol=1e-20, ### important!! fx never negative abs.tol = (.Machine$double.eps * 10), rel.tol = 1e-10, # step.min=2.2e-14, # in =< 0.5-12 step.min = 1.0, # 1.0 in < 0.5-21 step.max = 1.0, x.tol = 1.5e-8, xf.tol = 2.2e-14 ) control.nlminb <- modifyList(control.nlminb, lavoptions$control) control <- control.nlminb[c( "eval.max", "iter.max", "trace", "step.min", "step.max", "abs.tol", "rel.tol", "x.tol", "xf.tol" )] # cat("DEBUG: control = "); print(str(control.nlminb)); cat("\n") optim.out <- nlminb( start = start.x, objective = objective_function, gradient = NULL, lower = lower, upper = upper, control = control, scale = SCALE, verbose = verbose, debug = debug ) if (verbose) { cat(" convergence status (0=ok): ", optim.out$convergence, "\n") cat(" nlminb message says: ", optim.out$message, "\n") cat(" number of iterations: ", optim.out$iterations, "\n") cat( " number of function evaluations [objective, gradient]: ", optim.out$evaluations, "\n" ) } # try again if (optim.out$convergence != 0L) { optim.out <- nlminb( start = start.x, objective = objective_function, gradient = NULL, lower = lower, upper = upper, control = control, scale = SCALE, verbose = verbose, debug = debug ) } iterations <- optim.out$iterations x <- optim.out$par if (optim.out$convergence == 0L) { converged <- TRUE } else { converged <- FALSE } } else if (OPTIMIZER == "NLMINB") { if (verbose) cat(" quasi-Newton steps using NLMINB:\n") # if(debug) control$trace <- 1L; control.nlminb <- list( eval.max = 20000L, iter.max = 10000L, trace = 0L, # abs.tol=1e-20, ### important!! 
fx never negative abs.tol = (.Machine$double.eps * 10), rel.tol = 1e-10, # step.min=2.2e-14, # in =< 0.5-12 step.min = 1.0, # 1.0 in < 0.5-21 step.max = 1.0, x.tol = 1.5e-8, xf.tol = 2.2e-14 ) control.nlminb <- modifyList(control.nlminb, lavoptions$control) control <- control.nlminb[c( "eval.max", "iter.max", "trace", "step.min", "step.max", "abs.tol", "rel.tol", "x.tol", "xf.tol" )] # cat("DEBUG: control = "); print(str(control.nlminb)); cat("\n") optim.out <- nlminb( start = start.x, objective = objective_function, gradient = GRADIENT, lower = lower, upper = upper, control = control, scale = SCALE, verbose = verbose, debug = debug ) if (verbose) { cat(" convergence status (0=ok): ", optim.out$convergence, "\n") cat(" nlminb message says: ", optim.out$message, "\n") cat(" number of iterations: ", optim.out$iterations, "\n") cat( " number of function evaluations [objective, gradient]: ", optim.out$evaluations, "\n" ) } iterations <- optim.out$iterations x <- optim.out$par if (optim.out$convergence == 0L) { converged <- TRUE } else { converged <- FALSE } } else if (OPTIMIZER == "BFGS") { # warning: Bollen example with estimator=GLS does NOT converge! # (but WLS works!) # - BB.ML works too control.bfgs <- list( trace = 0L, fnscale = 1, parscale = SCALE, ## or not? ndeps = 1e-3, maxit = 10000, abstol = 1e-20, reltol = 1e-10, REPORT = 1L ) control.bfgs <- modifyList(control.bfgs, lavoptions$control) control <- control.bfgs[c( "trace", "fnscale", "parscale", "ndeps", "maxit", "abstol", "reltol", "REPORT" )] # trace <- 0L; if(verbose) trace <- 1L optim.out <- optim( par = start.x, fn = objective_function, gr = GRADIENT, method = "BFGS", control = control, hessian = FALSE, verbose = verbose, debug = debug ) if (verbose) { cat(" convergence status (0=ok): ", optim.out$convergence, "\n") cat(" optim BFGS message says: ", optim.out$message, "\n") # cat("number of iterations: ", optim.out$iterations, "\n") cat( " number of function evaluations [objective, gradient]: ", optim.out$counts, "\n" ) } # iterations <- optim.out$iterations iterations <- optim.out$counts[1] x <- optim.out$par if (optim.out$convergence == 0L) { converged <- TRUE } else { converged <- FALSE } } else if (OPTIMIZER == "L-BFGS-B") { # warning, does not cope with Inf values!! control.lbfgsb <- list( trace = 0L, fnscale = 1, parscale = SCALE, ## or not? 
ndeps = 1e-3, maxit = 10000, REPORT = 1L, lmm = 5L, factr = 1e7, pgtol = 0 ) control.lbfgsb <- modifyList(control.lbfgsb, lavoptions$control) control <- control.lbfgsb[c( "trace", "fnscale", "parscale", "ndeps", "maxit", "REPORT", "lmm", "factr", "pgtol" )] optim.out <- optim( par = start.x, fn = objective_function, gr = GRADIENT, method = "L-BFGS-B", lower = lower, upper = upper, control = control, hessian = FALSE, verbose = verbose, debug = debug, infToMax = TRUE ) if (verbose) { cat(" convergence status (0=ok): ", optim.out$convergence, "\n") cat(" optim L-BFGS-B message says: ", optim.out$message, "\n") # cat("number of iterations: ", optim.out$iterations, "\n") cat( " number of function evaluations [objective, gradient]: ", optim.out$counts, "\n" ) } # iterations <- optim.out$iterations iterations <- optim.out$counts[1] x <- optim.out$par if (optim.out$convergence == 0L) { converged <- TRUE } else { converged <- FALSE } } else if (OPTIMIZER == "NLMINB.CONSTR") { ocontrol <- list(verbose = verbose) if (!is.null(lavoptions$control$control.outer)) { ocontrol <- c(lavoptions$control$control.outer, verbose = verbose) } control.nlminb <- list( eval.max = 20000L, iter.max = 10000L, trace = 0L, # abs.tol=1e-20, abs.tol = (.Machine$double.eps * 10), rel.tol = 1e-9, # 1e-10 seems 'too strict' step.min = 1.0, # 1.0 in < 0.5-21 step.max = 1.0, x.tol = 1.5e-8, xf.tol = 2.2e-14 ) control.nlminb <- modifyList(control.nlminb, lavoptions$control) control <- control.nlminb[c( "eval.max", "iter.max", "trace", "abs.tol", "rel.tol" )] cin <- cin.jac <- ceq <- ceq.jac <- NULL if (!is.null(body(lavmodel@cin.function))) cin <- lavmodel@cin.function if (!is.null(body(lavmodel@cin.jacobian))) cin.jac <- lavmodel@cin.jacobian if (!is.null(body(lavmodel@ceq.function))) ceq <- lavmodel@ceq.function if (!is.null(body(lavmodel@ceq.jacobian))) ceq.jac <- lavmodel@ceq.jacobian trace <- FALSE if (verbose) trace <- TRUE optim.out <- nlminb.constr( start = start.x, objective = objective_function, gradient = GRADIENT, control = control, scale = SCALE, verbose = verbose, debug = debug, lower = lower, upper = upper, cin = cin, cin.jac = cin.jac, ceq = ceq, ceq.jac = ceq.jac, control.outer = ocontrol ) if (verbose) { cat(" convergence status (0=ok): ", optim.out$convergence, "\n") cat(" nlminb.constr message says: ", optim.out$message, "\n") cat(" number of outer iterations: ", optim.out$outer.iterations, "\n") cat(" number of inner iterations: ", optim.out$iterations, "\n") cat( " number of function evaluations [objective, gradient]: ", optim.out$evaluations, "\n" ) } iterations <- optim.out$iterations x <- optim.out$par if (optim.out$convergence == 0) { converged <- TRUE } else { converged <- FALSE } } else if (OPTIMIZER == "NONE") { x <- start.x iterations <- 0L converged <- TRUE control <- list() # if inequality constraints, add con.jac/lambda # needed for df! if (length(lavmodel@ceq.nonlinear.idx) == 0L && length(lavmodel@cin.linear.idx) == 0L && length(lavmodel@cin.nonlinear.idx) == 0L) { optim.out <- list() } else { # if inequality constraints, add con.jac/lambda # needed for df! optim.out <- list() if (is.null(body(lavmodel@ceq.function))) { ceq <- function(x, ...) { return(numeric(0)) } } else { ceq <- lavmodel@ceq.function } if (is.null(body(lavmodel@cin.function))) { cin <- function(x, ...) 
{ return(numeric(0)) } } else { cin <- lavmodel@cin.function } ceq0 <- ceq(start.x) cin0 <- cin(start.x) con0 <- c(ceq0, cin0) JAC <- rbind( numDeriv::jacobian(ceq, x = start.x), numDeriv::jacobian(cin, x = start.x) ) nceq <- length(ceq(start.x)) ncin <- length(cin(start.x)) ncon <- nceq + ncin ceq.idx <- cin.idx <- integer(0) if (nceq > 0L) ceq.idx <- 1:nceq if (ncin > 0L) cin.idx <- nceq + 1:ncin cin.flag <- rep(FALSE, length(ncon)) if (ncin > 0L) cin.flag[cin.idx] <- TRUE inactive.idx <- integer(0L) cin.idx <- which(cin.flag) if (ncin > 0L) { slack <- 1e-05 inactive.idx <- which(cin.flag & con0 > slack) } attr(JAC, "inactive.idx") <- inactive.idx attr(JAC, "cin.idx") <- cin.idx attr(JAC, "ceq.idx") <- ceq.idx optim.out$con.jac <- JAC optim.out$lambda <- rep(0, ncon) } } fx <- objective_function(x) # to get "fx.group" attribute # check convergence warn.txt <- "" if (converged) { # check.gradient if (!is.null(GRADIENT) && OPTIMIZER %in% c("NLMINB", "BFGS", "L-BFGS-B")) { # compute unscaled gradient dx <- GRADIENT(x) # NOTE: unscaled gradient!!! if (converged && lavoptions$check.gradient && any(abs(dx) > lavoptions$optim.dx.tol)) { # ok, identify the non-zero elements non.zero <- which(abs(dx) > lavoptions$optim.dx.tol) # which ones are 'boundary' points, defined by lower/upper? bound.idx <- integer(0L) if (!is.null(lavpartable$lower)) { bound.idx <- c(bound.idx, which(lower == x)) } if (!is.null(lavpartable$upper)) { bound.idx <- c(bound.idx, which(upper == x)) } if (length(bound.idx) > 0L) { non.zero <- non.zero[-which(non.zero %in% bound.idx)] } # this has many implications ... so should be careful to # avoid false alarm if (length(non.zero) > 0L) { converged <- FALSE warn.txt <- paste("the optimizer (", OPTIMIZER, ") ", "claimed the model converged,\n", " but not all elements of the gradient are (near) zero;\n", " the optimizer may not have found a local solution\n", " use check.gradient = FALSE to skip this check.", sep = "" ) } } } else { dx <- numeric(0L) } } else { dx <- numeric(0L) warn.txt <- "the optimizer warns that a solution has NOT been found!" } # transform back # 3. # if(lavoptions$optim.var.transform == "sqrt" && # length(lavmodel@x.free.var.idx) > 0L) { # #x[lavmodel@x.free.var.idx] <- tan(x[lavmodel@x.free.var.idx]) # x.var <- x[lavmodel@x.free.var.idx] # x.var.sign <- sign(x.var) # x[lavmodel@x.free.var.idx] <- x.var.sign * (x.var * x.var) # square! # } # 2. unpack if (lavmodel@eq.constraints) { x <- as.numeric(lavmodel@eq.constraints.K %*% x) + lavmodel@eq.constraints.k0 } # 1. unscale x <- x / parscale attr(x, "converged") <- converged attr(x, "start") <- start.x attr(x, "warn.txt") <- warn.txt attr(x, "iterations") <- iterations attr(x, "control") <- control attr(x, "fx") <- fx attr(x, "dx") <- dx attr(x, "parscale") <- parscale if (!is.null(optim.out$con.jac)) attr(x, "con.jac") <- optim.out$con.jac if (!is.null(optim.out$lambda)) attr(x, "con.lambda") <- optim.out$lambda if (lavoptions$optim.partrace) { attr(x, "partrace") <- PENV$PARTRACE } x } # backwards compatibility # estimateModel <- lav_model_estimate lavaan/R/lav_lavaan_step04_partable.R0000644000176200001440000001343614627656441017243 0ustar liggesuserslav_lavaan_step04_partable <- function(slotParTable = NULL, # nolint model = NULL, flat.model = NULL, lavoptions = NULL, lavdata = NULL, constraints = NULL) { # # # # # # # # # # # # # # 4. 
lavpartable # # # # # # # # # # # # # # # if slotParTable not null # copy slotParTable to lavpartable # else # if model is character or formula # create a temporary variable tmp.data.ov equal to lavdata@ov # if estimator "catML" # set meanstructure to FALSE # set the member type in the temporary variable tmp.data.ov to a # numeric vector with all zeroes # create lavpartable via function lavParTable (=lavaanify) # using the temporary variable for parameter varTable # else # if model is lavaan object # set lavpartable = parTable(model) # else # if model is a list # set lavpartable to # as.list(lav_partable_complete(as.list(flat.model))) # else # *** error *** # if slotParTable is NULL check lavpartable via lav_partable_check # if lavoptions$optim.method is "em" and there are variances specified in # partable with free = 0L and # starting value ustart 0, set ustart for these variances to # lavoptions$em.zerovar.offset if (!is.null(slotParTable)) { lavpartable <- lav_partable_set_cache(slotParTable) } else if (is.character(model) || inherits(model, "formula") || # model was already a flat.model (is.list(model) && !is.null(model$mod.idx) && !is.null(attr(model, "modifiers")))) { if (lav_verbose()) { cat("lavpartable ...") } # check flat.model before we proceed if (lav_debug()) { print(as.data.frame(flat.model)) } # catch ~~ of fixed.x covariates if fixed.x = TRUE # --> done inside lavaanify! # if(lavoptions$fixed.x) { # tmp <- lav_partable_vnames(flat.model, type = "ov.x", # ov.x.fatal = FALSE, warn = TRUE) # tmp <- try(vnames(flat.model, type = "ov.x", ov.x.fatal = TRUE), # silent = TRUE) # if(inherits(tmp, "try-error")) { # warning("lavaan WARNING: syntax contains parameters involving ", # "exogenous covariates; switching to fixed.x = FALSE") # lavoptions$fixed.x <- FALSE # } # } # if(lavoptions$conditional.x) { # tmp <- vnames(flat.model, type = "ov.x", ov.x.fatal = TRUE) # } tmp.data.ov <- lavdata@ov if (lavoptions$estimator == "catML") { lavoptions$meanstructure <- FALSE tmp.data.ov$type <- rep("numeric", length(tmp.data.ov$type)) } lavpartable <- lavParTable( model = flat.model, constraints = constraints, varTable = tmp.data.ov, ngroups = lavdata@ngroups, meanstructure = lavoptions$meanstructure, int.ov.free = lavoptions$int.ov.free, int.lv.free = lavoptions$int.lv.free, marker.int.zero = lavoptions$marker.int.zero, orthogonal = lavoptions$orthogonal, orthogonal.x = lavoptions$orthogonal.x, orthogonal.y = lavoptions$orthogonal.y, orthogonal.efa = lavoptions$rotation.args$orthogonal, conditional.x = lavoptions$conditional.x, fixed.x = lavoptions$fixed.x, std.lv = lavoptions$std.lv, correlation = lavoptions$correlation, effect.coding = lavoptions$effect.coding, ceq.simple = lavoptions$ceq.simple, parameterization = lavoptions$parameterization, auto.fix.first = lavoptions$auto.fix.first, auto.fix.single = lavoptions$auto.fix.single, auto.var = lavoptions$auto.var, auto.cov.lv.x = lavoptions$auto.cov.lv.x, auto.cov.y = lavoptions$auto.cov.y, auto.th = lavoptions$auto.th, auto.delta = lavoptions$auto.delta, auto.efa = lavoptions$auto.efa, group.equal = lavoptions$group.equal, group.partial = lavoptions$group.partial, group.w.free = lavoptions$group.w.free, as.data.frame. 
= FALSE ) lavpartable <- lav_partable_set_cache(lavpartable) if (lav_verbose()) { cat(" done.\n") } } else if (inherits(model, "lavaan")) { lavpartable <- lav_partable_set_cache(as.list(parTable(model)), model@pta) } else if (is.list(model)) { # we already checked this when creating flat.model # but we may need to complete it lavpartable <- as.list(flat.model) # in case model is a data.frame # complete table lavpartable <- as.list(lav_partable_complete(lavpartable)) lavpartable <- lav_partable_set_cache(lavpartable) } else { lav_msg_stop(gettextf( "model [type = %s] is not of type character or list", class(model))) } if (lav_debug()) { print(as.data.frame(lavpartable)) } # at this point, we should check if the partable is complete # or not; this is especially relevant if the lavaan() function # was used, but the user has forgotten some variances/intercepts... if (is.null(slotParTable)) { junk <- lav_partable_check(lavpartable, categorical = lavoptions$.categorical ) rm(junk) } # for EM only (for now), force fixed-to-zero (residual) variances # to be slightly larger than zero if (lavoptions$optim.method == "em") { zero.var.idx <- which(lavpartable$op == "~~" & lavpartable$lhs == lavpartable$rhs & lavpartable$free == 0L & lavpartable$ustart == 0) if (length(zero.var.idx) > 0L) { lavpartable$ustart[zero.var.idx] <- lavoptions$em.zerovar.offset } lavpartable <- lav_partable_set_cache(lavpartable, NULL, force = TRUE) } list( lavoptions = lavoptions, lavpartable = lavpartable ) } lavaan/R/lav_fit_other.R0000644000176200001440000000346614627656441014715 0ustar liggesusers# various fit measures # - lav_fit_cn # - lav_fit_wrmr # - lav_fit_mfi # - lav_fit_ecvi # Y.R. 21 July 2022 # Hoelter Critical N (CN) lav_fit_cn <- function(X2 = NULL, df = NULL, N = NULL, alpha = 0.05) { # catch df=0, X2=0 if (df == 0 && X2 < .Machine$double.eps) { CN <- as.numeric(NA) } else { CN <- qchisq(p = (1 - alpha), df = df) / (X2 / N) + 1 } CN } # WRMR # we use the definition: wrmr = sqrt ( 2*N*F / nel ) # Note: when multiple groups, 'nel' only seems to correspond to the # first group??? lav_fit_wrmr <- function(X2 = NULL, nel = NULL) { if (nel > 0) { WRMR <- sqrt(X2 / nel) } else { WRMR <- as.numeric(NA) } WRMR } # MFI - McDonald Fit Index (McDonald, 1989) lav_fit_mfi <- function(X2 = NULL, df = NULL, N = NULL) { MFI <- exp(-0.5 * (X2 - df) / N) MFI } # ECVI - cross-validation index (Brown & Cudeck, 1989, eq 5) # "In the special case where F = F_ML, Equation 5 [=ECVI] is the # rescaled AIC employed by Cudeck and Browne (1983, Equation 5.1). This # result is concordant with a finding of Stone (1977). He showed under general # conditions that if the "leaving one out at a time" method of cross-validation # (Stone, 1974; Geisser, 1975) is employed, a log-likelihood measure of # predictive validity is asymptotically equivalent to the AIC." (p. 448) # not defined for multiple groups and/or models with meanstructures # TDJ: According to Dudgeon (2004, p. 317), "ECVI requires no adjustment # when a model is fitted simultaneously in multiple samples." # And I think the lack of mean structure in Brown & Cudeck (1989) # was a matter of habitual simplification back then, not necessity. # YR: - why does Dudgeon eq 22 use (df + 2*npar) instead of (2*npar)?? lav_fit_ecvi <- function(X2 = NULL, npar = npar, N = N) { ECVI <- X2 / N + (2 * npar) / N ECVI } lavaan/R/lav_msg.R0000644000176200001440000001554414627656441013520 0ustar liggesusers# Displays a message (... 
concatenated with spaces in between) with header # 'lavaan(function):', except when showheader == FALSE, and formatted to have # a maximum line length of 'txt.width' while all but the first line start with # 'indent' spaces. If a footer is specified it is appended to the formatted text # 'as is'. The message is shown via R function 'message()'. lav_msg_note <- function(..., showheader = FALSE, footer = "") { wat <- unlist(list(...), use.names = FALSE) if (!showheader) wat <- c("lavaan NOTE: ___", wat) msg <- lav_msg(wat, showheader = showheader) if (footer != "") msg <- paste(msg, footer, sep = "\n") message(msg, domain = NA) } # Displays a message with header and formatted as # above via R function 'warning()'. lav_msg_warn <- function(..., footer = "") { if (!lav_warn()) return() wat <- unlist(list(...), use.names = FALSE) msg <- lav_msg(wat) if (footer != "") msg <- paste(msg, footer, sep = "\n") warning(msg, call. = FALSE, domain = NA) } # Displays a message with header and formatted as # above via R function 'stop()'. lav_msg_stop <- function(..., footer = "") { wat <- unlist(list(...), use.names = FALSE) msg <- lav_msg(wat) if (footer != "") msg <- paste(msg, footer, sep = "\n") stop(msg, call. = FALSE, domain = NA) } # Displays a message with header and formatted as # above via R function 'stop()', where the message is prepended with "FIXME:", # to indicate an internal error, e.g. an error condition which was supposed # to be handled in the calling functions. Such error message do not have to # be created by [n]gettext[f] because they don't have to be translated!!! lav_msg_fixme <- function(...) { wat <- c("FIXME: ", unlist(list(...), use.names = FALSE)) stop(lav_msg(wat), call. = FALSE, domain = NA) } # subroutine for above functions lav_msg <- function(wat, txt.width = getOption("width", 80L), indent = 3L, showheader = TRUE) { if (showheader) { ignore.in.stack <- c( "^eval$", "^try", "^doTryCatch", "^lav_msg", "^stop$", "^warning$", "^which$", "^unique$", "^as\\.", "^unlist$", "^message$", "^source$", "^withVisible$", "^tryCatch.W.E$", "^withCallingHandlers$", "^do.call$", "^paste" ) sc <- sys.calls() sc.i <- length(sc) sc.naam <- "" while (sc.i > 0L) { x <- tryCatch( as.character(sc[[sc.i]][[1L]]), error = function(e) {"unknown"} ) if (length(x) == 3L) { # needed if a function specified in namespace, e.g. 
# as.character(str2lang("lavaan::sem(m, d)")[[1L]]) x <- x[[3L]] } skip <- FALSE for (re in ignore.in.stack) { if (grepl(re, x)) { skip <- TRUE break } } if (!skip) { sc.naam <- x break } sc.i <- sc.i - 1L } if (sc.naam == "") { header <- "lavaan: ___" } else { header <- paste0("lavaan->", sc.naam, "(): ___") } } else { header <- "" } txt.width <- txt.width - indent # whitespace at the right # make sure we only have a single string txt <- paste(wat, collapse = " ") # split the txt in little chunks chunks <- strsplit(paste(header, txt), "\\s+", fixed = FALSE)[[1]] # chunk size (number of characters) chunk.size <- nchar(chunks) # remove empty chunk in position 1 (if txt starts with whitespace) if (chunk.size[1L] == 0L) { chunks <- chunks[-1L] chunk.size <- chunk.size[-1] } nstart <- 1L nstop <- 1L corr.line1 <- 7L # first line possibly contains "error: " while (nstart <= length(chunks)) { while (nstop < length(chunks) && sum(chunk.size[seq.int(nstart, nstop + 1L)]) + corr.line1 + nstop - nstart + indent < txt.width && chunks[nstop + 1L] != "___") { nstop <- nstop + 1 } corr.line1 <- 0L if (nstop < length(chunks) && chunks[nstop + 1L] == "___") { # forced line break chunks[nstop + 1L] <- "" nstop <- nstop + 1L } if (nstop < length(chunks)) { chunks[nstop + 1L] <- paste0( "\n", strrep(" ", indent), chunks[nstop + 1L] ) } nstart <- nstop + 1L nstop <- nstart } paste(chunks, collapse = " ") } # Transforms a value to a character representation for use in messages # if logsep = "array" (default), letters[1:3] is transformed to ("a", "b", "c") # if logsep = "none", c("x", "y", "z") is transformed to "x", "y", "z" # if logsep = "and", 1:3 is transformed to 1, 2 and 3 # if logsep = "or", c("a", "b", "c") is transformed to "a", "b" or "c" # The type of quote can be modified via parameter qd (default = TRUE). # If the object has names, the names will be prepended with a colon before the # value, e.g. c(x = 2.3, y = 4) --> (x : 2.3, y : 4). lav_msg_view <- function(x, log.sep = c("array", "none", "and", "or"), qd = TRUE) { if (missing(x)) { return("NULL") } log.sep <- match.arg(log.sep) xn <- names(x) if (is.list(x)) { xx <- sapply(x, lav_msg_view) } else { if (is.character(x)) { if (qd) { xx <- dQuote(x, q = FALSE) } else { xx <- sQuote(x, q = FALSE) } } else { xx <- as.character(x) } xx[is.na(x)] <- "NA" } if (!is.null(xn)) xx <- paste(xn, ":", xx) if (length(xx) == 1) { rv <- xx } else { if (log.sep == "array") rv <- paste0("(", paste(xx, collapse = ", "), ")") if (log.sep == "none") rv <- paste(xx, collapse = ", ") if (log.sep == "and") rv <- paste(paste(xx[-length(xx)], collapse = ", "), gettext("and"), xx[length(xx)]) if (log.sep == "or") rv <- paste(paste(xx[-length(xx)], collapse = ", "), gettext("or"), xx[length(xx)]) } rv } # --------------- examples of use ---------------------- # # warning if argument x is missing # lav_msg_warn(gettextf( # "argument %1$s is missing, using %2$s.", # x, lav_msg_view(usedvalue) # )) # # # warning if length of an argument x is greater then 1 and cannot be # lav_msg_warn(gettextf("%1$s argument should be a single character string. 
# Only the first one (%2$s) is used.", xname, x[[1]]))
#
# # error if argument is unknown (show value)
# lav_msg_stop(gettextf(
#   "%1$s argument unknown: %2$s",
#   xname, lav_msg_view(xvalue)
# ))
#
# # error if argument isn't one of the allowed values, show values allowed
# if (length(allowed) == 2L) {
#   lav_msg_stop(gettextf(
#     "%1$s argument must be either %2$s",
#     x, lav_msg_view(allowed, "or")
#   ))
# } else {
#   lav_msg_stop(gettextf(
#     "%1$s argument must be one of %2$s",
#     x, lav_msg_view(allowed, "or")
#   ))
# }
#
# # error if argument isn't one of the allowed values (show invalid ones)
# lav_msg_stop(sprintf(
#   ngettext(
#     length(invalids),
#     "invalid value in %1$s argument: %2$s.",
#     "invalid values in %1$s argument: %2$s."
#   ),
#   x, lav_msg_view(invalids, log.sep = "none")
# ))
lavaan/R/lav_representation.R0000644000176200001440000000304714627656441015767 0ustar liggesusers# user visible function to add 'matrix' entries in the parameter table
lavMatrixRepresentation <- function(partable, representation = "LISREL",
                                    add.attributes = FALSE,
                                    as.data.frame. = TRUE) {
  # check parameter table
  partable <- lav_partable_complete(partable)
  # get model matrices
  if (representation == "LISREL") {
    REP <- lav_lisrel(partable, target = NULL, extra = add.attributes)
  } else if (representation == "RAM") {
    REP <- lav_ram(partable, target = NULL, extra = add.attributes)
  } else {
    lav_msg_stop(gettext("representation must be either \"LISREL\" or \"RAM\"."))
  }
  partable$mat <- REP$mat
  partable$row <- REP$row
  partable$col <- REP$col
  if (as.data.frame.) {
    partable <- as.data.frame(partable, stringsAsFactors = FALSE)
    class(partable) <- c("lavaan.data.frame", "data.frame")
  }
  if (add.attributes) {
    if (representation == "LISREL") {
      attr(partable, "ov.dummy.names.nox") <- attr(REP, "ov.dummy.names.nox")
      attr(partable, "ov.dummy.names.x") <- attr(REP, "ov.dummy.names.x")
    } else if (representation == "RAM") {
      attr(partable, "ov.idx") <- attr(REP, "ov.idx")
    }
    attr(partable, "mmNames") <- attr(REP, "mmNames")
    attr(partable, "mmNumber") <- attr(REP, "mmNumber")
    attr(partable, "mmRows") <- attr(REP, "mmRows")
    attr(partable, "mmCols") <- attr(REP, "mmCols")
    attr(partable, "mmDimNames") <- attr(REP, "mmDimNames")
    attr(partable, "mmSymmetric") <- attr(REP, "mmSymmetric")
  }
  partable
}
lavaan/R/lav_fit.R0000644000176200001440000000520414627656440013503 0ustar liggesusers# deprecated: only kept in order to avoid breaking some older packages
lav_model_fit <- function(lavpartable = NULL, lavmodel = NULL, lavimplied = NULL, x = NULL, VCOV = NULL, TEST = NULL) {
  stopifnot(is.list(lavpartable), inherits(lavmodel, "lavModel"))
  # extract information from 'x'
  iterations <- attr(x, "iterations")
  converged <- attr(x, "converged")
  fx <- attr(x, "fx")
  fx.group <- attr(fx, "fx.group")
  if (!is.null(attr(fx, "logl.group"))) {
    logl.group <- attr(fx, "logl.group")
    logl <- sum(logl.group)
  } else {
    logl.group <- as.numeric(NA)
    logl <- as.numeric(NA)
  }
  # print(fx.group)
  control <- attr(x, "control")
  attributes(fx) <- NULL
  x.copy <- x # we are going to change it (remove attributes)
  attributes(x.copy) <- NULL
  est <- lav_model_get_parameters(lavmodel = lavmodel, type = "user")
  # did we compute standard errors?
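  # three cases below: (1) no 'se' column and no VCOV -> all NA (but 0 for
  # fixed parameters); (2) VCOV available -> delegate to lav_model_vcov_se();
  # (3) 'se' already present in the parameter table -> reuse as-is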
lavaan/R/lav_fit.R0000644000176200001440000000520414627656440013503 0ustar liggesusers
# deprecated: only kept in order to avoid breaking some older packages
lav_model_fit <- function(lavpartable = NULL, lavmodel = NULL,
                          lavimplied = NULL, x = NULL,
                          VCOV = NULL, TEST = NULL) {
  stopifnot(is.list(lavpartable), inherits(lavmodel, "lavModel"))

  # extract information from 'x'
  iterations <- attr(x, "iterations")
  converged <- attr(x, "converged")
  fx <- attr(x, "fx")
  fx.group <- attr(fx, "fx.group")
  if (!is.null(attr(fx, "logl.group"))) {
    logl.group <- attr(fx, "logl.group")
    logl <- sum(logl.group)
  } else {
    logl.group <- as.numeric(NA)
    logl <- as.numeric(NA)
  }
  # print(fx.group)
  control <- attr(x, "control")
  attributes(fx) <- NULL
  x.copy <- x # we are going to change it (remove attributes)
  attributes(x.copy) <- NULL
  est <- lav_model_get_parameters(lavmodel = lavmodel, type = "user")

  # did we compute standard errors?
  if (is.null(lavpartable$se)) {
    if (is.null(VCOV)) {
      se <- rep(as.numeric(NA), lavmodel@nx.user)
      se[lavpartable$free == 0L] <- 0
    } else {
      se <- lav_model_vcov_se(
        lavmodel = lavmodel, lavpartable = lavpartable,
        VCOV = VCOV, BOOT = attr(VCOV, "BOOT.COEF")
      )
    }
  } else {
    se <- as.numeric(lavpartable$se) # could be logical NA
  }

  # did we compute test statistics?
  if (is.null(TEST)) {
    test <- list()
  } else {
    test <- TEST
  }

  # for convenience: compute lavmodel-implied Sigma and Mu
  if (is.null(lavimplied) || length(lavimplied) == 0L) {
    implied <- lav_model_implied(lavmodel)
  } else {
    implied <- lavimplied
  }

  # if bootstrapped parameters, add attr to 'est'
  if (!is.null(attr(VCOV, "BOOT.COEF"))) {
    attr(est, "BOOT.COEF") <- attr(VCOV, "BOOT.COEF")
  }

  # partrace?
  if (!is.null(attr(x, "partrace"))) {
    PARTRACE <- attr(x, "partrace")
  } else {
    PARTRACE <- matrix(0, 0L, 0L)
  }

  new("Fit",
    npar = max(lavpartable$free),
    x = x.copy,
    partrace = PARTRACE,
    start = lavpartable$start, # needed? (package stremo!)
    est = est, # at least 5 packages!!
    se = se,
    fx = fx,
    fx.group = fx.group,
    logl = logl,
    logl.group = logl.group,
    iterations = iterations,
    converged = converged,
    control = control,
    Sigma.hat = if (lavmodel@conditional.x) implied$res.cov else implied$cov,
    Mu.hat = if (lavmodel@conditional.x) implied$res.int else implied$mean,
    TH = if (lavmodel@conditional.x) implied$res.th else implied$th,
    test = test
  )
}

lavaan/R/lav_func_deriv.R0000644000176200001440000001564414627656441015055 0ustar liggesusers
# numerical derivatives using complex numbers
# see Squire & Trapp 1998, siam rev 40(1) 110-112
# or Ridout, MS (2009), the american statistician 63(1) 66-74

# it would seem that you can choose h to be fairly small, without
# sacrificing accuracy due to rounding errors

# YR 17 July 2012

lav_func_gradient_complex <- function(func, x, h = .Machine$double.eps, ...,
                                      fallback.simple = TRUE) {
  f0 <- try(func(x * (0 + 1i), ...), silent = TRUE)
  if (!is.complex(f0)) {
    if (fallback.simple) {
      dx <- lav_func_gradient_simple(func = func, x = x, h = sqrt(h), ...)
      return(dx)
    } else {
      lav_msg_stop(gettext(
        "function does not return a complex value")) # eg abs()
    }
  }
  if (inherits(f0, "try-error")) {
    if (fallback.simple) {
      dx <- lav_func_gradient_simple(func = func, x = x, h = sqrt(h), ...)
      return(dx)
    } else {
      lav_msg_stop(gettext(
        "function does not support non-numeric (complex) argument"))
    }
  }
  if (length(f0) != 1L) {
    lav_msg_stop(gettext(
      "function is not scalar and returns more than one element"))
  }

  nvar <- length(x)

  # determine 'h' per element of x
  h <- pmax(h, abs(h * x))

  # get exact h, per x
  tmp <- x + h
  h <- (tmp - x)

  # simple 'forward' method
  dx <- rep(as.numeric(NA), nvar)
  for (p in seq_len(nvar)) {
    dx[p] <- Im(func(x + h * 1i * (seq.int(nvar) == p), ...)) / h[p]
  }

  dx
}
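# --------------- sanity check (sketch) ----------------------
# Not part of the original file: a hypothetical interactive check of the
# complex-step gradient. For f(x) = sum(x^3) the analytic gradient is
# 3 * x^2, and the complex-step result should match it to near machine
# precision:
#
# f <- function(x) sum(x^3)
# x0 <- c(1, 2, 3)
# lav_func_gradient_complex(f, x0) # approx. 3 * x0^2 = c(3, 12, 27)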
# as a backup, if func() is not happy about non-numeric arguments
lav_func_gradient_simple <- function(func, x, h = sqrt(.Machine$double.eps),
                                     ...) {
  # check current point, see if it is a scalar function
  f0 <- func(x, ...)
  if (length(f0) != 1L) {
    lav_msg_stop(gettext(
      "function is not scalar and returns more than one element"))
  }

  nvar <- length(x)

  # determine 'h' per element of x
  h <- pmax(h, abs(h * x))

  # get exact h, per x
  tmp <- x + h
  h <- (tmp - x)

  # simple 'forward' method
  dx <- rep(as.numeric(NA), nvar)
  for (p in seq_len(nvar)) {
    dx[p] <- (func(x + h * (seq.int(nvar) == p), ...) - func(x, ...)) / h[p]
  }

  dx
}

lav_func_jacobian_complex <- function(func, x, h = .Machine$double.eps, ...,
                                      fallback.simple = TRUE) {
  f0 <- try(func(x * (0 + 1i), ...), silent = TRUE)
  if (!is.complex(f0)) {
    if (fallback.simple) {
      dx <- lav_func_jacobian_simple(func = func, x = x, h = sqrt(h), ...)
      return(dx)
    } else {
      lav_msg_stop(gettext(
        "function does not return a complex value")) # eg abs()
    }
  }
  if (inherits(f0, "try-error")) {
    if (fallback.simple) {
      dx <- lav_func_jacobian_simple(func = func, x = x, h = sqrt(h), ...)
      return(dx)
    } else {
      lav_msg_stop(gettext(
        "function does not support non-numeric (complex) argument"))
    }
  }

  nres <- length(f0)
  nvar <- length(x)

  # determine 'h' per element of x
  h <- pmax(h, abs(h * x))

  # get exact h, per x
  tmp <- x + h
  h <- (tmp - x)

  # simple 'forward' method
  dx <- matrix(as.numeric(NA), nres, nvar)
  for (p in seq_len(nvar)) {
    dx[, p] <- Im(func(x + h * 1i * (seq.int(nvar) == p), ...)) / h[p]
  }

  dx
}

lav_func_jacobian_simple <- function(func, x, h = sqrt(.Machine$double.eps),
                                     ...) {
  f0 <- func(x, ...)
  nres <- length(f0)
  nvar <- length(x)

  # determine 'h' per element of x
  h <- pmax(h, abs(h * x))

  # get exact h, per x
  tmp <- x + h
  h <- (tmp - x)

  # simple 'forward' method
  dx <- matrix(as.numeric(NA), nres, nvar)
  for (p in seq_len(nvar)) {
    dx[, p] <- (func(x + h * (seq.int(nvar) == p), ...) - func(x, ...)) / h[p]
  }

  dx
}

# this is based on the Ridout (2009) paper, and the code snippet for 'h4'
lav_func_hessian_complex <- function(func, x, h = .Machine$double.eps, ...) {
  f0 <- try(func(x * (0 + 1i), ...), silent = TRUE)
  if (!is.complex(f0)) {
    lav_msg_stop(gettext(
      "function does not return a complex value")) # eg abs()
  }
  if (inherits(f0, "try-error")) {
    lav_msg_stop(gettext(
      "function does not support non-numeric (complex) argument"))
  }
  if (length(f0) != 1L) {
    lav_msg_stop(gettext(
      "function is not scalar and returns more than one element"))
  }

  nvar <- length(x)

  # determine 'h' per element of x
  # delta1 <- pmax(h^(1/3), abs(h^(1/3)*x))
  # delta2 <- pmax(h^(1/5), abs(h^(1/5)*x))
  delta1 <- h^(1 / 3)
  delta2 <- h^(1 / 5)

  H <- matrix(as.numeric(NA), nvar, nvar)
  for (i in seq_len(nvar)) {
    for (j in 1:i) {
      if (i == j) {
        delta <- delta2
      } else {
        delta <- delta1
      }
      H[i, j] <- H[j, i] <-
        Im(func(x + delta * 1i * (seq.int(nvar) == i) * x +
                  delta * (seq.int(nvar) == j) * x, ...) -
           func(x + delta * 1i * (seq.int(nvar) == i) * x -
                  delta * (seq.int(nvar) == j) * x, ...)) /
        (2 * delta * delta * x[i] * x[j])
    }
  }

  H
}

lav_deriv_cov2corB <- function(COV = NULL) {
  nvar <- nrow(COV)
  dS.inv <- 1 / diag(COV)
  R <- cov2cor(COV)

  A <- -R %x% (0.5 * diag(dS.inv))
  B <- (0.5 * diag(dS.inv)) %x% -R
  DD <- diag(lav_matrix_vec(diag(nvar)))
  A2 <- A %*% DD
  B2 <- B %*% DD
  out <- A2 + B2 + diag(lav_matrix_vec(tcrossprod(sqrt(dS.inv))))

  D <- lav_matrix_duplication(nvar)
  out.vech <- 0.5 * (t(D) %*% out %*% D)

  out.vech
}
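# --------------- sanity check (sketch) ----------------------
# Not part of the original file: a hypothetical check of the complex-step
# Hessian. For f(x) = x1^2 * x2 + x2^3 the analytic Hessian at x = c(1, 2)
# is matrix(c(4, 2, 2, 12), 2, 2). Note that the perturbations above are
# scaled by x itself, so all elements of x must be nonzero:
#
# f <- function(x) x[1]^2 * x[2] + x[2]^3
# lav_func_hessian_complex(f, c(1, 2))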
# quick and dirty (FIXME!!!) way to get dCor/dCov
# surely there must be a more elegant way?
# see lav_deriv_cov2corB, if no num.idx...
lav_deriv_cov2cor <- function(COV = NULL, num.idx = NULL) {
  # dCor/dvar1 = - cov / (2*var1 * sqrt(var1) * sqrt(var2))
  # dCor/dvar2 = - cov / (2*var2 * sqrt(var1) * sqrt(var2))
  # dCor/dcov  = 1/(sqrt(var1) * sqrt(var2))

  # diagonal: diag(lav_matrix_vech(tcrossprod(1/delta)))

  nvar <- ncol(COV)
  pstar <- nvar * (nvar + 1) / 2
  delta <- sqrt(diag(COV))
  if (length(num.idx) > 0L) {
    delta[num.idx] <- 1.0
  }

  A <- COV * -1 / (2 * delta * delta * tcrossprod(delta))
  if (length(num.idx) > 0L) {
    A[num.idx, ] <- 0
    A[cbind(num.idx, num.idx)] <- 1
  }
  A2 <- diag(nvar) %x% t(A)

  OUT <- diag(pstar)
  diag(OUT) <- lav_matrix_vech(tcrossprod(1 / delta))

  var.idx <- lav_matrix_diagh_idx(nvar)
  DUP <- lav_matrix_duplication(nvar)
  OUT[, var.idx] <- t(DUP) %*% A2[, lav_matrix_diag_idx(nvar)]

  if (length(num.idx) > 0L) {
    var.idx <- var.idx[-num.idx]
  }
  OUT[var.idx, var.idx] <- 0

  OUT
}

lav_deriv_cov2cor_numerical <- function(COV, num.idx = integer(0)) {
  compute.R <- function(x) {
    S <- lav_matrix_vech_reverse(x)
    diagS <- diag(S)
    delta <- 1 / sqrt(diagS)
    if (length(num.idx) > 0L) {
      delta[num.idx] <- 1.0
    }
    R <- diag(delta) %*% S %*% diag(delta)
    # R <- cov2cor(S)
    R.vec <- lav_matrix_vech(R, diagonal = TRUE)
    R.vec
  }

  x <- lav_matrix_vech(COV, diagonal = TRUE)
  dx <- lav_func_jacobian_complex(func = compute.R, x = x)

  dx
}
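# --------------- sanity check (sketch) ----------------------
# Not part of the original file: a hypothetical check that the analytic
# derivative agrees with its numerical counterpart on a random covariance
# matrix (they should match up to numerical error):
#
# S <- cov(matrix(rnorm(400), 100, 4))
# max(abs(lav_deriv_cov2cor(S) - lav_deriv_cov2cor_numerical(S)))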
lavaan/data/0000755000176200001440000000000013301004377012423 5ustar liggesusers
lavaan/data/Demo.growth.rda0000644000176200001440000007453712104004704015322 0ustar liggesusers
[binary RData payload omitted]
lavaan/data/FacialBurns.rda0000644000176200001440000000134112104004704015305 0ustar liggesusers
[binary RData payload omitted]
lavaan/data/PoliticalDemocracy.rda0000644000176200001440000000545312104004704016663 0ustar liggesusers
[binary RData payload omitted]
lavaan/data/Demo.twolevel.RData0000644000176200001440000053344413301004377016101 0ustar liggesusers
[binary RData payload omitted; payload continues beyond this section]
=ÜUw²ûØF|¬ŸÇ'ð¤×P«Ÿ¿¨^ªÕ]ë:9 Âs°Î-ÀC šz­Öó!ãóÒDŸ{¨ã÷†½éØ!ôtn>»gÑ­ß'¡¦­ö} {7Œ­.k˜ż3\Ÿ;` \çÁ;à‚à*8¶Á»àæŠ?1Wßôçû°#ü(QŸ¾Sàš¨ãyÜó0x òù|¬ûLè6…î'ûfÖo¿\Ï}Ɉ|öÇûóyy?ö#uy½t»ä?}}]÷%÷cîƒøÜ÷Ä:û­xðc}]ñó&ÏÁÌqyù\Êu©Ã}d?M‘ßyÒÔMmÈã¹#ê2"¯(ò™Ù×Ëû—û¤Ïôgü×sÃy¶Èg}yžMú›æòì·&Öeœó»B·¸!oÓ{ÀqÖ·E^[äáÜêî·!ðŸÁøw¬ýå ^»WýÙ[þH‡ŒN¼é¥7¿ñ¼+D¬×£ã ÆæèÇá7G“‡Çg§<œxŸå>SwÎûp0úÖ½q[—3² Ò#³ŸZlMßµqK_‹Åâ Ñ/ßqŸŠKÎ lavaan/data/PoliticalDemocracy.rda0000644000176200001440000000545312104004704016663 0ustar liggesusers‹Y \“G$€¹I‚rI»H·(Z,‹Gž€ºE‹TêÖªEXä*ÈŽ‚–r ZÊÏV TTVQ[PkY@¤‚\® TO°bPҨߌëÄoñ·óû}y3óŽyóï›yI–;¯œÉ^Éf0š MÑdªº,MÕ‡†êÑU=|· €ˆo¯ gŸõ!Þ /ïƒ)PqôTGõ,ãy“½ `BQóºgí"Xó$¥ô'”›@¯OÚ±$ø,‚oNQ‰ïólŠêÌzÞÀ‚÷¬íÂó JOHP.%/¥ä„ˆ?‰#9RQ1ÅSò$^_JΔZñ9„}é«8T; âK =!1Px¡y„›ˆš'ãˆâÆa ö<¿DsJQvAAì½ÀÃjT£³³È,·e4rˆšò¸“Eº0U©Ð˜”3¡òEBQÂωÆä>&ÚÂw"{˜¯ÑYÛY–lLçНšˆòc2Å—©ùùâü"ó€Ä Q‘GD~ÐÊÓ䓚9Ï~}þÐê£÷JÍO*Þêû½=>%í£óŒ²Ï%ìÑࢿ©”]K">ˆ ÑyLƒ ŸðƒwÚü"õÐùOÆËŒš§‹?Aÿk‰×Ÿ_¾>öþOe˜Òà(½nÓ`†÷k^W›ÛàO‹;¢OÕûvýÙP-¿H¼- ÿèäèÎMªf<‡I»8î¯êÉG Ÿ,̳º: ¥.HæG*=siãIØ£ããytOèXl$ªžƒæå}Ù_Ý[xK¢¦Ï;}úÀg“縩ù£S9/H!µP×FO '‰³³Qª–#D»VÏ.zÞóüÂ÷2ºÍã)Úxm{ÐÀ=|¿">Q¨ët¢ú‹²‡ý õP±jòª<ÆùÃízº>LB]~Ôþ©¸}‘¶íÎkÑzXžs;<»;r@»"íׄ³Å˜OÖ[$>2û±”¡~˜"=>IÉu¤„>HžÂáJ6lŸÔ£Û‹F¦Nfç—üu~›Í°ª\h˜-H‰:ø¶ ~OŸ¬»Þ°þÂû’½jÛ%ïšú€´3Ñ=ðÿ6´Oäß„ò¼à¨ƒ¶Ó”0kÎyàÚ׺ÜM[ƒ)MCî_Í.ÂEììä:+àíú4uìÿ¥¾¥«GÔü ÏwéºóŽ¥=¦­¿ ²ÿÓ?¦1ѽŠùf_÷Ìܾgá×_tvéî¡ ê5¹)~aNs(~ÍéŒu¾ÅXO7ðй”Ô`0›Û¢­W|†6jßC–¥N½uû¥_I+¾ŽâZï§Çï> É!¹ëÿ`”ïò3yȦ­ÈûÏ$•b³Mëeü'{7.[Âzb"„·Ú-oz¾iýõ¦uè\ö¬¶˜P~½Wê{tŸƒYÂá˜ò}àG¶ÉJû1Îò–ìmËÎéòaêÞž¨n¢«Ÿ9dWÍŽNv‹FÛÐ%Úú‹®~Ãçmg§u÷º|"Çò¾¦îI•»°=ªÞÁ|´oâû3öŸ›qμsï*ôš>ÙªºG²tÏ+¯)ÈøcÕùõmšµi:¹¼un©éÓÝ Ã¯¯«°•ěf¬†“Isøi9]ý¼s‹>¬°.Ñ–2ÎáÞ0jåµ½RËŽC==e`±ú˜»ÜÄ3Ê:¦ˆæìþ GDÞ¡‹3J]@,¹þqz=¹µØ{~— F3Ú?7IÑÎRvN Èb 6[w€°±o6+·ĺ9šÌ^9ˆôÇwo|p ø]öq4ÍAx%)nc·Ö—IÌò ~Ü’!½hæ¢0ê«I)èqÊšP'Q˜w/¹Þì ’_­rŽŽ¹w<2ƪëw¼ø„[œ™ð…÷Mà¥Î=ùw*ð¦‹RÝ*@p¦·<«Í8e>ÚB1O½•}úìf‚¤ï“ÌtàWžêÏ=…í‰y©}åÁÀïoõõмÜÌcòüëÀ[hœÅŒõÆëñ†ÖŒ_ÛÙ¨z/{oþ¸Û7¬´ ?“!Î"-“‹K€¡÷^Ÿÿ*à›{íkOÝÂèKKãì@8Ë d÷jJE#`<ÜÏÜòx.Èž¸¯÷ñ˜²«¼©ó~/£rÝ;.¾ YÛØè·MõÞ6}ÔçîÙZb“eQžÀ_p#Ì6Abó¸Ün/è˜n\uÛöŸò‘rGÃIu_¹ÒB‘2ýÓ¯—u;tIEð½ü¦ZÖÌÆùôïÙóý-þ>åªÖ®ÐÿÍjdèCgÐ ,:æ lAs•ÛÛ[4€Wfe5 è•]ª­ÚÎAÕ¼ºÛVãÀ´ù-³{ ÓI§"ÖÅucñ—¿˱=”ïBÅ‘±¨w“Aœvä~×ÃZ0Œðšñ}Ü8­iZŸ b­Èò¨X;JWÛ?ýÄGýýnkÎ)óÎ/-Æ,iº_Uü&Ç»L¯ýD;ú.2ÝåOþ¥÷¥ëÆ é!¹]Õ%É-è}—ÿaã˜Ë^°¸‰Ó?w»R úa«\lßÁû@ï¹4Ç1¾ûÚ†ØV_ÜÁNoˆi¹8¿X‘ãW}ùÉ ¿u=£dU/o6UÉÜ RÞm»¬µUÀ_¢Hxº{?NU©ålvê®Â í‘À3ˆkýKxp#ï—os x5w[Ûëk€³B´51n?ð-3òÜšãA°§÷ò¶ôàÏO’5 <€¿rõêÐS0Üé‘ÿUòçªø>R\sº|vRQc KuŽùøn{¤ÅçÇŽ”Lñp’xïŠH0º ª’#"AüËÜ7#€¯…íþ°D½M êäcWCÓ¯}|4);“<–ûXÉǼ|Å']ß‘j·» qVÉÿ(‘ „lgf¥N–^+èþcuþ@¥8…¬òOÍYX_÷çð_ߊö­À mG7u!=¬ÏUöäÕçW u€aÜàšº4é#äƒ —kc F?÷@@Q$hH˾éÎ’øåñú×6âuy)ÜR5xÀ=šœ1E*fÙ2뮯@¨q¥a“Á`û.ô(ìa`9ý¥{+«0nHNägªŒ©qéòðe‰øxZN)Í÷¢çc»Èo=½¬ºÆñ­À‹q•Éù!ð>)¿åã“W0ô³äÊú˜‚Þšaл—•ðÑöx]ÃÆ‡‡Æ"ЬóÜOü“¤ìµÞ'\Õá2^üyôlR3Æ÷fâÞ,Ü{÷ìpo6î½{ö¨íEc{ѳWt!_Ø w ž‰Ä«>”JåIÒgï ¯pä3šd¯óŠð²ñU¨ôU£ñgÏŸ¹Ò©Hlavaan/data/Demo.twolevel.RData0000644000176200001440000053344413301004377016101 0ustar liggesusers‹<œw UÿÿÇïµ7I’„„Jƒ„"¯C%$•J¢IV*-©P¤2’DF%IFd½Ž½÷Þ{ïëÚëë÷Ïï{Î=÷÷ûœóz½žÏÇóŸk¨i,ÏaÌA¡P(ŒÌkGƵ¯L k*…‰Â¾væÖ¼zÓn·Ó];›«w®ÚP(Œë×~äZûðP(ì9PzÞªªJ¦ª1/Øý>þÈ™ÜëåI.úÍÏ¡ÙçÔÂ# Iñ®¹$ …ƒòÆåõ_B ίamã¡[Vì ]gætÊãëÔøqvÕÜN`é ÁÌî{ý¢Ï7œÖ<`gë…½~ExXænNGx(*’Ô Ìéü$«Œ…¦ØÍk$«éÇ:…亷j•¥Öý$«=ñ …ª|ó\¿ ×à¬Rbuš*6Y”KÏPIþ]·$²ÁåG½Ã{›&I¶£ƒ¿Ù× ‘lÕ‡Ï6„Ž;·DT'âH¶W…%×uH–%ÁSþje¸|•—áfΔé =‰†î%“Ï$æyÝ»½ðÈ ’ˆ‰í«UÆÄ ­üEÂ|8ñîâÏ÷ qÐÌèöY$´‡d2?YóÐPi9S³ŽõêX¦O`$Òƒsw¸îɳá%Š|õ“†U·ùïroŒÀ*Eÿ%Ÿbwõöc^"l$óÙ£‘^ç®!]÷i9Å §¦rù«%Üp1¹¶Øw,ôÝ-Œ9Ia7f¿ÄíáÓUg¾Àä1såd«VšóßmeKR¤ò϶~…iiiëÍ·¡g„£5j°æ ÷ôCn=îíìã8³µ9ƒ`*ff¹Ä,ç7W-ʃÖÉ &·Ðh_nW%9s¥ß45JŸL"d]^X,»ÜÊñǺ¾Ó™3.-Эqï"pîÁnÙ+Ï¿,³–êW’#×껢éµeú»{äÜÞ­ëOÒTaÒ>ëÈ&g˜‘xT¿ïÈ ,<,tèÜù˜pŒç%O¤AY–Lj¿Zñ©[®Çm Ã˜®èÀÝ×`<"lw÷ߤÿÊŽ99“@0¤šÞÚ;z‰`|ú톩bý9½¢¼‡±nÛ«¥ŒÝiážÛ S /íÿ2ŒmõzÄ â+Á°Ø9}+ç5F¬»ôylªƒdRxS«* Ko%Óê †¬‹,†50I—ðv' î;ïôDs¯A½±qÕ÷{Êï";Ö¿`$XbM?3í{LR ôôVÓQ 
Ÿž½:V€+®•Ä?RÉ©u¬wvwáÔ“‡ÒÆq¢ÏڙÞ“dÚ¬=öÈ÷ÉPU{lˆÄÅ÷ÖŸ²а¢%À0ÛÓ‚d¹°k{ø[,æ?Ýž2A2Ll ŸÇ©ù])ygŸâxœsZ2vŽ^dìô¯Äé»W_:`ä),Qyn™²~É“ÝpêcõÏúGA8 ¯DÚ6ß…©Nó Û˰ëö`ÜÄ®0ˆw|šwêq¾æ¿Ð%gDÁ¿s‚Ÿ¹CH–}UãA~sðsTEC’#Gª_·<^}¡Ã1<©ØÉšLS¼ÌHròlÝ5ôÕk‡i¼n^ºîoÆß;¾#M÷Χk“ÍÐ¥Ÿ²y7ÿ_œ+–ìö{Ñ„KW%8Ï”e‘œNiCŸ8|®ykN'– Ú¾~fÎËB:ÛÁ¤ä¦›ÐIÙ¥´="{„.[]Úhý.÷•ÓUaÏÁdã½› ðîgwsÕ AÝòa³å¦1Œ°ù'xŽÖ §¿~a8 pø/Ú‡%\Nîsë„`*'Òùç#‰ñê¥.&cS¿™]Êø)¢¬ægw¦¸Æ§Ð-g\m`BØ×„_i+ÁúòTßÂ#Ì룲߼³¦d­þ5Éÿ‹o%ÂXwÜ‚‘×i¦;mp¾ÞSh<ÂÊÙVƒ6'ãDCPôÙN͇¸êºà஬7aJ²ÐP}“[ÖKë½”{O^ÀJŦªf‘ç1R«¢ä/B°n¾íêû|ˆd5¹ýò5c$ïúcùÀTb9‡ÚZûd`´Ä¼é‹̉Ü|èW^õú¥Ôo­õ«ÁÈ\¡ ´†H)¬ûÍ+^â贀ⷉÕÖsаjǹÍqÆ2Õí¸u-pòNŸ[s®.íúè «C^¾×êñØ¡¿¤ø4Ë嬛 XSTD$O”Ãx^`cßÇ| /ž}Ô“€´u¯i’Ÿp𛡢¢£*w¬~Ž„î®‡™U5Qåöž&¿Cë–ý^¼¾$%8RÄC+›`Ðün@)!¨ïHgñáLÀЛkÙ‰× ÷ìàèKÎä/L0'Ž”|¿Û ³æ*áB*«°ê ^yÖ“àìèªUê_€ß|.Vs}â0Ÿó‰#n:TË„mÝÏF0¸«ÊIße€Qò@`žUÕ£ñ´ ‚=‚kPéa"´9ŸÖÊLÚ…í¢µ3³éJ$³¤Ë› ’ŸKdÂ;˜yÀ,˜¥ä‡gQCïµ`†³ŽWU(d).žØôš\7ðóyóz¤åñ”»I ýÃ"åN”´ˆv±ßcÿË_ìËÄ›’qäjK¹lŠËt•Mɯ®áüV £KYí0YùéÏn GW]‡¦zÌÑ—fáM½ }&÷$ êÅ·É¿¬ÂŒËË]áFg°ŒÞäú)úζ%']HCzA/•.&=úrf‡>”aõs¨Çq( DšUæŽf½ãâØÙ9+¹0XÉÞ˜KS|uöK¼ñå·ùŽdÂ6ް·geÐé¡n ©½—ÝI¼Ç•Ï6üì1 $ƒ~Íì¹6Îçôè}²Ñ Yç~f¼PÃy³+¿¿Z$yê¶o)¬Í!Y”ÏQÖ|½øÏ´Â]'¼[¶ñ8“œóÏÕ6òK’êÆ’­N¸Ð˜í8ô“dàî]Zúy ç–·|†±KÒT¿°ß|îâôKó]«}WHÆÜ=—·î…DbNíVK ]Œ;Ã~߆`x¹éy ZWOT{®ƒÕûåe7…`Z•Cåå®/@ûéÿiåü˜é¥ öéì›ð-ýôXØyþ<ÿolùùu[+t-ùÚ…ÕÝr°ãþ[:Nª¿XÝ’Ap©ìÛÏ3ÒUBõ¥Ås#0—ÌÉò[²Ž`lÒø–ê;´Ö«&-†LjßñK‡"a°÷µÛº|eHUÖŒ_ýÊGr¸Ë¿š³RÄ™çÛg¾-hbžQЛÎH¡tUþ8cáÊ“(y$êzwÀœÉ:",ºB‡G9½c8{ß/€£%ào¼UJQ…è ¼Õf0—°çNL*6*D†ýûcKÖ=!ÑŒšPz;íÿQcœItÙÍD ¿’íãç ê1?¹÷!yÐÙö\–Û]o”óîÞRE†®k#S¡ðÄúKË—I‚9ènó˜Ÿ?±Ž²Q°¶]éF¿%V#: »rÃõýÿ9aÎê÷÷‹7Ë ]$äg¯«.d?=!MŠÀPA0ë·jg˜3,ßÿ§ð~\ͽíREÁœíëØ=I‡©/$Öo×Ešªú]Óîèyö‰'äv:Î>{”6SFr„©q2œíÀŠ2æ/§ccqffëîÄCÈ(¡MÒÎCíÙ(õÃ׋±g…0q=Ë„½ÎŸ+>„™W‹ã¦ó€¢Ñ…!χ Ê­ˆ³nv×0± ¤7ú_1nq„ÚM•[=Ò{o¥Ä Ž|ZŽˆz†ˆ/ž }6¼2w*ÁP—”Za$+ŽÝgߦÿ˜›dÞm¯u]ž¤îNP¦¸øÍ›ew[Ì; ¶˜é!)Ï»$àïŸtæõµ®@ëìÊù*…“äbÛ*ÝóŽ'ûÿnÁ¥p ]§ö0tb5x8ÛrM÷nŽzdU”~ø5å-$Ì“lm>ÎX.ýñSMK44øJö°6ò“̆N ³o9!gWæ(,¬LÚ•ôàœÉ]w6~zM?Ž]â¢drâä=վズÐáh„ÿöß‚¬¹÷w´òÃtEpÄò’¢–¨€S~L»ï›ÌgçÞÞ"˜ÝÝ®U»ãËoø7ÕátÐÂĉ§8¦ø*)akeå ´•¡aKî;‘nO\ìVÛ¾ç"'ƒŠ0ZÛÿºÍ—QèžXwÔæì&\0Ûfö»óîŽU:iÿš£"Nì¼x:%ê´v0-ˬ´Á@ä–ŒEazÜöl ƒL\y8ßuÔ‡~=ÓV™o@úˆ0‡Én¬õ¨Yÿ1 :¯Øhrû|m‚=åáØ[7ý¯† e“ãÏ¡½00{•ÃÔ15ÂvÛÎbûϾ¢Ž­-è’2Èu6:%Ÿü­ô ;j$ºFoQpfu_`¯Ãn f7’+¢às7ë’¹úÈwÓ¸8ôŸgž)ý"Žc‹ªƒ)Gðû£(þ<«½ð(³¬Þ.WœÛö;%YÏ«þf¹®pÁdwÿ?ì¦o±“®Ö€Rê¿Õòö0ù„õâªñZz[ÞvÿK+4Ä›_|¥CE:‡Çnúݘ~LHåñ‡Lõ+Jί—a4´ìÿûc3Õ°\/p »øY†c/ÜÇ9Q-é‚·‘fϵìÛ‹CÒQbw“,’äNa[óa났ÍÑXèªÎ;¤Š3.¥o Ý­°µÊÿªª?®ø‰]¿Áz ÿÎM¬ÞU܃C,Š”ïÆØ"ÀNÆï›…nQ§++8‘Ö#>'ó?”{-ÚDI†ïé”N mb½Yè&b3âªÙbþ`?Iž]·eŽàf‰ö¹µ óijÅ0õ CªSå6Ì)¯S×}bCðNŒeª«í¬&¥ÿ?ŒÒÏô~NùƒÙæ“peüüÖF†˜ÚÄ™÷e“)Á¥ØQ7“eB0](Úo¼h†ê¢Â¶017EÆ:Ý!X%þ\Ý  ³‹=Ï?æ±bù©¤óôë>ÂYê Ó1’»$kð:CT·ÉØ*KÂîÌyŠ^8oÛÑxäßY’ê0yVŒ ¹ï ½šß‚Uãelò®a#ç ‹G÷M°*/›á?¡œ½Z½ƒ`gÀìþ Íû—€î[®~:iˆd0âÚs÷ ɵ4B½•BRY>ÛNÔîªûÆ›¿ ÌÁ‚tÊ-nIHƒ«]N¤=.0¹í«È@úðú=oët „>|úþî’’÷ºthH ‹=ê»ÇTH¤ï°d”q¿Œ-%QìÙÁ¯`ŒT¢™}P#¹ÅŸJ±P °ë›ÀÊžëyäzù§}jÇqÊM+M`¨æò—Ã×û4èï­4#ιÚöØ9àÒÀÒÔ?žB˜¼´'äÄ’LÎöëìnÄÉ?[ØD¡­=÷škÒ> W…ÊB·ŸIÑ œz Çñ_tÙŸá~³À+QÇÝÅ´N`œû©© ˜‘¼~`é¡)L޳QX™Îbò.ô¹¹®›XOÈI~²„¦”Ò §BI’áYx÷&­#ؤQµ-Æ!Äüs2õ™ÀáL¡6Ï×k¼`x]!"pÖmfü‚ Ùɳñ Ãnìµø$/rD ;³‡>û¸•`jM Rºa£WLíÛ¶@ï^±¡e¶nœ¦D/~iÀf¯ ÿ® œÕjö\ø”D2åçŒ>OjÆÑù®/vn8‘gepsÅ çäòÒf’ éfÉ™Â=0&£{ýÂbØŽ}þüø—æ^¤ÝÚøC´à .RìÏ F%’,X;l—ìðoøŒîæX’úÑcómn ÷vKJ`µíçRvÉ84h£-¡-;"˜UCçÖôj‹\¨[A=ñJgªõ-,…í5I«÷&¸|Mú"8Âl{°—=¡ ”uKêoŒ/ûòŽÁ¸DJwLw8 ¶‚íýÈ-Ì59»fƘOæ}†^CNýüM¿ Ö-é€?s‘.ƒC_Á<ý9¢‘m ‡æÖ;ÇžÀ‡…HïÝ«>H‹äßüeÓuì`š®ÍdÀÕÛŠµ¬oÂdgÑFΘ|ìvº™~Bkk)ŠÁŽXï*ó$ƒÅ«v²áÔï)ú­M0êëòä—6L©i>ÜW$L²î¼Z»m‡-6äaÝÄQIøyžªãY¥ÿ¤õÕLx 6áè‘êmÌù¢+:¼é‡í¾7Í`ï³^Ùï­±$ï!ü°#zÎÄ^û9-mzÓu›ßÂÛ•ó×ï†8CÚ§&{Ú/]-’¸Û™ ÃKG¶z«o© )E¸HüˆÚ'âMPz“d¾ûáØÕ¢OWæI–‡¦%wß•bOÓ€¤t¾ A9´õ¿χÍ*Vc1±uŸŸùö]Ûçu]«€äèÉ •×$'Ûˆ¶Åa’‘ý£/króÓÈŽãbµ^ÛžÝê8ÿª%YŘ[W ^?œ±pûXóV ;–Ëd­b€ìgš¶÷m€zîÇ;Òwzãr¥³™Îoœòù’I¾4%…ý÷=V1Äf¦Ø4™f’¡Y‘»b]IIµõ^rc 6žÌPÇy­Ñ m\87ß1§Î[‡‹Ggm//Œ%†é÷¥ù°g_åă\#ú—®'•«íÇw6÷c•´XNobô:y|o{A0Öˆt3s‘Ô[†ÙÅÃB$E,ïߘ}ÌŸzuàÝ>\=súÿiëì™ÝêªKRG݆XÍHV 
KÁ²ðû8Z®Ç¥OÌnKG\¿ŒçÎù­U¦4}† ~ù\_z yH‚Q±ôI+Õé»ÖÓ:ˆÙ¤ÂjM.ÄŽÆ2ÚwàØ|Mí¸K*¬ìºÚ²°I–ÊD&6P(ðvÖYôÍöµ<ÿèp[Vx,¶w'¹¿™ÖƪNå¥@ìŸ`œÈÇe]s娓“Vü¶W ^-[ÙÜ{ë·ý¥6¬~ôÆ2kºÓô´|~…;vÜúa¡s F©ù§ú"yÖrC’ßµ”ÓHÞ5"Õàv®¿ì+Ýãz{$i«ƒ³úá¾x©zXéx m ¦4é™þgEËcÂé}Ù’îC8ŸðîKä¯ ’ùÚgØ(φ4˯‰—æ †È§ão©‡€Îs¿ã²ˆ< È캸­Îè÷ #ØŒaÒÙp2 ­ö»Ú[û¸Ij³à)C×…µ9ö{ÚH°LN_ ¿KÆV> ®´У­µ"¶Ñ‹3n7fCoð¬~MÒ’âðsF.ªÆëŸ¼ƒƒbÀð¯ìɬéöøòë‹0ñáo͵ƒ$õˆú«(e=X>gž?Omß¿óž2NØcõÑÏÞê ¶¤¿b¼Ã†Ùþ²æ§u$pvÔŹõ„ vx4ܲ¯.!¸«m‡IÍ" 5ñxôêÁ_¹|ºsD›^ œOÝLð=PlX‘Äñ\aÜ|vòˆíC}X¨Ü&œSðêLÿù¿9'Br ÷%µ» KÓM&g‹×ò(3LJë¹ ÆcO¿»ÌŸ ví)œ33„¥Á7;ŒØÎÃr;Ójí‚e[Zܶ,u‚ÍÇí]r}t‹\¹/æaJp¿w©9³ßfYîoº÷ f‰è]‹f‚²}g«ÿB+,=ù}÷îý‚¹à“N­Ì%‚ò'fŠ÷ù‚[>d?>ƒ6¶ã*—¯N,¯•øc¹a9‚ñÚ•ßë Q‚Ú)ü× Û"~{¥…Ña‰ç¬·qw þ£kÛ%NB±Äµ? ò?&qÌ•bîŒmœqªwg7ÃâO·wMÿ‚þ?{Ï餽‡»£G7}#)·rŠ™ÏbÛ+#½ƒÆ8»Î«ŸÇ–Ä*“Á.û'&бï‹Î6÷2HŽ;kw§§Æ~n$nQúãÒF0nHÉvU~­?Ó™"väÂhýùãßÜ› õF§ÈÅØ—¸pFY@îöÁpPHûú®0³$[#WxÒÃË9|<\qþ×Ú&W˜ç¿Ý³™-i±©Ú’*°ø°íËâþè …ÓÜoa€g¢³¾+têˆç ß-€¥tfq.g‚¹´zÞ/ïÁðÙ£Qr¨‹Ôo!¯´+ösi Êk–üçh–]o¾Ã’¡[ú• eŠôž 1ƒyq×kQ$ÅÓ•÷9­swù˜ÔoùAÇEuv«oZ8ø†ßZR&â6_?³;8>$6e_f½;EP#XVÛä?œ¯ewjûÄœRž=…`„#¬¿z¥yóA/pÓT¶ºú÷|Žo#“§˜}G¥ÁmpÔ¼rÁºÞÕüÔœ(ƒÑíR|¦UŽÐwÇð@Ã…¿Äºžþ/ #0dð²n'W޲}ú·Ê¸–‡E]ãŸÁF¨ øXýi‹R[Øaõ#UˆßZ‹`ng-Çó $ó e—žø ÉãËß\£Àõ:gîmÿŽeš7lª|HWU;Ѻ$û³Ýô{òß`n‡úË¿‘8 ôòàQsv,8ÄÜE–„ù?&Æö[a5ÎÅfÚŠ`›iè“q„)í¶KQ-0û}Ý~FPÅÙä±B››GI6O•2{g”B‰&®P§/í¶¿>€dz5bók‰;5~Ýôý®} w€™ò»ZN!µí9Ž]µ0Ü;BÁ¥j®H^k3èó+yReD2*˜øìa´Çn׿T‹.’™¥÷ÑÆ‚éPúå~Ø]Ÿ*e°7àü±íJ:ɱ0®ýh"o=tøÛ{ævøáü«®÷-(tËÎ$ÇÂ皬¦5>“þåP|€B2]< ðú9Žç3”Óƒ‘‹Ë_Mfß,W¶J:Ž” Û+O¢G!i烠 ÷ ‚y‡Üö²¸=°Â³g§ÑkºÖè/áÌS"•":ž@?~÷ÿ'W˜m~Ö`vâ3Aý¬g`<- s­&¥?3Šqâ¡GÖïïBØ0WtojÁôè\N g ÉpDÇi¤æ´Ë´ºï-=#JÊ ®ÇpºÀ=ÀPq K¥ Ï­ÞNáùá;ÏãxÓŠ9Û9IXL:Éøig,.ˆÞø¦ªò?fÑ+òöL@çå¥G.À7Æ>Ö{SâÐ7ÎfÆé¢F0¶ˆf.è”àbÔG‚…õMãðI¨qq¯æ¢½ ˜+|Žû§ªÂ¬AþÝ}7“®»¯E,Y= VÅ7~uŠåÁW‡=sX}zò úg%‚ÉMÏ‚.‹•ŽšoŒ, .K‰ZÙ]ðµM«`D¼ë}Þ”kéJð øs.σ™šéÉ3moïFáã©G;Å']±Î|À|£õk˜¤jœ–šØý"²3Wò h%åACÁpsKŸ¨–>6\”é ²S&YbÌÝ-ô±/ó‘ñ€¸ L¼?ªÿŸ(LmK€3¹0yaã™ÜÐl§xØ/?Œd0Ïñ¹xd/I}!öº\ï ´íÝ&ûi/>q»§´#œ`h’аj½ ¥×ûNí3†Å‚xéF>è´íc2Þ‡¹ïcöv©]ù¸ÖYX w)Õjo„•›±1šñÐÚV-ß½9 &ººÞ$úIaVsIG³ïï%K5«‰þů°®bï–äoì>z4úWξ׾ó“§ljþ`QŸïýZ,7ƒÖ¥2ïª&.XæPÓk;àJ2,?MHSÆ™xs‘ÞHJº{,t0 Î&—$oÔÅÕˆ<wGGœS~>ùíLæž;P– !â' L}•¶ú®ç¼Or¹ ŒíŽY$˜ÜK§J›cÿ¶[Rߟ0}yîvtÊ’Yú`ìì,P–àdw|Ní4W½0ëAVn¾Ó¯ ãßìIÃ*?íÁ#28ýßÕ¹Xólâú®²r“¤þW+¶y/;ÌüŒÑ¿ßG2íxâþáÂmjºðìâ7&ìáÞΤúç®]³ÖP’ùzª†¤µ^0¹oMPY›$(o ¨;Ÿ}8i  s’/¸ž‰ü! KÓö ÁœÃï=©¿»`jùſӪr ìM‚rÿLàÓ²hX>îg¶ÜXJ0T–8nÞþ–àªÝ»É·ƒàӈ˕Üǵ½ÑÇ•v=„%¹ 2›MYëó·˜–q\ôdOHÓ^N*Z'é¾6'e{ƒî¼ºŽÃ.*ª[’‘îG©m+{L²ÛlÙ¥ü† 0ʼr—…›¥Þì]Ðþ޼zÂx ÊI#ÁŸE¡áø.¹Sé_×|B¨¾çÍk =ä×P¯L²øØÃå}9{–ýäÌ w®óúpMVÙsÙ¿ÞÍúy²ª/¹Òú»›}¤ØÍñ;Áš÷rÿÒæó°XP[t{‚ ;ö„¿‹ƒ¸ 1ŠV ÉÄ3´'Ìà¦*îŒöÔÆùÏ•‡õµ¼IV&K6ño¡›Êlú¬þì­;pÜ`„`°ë”²wÙ ì»÷ýØöWÎuïö5¾‘ý?¸¨|!r#r2ºn>¶¢‰“‡{\!¢¡øXçd]OÐí>O1׆¡Žú?œ^àèÃsKVÙ¸¼õð×ÐpQèNÞqCCftÄþSY?Q¾–9n÷?Át쀉CßÔß’LÛÂCÉÜ:(«½síÏÃâµ<-,n.c…£Z%sÈyÖÏn&PÕðýö %˜ä¦óòì?„Kwÿªíå'±-3sá-»9Ô‰3ܼ.×…}=9o™?@Ÿ©ªMÆSèíúœ,4 ={™³‡‹ÏŒSC_bÝ &××óyR–5fK¤=z-ºþ'ÌëRÚÇŸ£IЧÖr2çpB¨ÓfW;«isnúØ£‚bÁ@Â(w¨â‚eåˆ qA¦GnÇ4߀ÅÆM·(Áüq¸D‹8EpMÜ*þºõ8L(Ú }ïà%ÖëÌ=àRX„úëx]÷’!A ÜÌ%ÖâBð^IrÞµ –§{‚µ“eaÕŠ‡óí½r‚É6è 9t£jTùÅò¸õ¢°«˜‹ý¡ -I HͳX›Ï‰ý¤>¥Flùuá£?4ý0~T·ŸdneûÐüAR<úî=Ç/Ÿ‹±mª€ú».KùÞÇ€®{ÔjÉŸSlI¥‚ßC+Åë%_¦ tGW_aÔQÆö/ç>;'»Áè»L^ë‹ë0íÈÊVQù ¬|PSªåì ô ׎ê!Þß›1¶šÇåc·\U=ƒý>·äÝÿªAÞ/Ç´»j0w]6tÿKk˜””Ñ")öûôÚ±ëé•]–§–p©eÈj½’6võ1øï¿p‡÷þ²ôÁ•uÇNÂØî‚ÃlNõü¥b3× ¬Ï]ƒ‘_,)?Ýù¡Zlµ¢£c+ŒÕ譶ΔHŽ Cö$‡{g…Uöúq=:k€ñÿ°Íºw 2‹ÛC`ž™õÀ‰è X¬gû¸N»•ÎQ9ç5 Êú`»É¯RHPXQ‰?¶4ûí‚Ç«þ² Ö_fö Ë®Øs?#q£•þ#x9ÅaæøáöMóA0!jÚÝè+‹Ëæ§Úü%aùƒ×ØË†˜Kä¹7Ê„ûüîjô†fš¬áõïØ¼M$n9Ô V3IA(‹µ*ý•Œu£+zNÿ `R¬0TŒó T܇›®V8ùDƒãà$÷—ñ„uõC¾ã?oQî7BïçÇqˆ¢’¼i»5¶Ÿiè×ÞiK2¬S{Zø|JCÉÎ ‹›©õö;ï/ãÒdçïOn81(p&:”ç/›ï-ÊsBZs<÷¦§Æ8r¢˜sS;É~Ç!ñÈ\¢ ¶oº]€5%Cµ/‹°ïã{íâ'8„ãvY–Ã8amB¼zå Ý­92»ÕqòÈWã’EAçôо2iE¯œÜ$ì Fç ßð ¦WçË ¤{VÖÛ¿ô+}ß! 
›- æ…|á÷ž8ç$",¢‘CPÉ3–¡!— v¿²áf,Д N¾ºD0N_ìäd3€•ƒ[ ïæµábІÛS0`w@°étñ›Ò^§Ä‘¾ ÷Ý*¤4oÑ}#¬aX™áK‘°)N½Ù®Ð]Z¢Ïda'“¨ëüªì§ík€ ˉn5àà¶¼õ‹×f‹èýе…µ/÷²^]óûëÁ¶£cð'ðä—n> 5íbNÛÀøªúb­­“iWU v޶º›Xyš1Áõ|Á÷ÉYQz.‹0¨oúÏ?ÿòÜð«‹-ê›áÀ¸\×·kã0ig)5 ¼‡`t¿üb,¡{?5r¥5=†…ÂýEŒÎG©‡ghB]²H껡/!Mâq¥Á£QØå¼ +kyݳ¥qÕæï;ÉÀêÄ”Œ@IìòR©ššÊ$fEÛS1“Ãæ‡ê¿$—SNfqUIQذýeÐfXÉR­<· G ]“IÄ…°¾fµÓé8ó…M "ì=Æ›ôG’$+çîúÁ ª$75S%òëœÚÖNg’Àœódû6Iè:ý­`Ÿ ,[„ >h ƒ!S¿»vÀâºà•ˆ›Q°ìp—:ýÒ™`5æ8¾œx—àŽ¹u÷yÚ­›ÞÎÜÒÂԭ· x“ ªLàsƒ‚qèáዾa° —u×Ûq,rŸ>ðÓ|“L©Í cߨ_°‚å3\òÚ±ëüï ø.¾gÖó~oe 7ÿ¼¾j—%'pª¶Ößûó7XºFR×êœn½‘ú»¯Nðås‘|ŠמxÊâR¢NdÈ+#’Ùq£ .÷ÒÓ}í(_£°¸²äÅć‹Bâc ÁB8g¹Ãõ•«I8oö‰Bک륄d .Üå{n° _-0ýÍzˆ3ý?ÆB›N¬•ã# iÔÆú¥ïÃÚ¿”ç'w“ . BW?·AUÖ”cñÖ%˜ÍÙS¥" Ýõzjæâ8¯®~#ãô}˜¢M?öŒÆþ ®Ó—tF`zYÖŸðzgœ¦n«Ôì)CÑ~ÐÄD¶k²À‰zŸ;îóQÉuíí°üÒÊhL '6À-ém0•Ý,{fêŒÇl_¹Å£NðªýÞöôDÉö™MÉfÐz*".ö ç@—hÚªðd˜}h~‚,ÿ‹Í—FþeºÓm·ç¬.ârÆ£ç,²0‹ÿ³2€å!Ñý?‚æ,7ÿžûÐÿ0»ëLJ$ U8üTÝø’´Š¯k“öا[—”둵v笟¼ŒÆŠÈÈ,ίØ‘ ˜Ã$£H0ø•û5߀ú]^¿Õz[€>Ëm&1ù5¨Ã5csõçÊu:ýó¿}Á0¿Èå6ai/Y—é¸Â\£{Ï6~œÖ²{köKaq*£<÷¤¶œØÓñàs8´F”EŠQ$±ÅÂ=:Þ[k¿TnÉ?¯ñ§ê§RCpü¸/ï—qœ\:ÿ6’?Žäwµò%“Ìx’jr¡LÞ"i{£#†F†p *¢EòH¶‡ÓÂ.ŠA‘ ŸÐ÷Ÿ8û¡Hðü yhžN/}ôU ³ïIò¼f†-9ºù¼<§€&Ñ!f1kd²eåihdûÆÿu1€Ñû½¯er°¹jáÌwk‚EÓàPk ÌsG¢G‘õÅ?ù ÜÁ˱pÞ€`z1§"ô•`Íi8Ày™`ö;ÌbhIPë˜oXËuÌó†ÇÅa¸ïÚ‡f©óXçxÞ#$5«TîÝy§@0^e‹¥«l&(?ìê4˜Tʽñ}{:±«½ÝÞýZ ^•ÓãÍ{IÑÚ‘{Óò¸ô½¬ccŒtô>¾±!º›eÌH¼ ¹‚ºèº8CK!wÛ:“ë ¿<ùyò¶7:r ëฮŸƒRÒ¾í• )&ÙMêøO®ÍùOE±¸AÓó#¯cùÖ®ñn˜Í§^ŠÛ9ocÄ~¶è«÷6¯bµ+ž8ÎÍÂÔ62ã\ÁÆØI[¤%{¿Tïð¼ʪCµóÊ8ÿæÈµÃ18rU‡U"—¯öœ.8‡8ºÑ5±‡êu,î]ºÚks3i“6¨%Å[*ôøìaôÔÙ•7ÇÒH6%¶9qöcø‘Ü6\.« 6ŠBc1¾}÷($SÓ­ÍöÌã¸Tx‹ž&vÜMõýÑ <a. A0e™}û q ¦YVKgS* s®Ðj°ªˆ`âT[(Ña€9÷‡pqÏo¶M—öÂÐqË(Zf°o+¾qO&Ø|ª;Å°í©–­Y KDÜüâùg$eéžÝ>3‚¢ý)ŒëX,3g ÓªaìñÑvöaè Û±»PÊýX¾îÂéd¦‹žãFÐý¾÷l @ô5œÕî|ú'£x6]þÈ@2Ú•s•\ÄÚgnËÎwaÂÀØr–»ºG”ßô½ĺSe©’FlßyÖRס•Ãe:@Øh_Õ¼ª\ ¦?NÕ=“Fò@ä±,•è}ù,(&ù;I)iŠ:XöÛÚ¿Üz£G2%¦\87¡…óOšz}{×òøõ£?XG q±<Ñòä »û„㌠‰¾uŠ:òy8Íê¹Ok;‰}ÆŸÙ¥Òp@6÷ýq53œ>ÒMP‡ŠÖæüÝצäû8©žôíÍMÄ6ëåM¼K38’yòê¿8¤×¶èOÒHfm¾›ÖWàè[ýDÆlaxn¾”U(LÁ NgaàØ9 Aç³rÐ^vµÀ>˜dÝP ôÉH{‡_Im~¹ƒd¼¸í¢{ïg’ê•÷ãùbÖ¾˜]Oo) ÂìïΩT݃äèB8Þ?Äðp*¥á[lW t‡ì¯ÞÍ:ÿæÓ!Ûšë¸òPN¼øF«d ¹Žox-z'ßßóÛΫœ}¢áo¤ßõÝG:` ¾Tš{6ÁçùœóÅЬ¤\ôùòÂf£˜^Ä«‚?f%¸n¤ëü'W„féS·$¾ÃdHaQ«“"A=ð5}è²~z¶Kku‡Î™¨Ù¿K%™? tu¤àxzhópÉcáõ÷Êynl~˜Ó£Õ@r%Ä©¿rbÁœ²jßIœÝŠGƯc³ò«3—ÇœpRxý=«8•°.Ÿ8Ô{×…­j°_ijoÿ=5é²È$êùKgÕ×âìØJØ­F^Ôûý!)ÇïQeö}ÖÆôÖüT¯Øœ 8pLï¶É›’¥j Eƒö­BBl/¹·a½°ÉTúª‰ý$Ì{bÓáBòÎQ$–þz? $×Ë©ßc3šÅéö»}›ÌvAº`àfŸÉ’¡CÿÖ–Lk’ñìŸ"KÕ ’Õý<£óÊ=’XÑ”ŽÎ'øÒŸ¼ñì³Å?o ³i=öÐzxî³Ûe‚¡¬pswÌêüyj}*'Ío„‹~Aº{#køzœ!³ô¸L-±…ýþž‹¤Aˆ{'Ù’`±Êx4øî5N´yläé’!Xì ‹?¾ý4.æÒ“k¼§–9»è³'šžös°Z“”ׯ$9ŒŠpÆdÒ³“'FM•o­[OÁñ%r¾+$5EôG³¼ÒÝ?_’øéºL·þv'ÙÆYw½±&rdšÅ÷Aîb±óáÇÊ8Zõ­4Â\'>çâw{ŽÓ•buHM’ÁýŸyõÓ:ð§lt+ØLÁŠÿRwÙ±`æŸÿô¸}%pJiÊ¢‚KRÞê¾Pº!H2D<ñâóÅ~¥m¢±V—é+Uñ"’»e¬ö`WwPô§|&ìpŸì¹Íó2Ó{[p á5[† FÃÄŠ¼Ä¾bǘσýüŽýª_®¸K²8öúÒ=ÁzcÅ(ÐzràúüÆ^_T[wW( »Œ¾õÏm÷"Xúòwýó€igƒ¯3{ÃpÐLß½½20;”“Ó?uâ|Ķð§Y‡Kàv˜ÒÿÁòQXÇÍè­a)\0#´š*m“KRþ)¼™ê8¯$δíî–ŠD̲žË~)·OºÀ´aøæ|b¡ }`°,Þ¯oK›v ZvC»®š[æ‡Xšè:(Ø,‰ÝT‡™;R|0úØ2ÉäÓ4t2úÉéoÇwTï²H×è[ŸÖ¤–óçev– ìÆ™›†¹‘'±éêØÎ] ï¹àéy –˜Z2ÚYÛ`Vkã·ç¯u êBBî?6‚«‡¡Ãä³0Ы„|O­ùwärù, u¶Ñ–_ãj—¼Äc'g̦uxH,ÃlçÅâ’¸÷KEOù7|KÿB2™üZVòÀ}½ ›‡/ã\oH sØQ¤ñêé†2  ðY½²vÀÅ¿É^_Ь§RR2p„úxû%Ê}rÍA8΋b¯÷èÓåQu)Q¼Exc²’'ΈuàôÏÞæ²We(›Â-NRx•l:Y4q¦j[bDç‚tvs] (½þU¶ gö«6;%(cìʼnjæ üÚ.Ìò$‹žëG¡Ýq8Y®_yÿtÖGÙää—!í×3æÊ+:$sÝɹ¦I’z`ë3¯¡ÿpq*ëŽÙç2·¯Þz*b²½Ta|65TÊó É(Pó1¦PŸ`ðUÒf>vàº'/³S| Ú£v¼ÇÙÕ·MŽÀ¨ÂLw? 
ž:Èh²$CPg—¶=‹ý†«Î_Å~ü|]_äUs‘ŒË“yQ¾åXgýŒ„á)ÓàS»IÊ€ÇÉl[w˜:8æÏrxô½Ï’T ýF0V¥ºî¼d ]ŠÉp4udœgÚÁ†“^ÑAÃ]•0wgY1ûúNœØÃwä0ƒ:ôéWê*3‹½Ðü´ÌÄ‚Q~~ËÎÛç`Ð`ÛIÕ‹‚'× Ö N¯øMñ3ölÏ—BNȯœÚOï£"¨U'/†Ï…`ә裲B¸ÊÁþºÒ0j¤·ãÂs‰l |ÊÎÛ©w ©pç)úg¬vó ÀªY[6áµ¹±´`“ì ³yAµÎ4{’¡˜c×Á#XÖ¦¶®l²$8²=­ƒVJ×|ƒÿñ™ôf’±èAÊ%«—ÐóÇ*f¡7ÌHó]¤í“…i±!ÚܘY—¸¾7xV8Æ$µ¢‘®8Rzxއdòd¿À¯€cçw–+l4 ™Íÿg›…‹—¼öDªãªýû]¯äÖòL¿êÛ˜ÙÏ0©ƒ>vÛ—dhôß}¶ŽdÉ 1ªî!aáúk†Gcp¦†Ð´TÛŽ rQóÛâ‚êKíÑVO3h}øWz«¦+ŒÊ"O¤Å1 Ý—Ýßr8fOûdJ&lÓ%^¥WÅaw¿ìí‹ @;æû4|c8t3œ:`ãOèoõxÞƒ¬µCoq^gŒŸ:ÂBc2“AÙÌuûŸç(ÁÙáùŸí‹Áå ‡-÷_Ch ‰Sõj"8˜+ËŒ}œy¦íz0Ó~yþ¾ ,òßÝÓí$­¼—>o-@Úõæ¢À}?và²ÊÏ7°pú¡Ÿt§LŽþSëÓéDzŽ›\aç0Žù©ðª\&‰­~[ca>{ë@X/ ¦«Yÿý½çn9Høå7Tºá8醽£uûÕ °êã&Ïï[ðïÆöwÙ¡óþŒÜ÷’IVlX´ '«ÖßÛê-6,ɰý ‹”ë¶þVU˜xîÀ»¶8˜”SÖµ›‚Bͧn§Z[ eüzðôÓ%ÞVã†m~@Ï20*µ"™ýñãô8Î:ÿô»'4‡+õ»üë¼h¸¼qõ œh!©FNš³2ÀÔPm‹löÜ~ð•éÎ$Ö>ÚËlŽýÁl;—¶5“ÔÐ}‡2ò0Y±wÆÌ{·ê)x7áô.ÖJº«$Ó+QϺcH-Qãa^Gò‹–ò|B…!ͫԛ}ö8päÞ^òAÉÈ{bGý•/0+}3ïÖ•8¼Æ²ýà -\<7éôGzŽ`’¨{fÕûŽd ]ê?d‹ƒ«¡´ÒH—[ç¾v‡âuý5»q:rCÙÇ”‡8{$ù@ÄVA ]ø÷(Ê +Yý_›Z”@›x¹M­êã¾2.xCvÔóÅ;÷\I†K6“"¬qXWdâåIR2§RNpi\_ˆ-U‰k¹.bîwáî~³Ùp9*“Çämçͪ`ÅùLëܽ6‚mD[âäN:v1ïû±ÕW ^‹oØÁr•d1gh:xûüÞõ†ošì67åo¤sæ§|Ùìò/‹îa#X9%?]8£¾?×¼¶y­Fö†üuÃeÿϽºûl jÛÇgm{Ilý¤lbCûUÅ91+“L±.9˜™pòráõøÖ ÈûPÑÄÙƒcÖ•G¾Œ½åMÞiÊØðÊç ‚MÔþÌåÇC˜°|6ýϘÁ°·üPgyL¾7¨sL[=éµtŽ.œš—ùìÊ”G2Ã+“?uF$“dKÎC.+rÿá17Q’ê:Ø|i­–<i~J6 Nïé¶Œý‹ôÛ–_ΆIΣ{šiãÔæÏ!E$“+yàŽÏZß]}¸ 3usÙ4»Î‚Òó²Zï–(ôi*,cÉÛG©ó5X<}X7÷âw’ãÑÉ^ŽŽ ‚Ù¦èŽï”9®äfï›9ã•c%ù MÑÃýÜôÌŸ8ßåµÝ2¸Wµ*ÜxO `Jz«‡…d-Ì/Àû8Õ>»²ÍOdJ÷2-ÆåªÅ©Ü9QlöÔâL¿ƒK\Þ FÕã?øìº]É^,óüûkÁntgšW°“#̺Î$¦;ZéÛ´æ#ׯÝÛ c«ÝJ¶Ó ‚õêQ¿ÂCb8i¦D=¡ËìÑÏ訶|Ê‹Ò&6v{¡ò”†Î Ô—g?V&X„¦òô”b;ÍÜIÒ€†_ÏÆ.B7‘[àú6Š L¿+ ¥ŽBgλÛž¥@æßd¶w΀%Ík1bi?pêŸÌSI‚éÉ{Ý›8µàoÚ!3³·¢žèß•$x„å|b¹q\°_¥¾rV¸ù7‘@ÓçÞ'2º ª×™5_á9ˆcYT¦«ÐõA)dýQ˜ ô=t¡Gÿ˜÷=‹C:R§ ‹¦[ýŠ#¹K‚³ž3݇Ù/ƒ»÷rÁøÕÍû~žÀø¢ iØ­‹4“ ¼ãEØÉÔçõßwè‹—|HË"Xk®sŸ]¯Ž+|²f2û<ƒ¥•ò;Ý`Pö¹áªž t,n}Fåõšxhä¥_°°3é&ÖèЗÂjà­ü#ûa°|&²Òn¦ëô„†¢`öŸìÞÆâ ‚E=µùÑL„šœaù±vÍsÁÊ«!{/«ôx>c‡Ùö'ÅU#«Øƒ\†#ÒwŽÕÞÀ‰ÓÓÍWnùC«™µŸë!ÉñzÕ£Nh˜¤²ZKe,ý€Ú=Ÿ¯„ÝæÂö¨º +ߎChöÖZ. vü¹ø€ˆ¸Y®:¢Ûïv@µT·Ú£?÷I†jåÔôõl0¸ñÍÜî6X %%IFÍ`¶"£pš¡Ò%Ó£ê~Xa¢é¦}áîõÊ9ê`¾4ÌКÚuÔaÈzù½ßf-¤Éݾ;œ»žøwnÄ‘7ä¬õªåìê|:'Й=ò4< ]\Íyäò có‹Çîæ\°`¿G8á…$dµ<&n®‚Á×¼ÁUK$C绳›^,Ff+”€æ›Òºð=ÿ½ì®ë€éý»ø~ ÿV=šìtaÙišÃTJ`È/ÔÍ&8ÚlôÑH䋈y6B0ÅâÿÒÏÓ{c6pße!([êÈ#÷`ù§Ðs¿Ìxx)}l¾oµjŸæs…UOsú¦Br-÷3+|J³'(§ò_ñ&ÖE¤ÿM²Ž„1¶òROéP~%ûØß”4( kò¹ý¬êùãJñ™=L¶~•ùzG7±$»+A›ëÆLÖòHX`w»¥²`#'=þ£M0KÇd͇)ìµ¶Ãox°ÌÞÍõä_{œ´¢rÂø#¨_ºñ/ èortY|ò`þr¶®Ï Œ<0Ì{úa+¦ûÒÄÙ]¡7ºw~åH4A 0*³ëí†ù˜A¦>Û {ã­ß‰}8Óu`¢+3š%LD%NãJ;¤(n å±.ʽY²$åB‘Ú+†?Ó¨AÊ!˜Íxt,ÿÆ‘ìOŠƒ£,pr‡fЕû@^ºj¦¸œ'í°Ì€Þ²ó·X`ú»Ë“)…uÅVè€Þ"/Îj…åë×:Ãô%fNmèOsñØíû–`_ìRlø¨GPZ˜ž¼o|ô?Üï«Ð>¾¥÷UcùJ¸b Ýlë@‡ÌÓ Ë÷“”O>ßZoLð.Ü2ýÚ×ïu×:¿è¯v} +˜é€LãÄ@’r?Î7QKF 5·™úA~z«ï©íÐ#ËþA`ª‡šCœî|SMÍ;÷=^ XröœX¼zÃfù;DZa÷%‡F©¼:£8uýƒ+vÅÒ22e¢q¸Äm¸mÛ .sl–ÚËbŠÕù¥†ÚWpÚÖ©çɧƒ8WÃ\öqó¤½Û>µüŒŠŽçI³œÇôúœ((àåJR]*\…çt±ß2òØ5þ8Ìx“ß\Kh%Æímbý$e)˜ÙòÉýæ°O§Œô¼ß¾Ò$6B²L¾:SP‡«°õT®7É>ñXèJT Žm=°á™Òí5>¸Èdî˜M ¬Z§ÅaÈÃþW‘ô¾¯Lxl- ×›ïŒÅY>Ë‹ÎNØ},åÓq ’7¼Xèbè2Iýcö!"ºNÝñÏÔ“Á¾²Ä¼üCw ç|NÓò†XœŽßñI¸‡wŸPò•¿#SÝsjÃÐoS,ðâñì[_Ÿ›ÁŽ&›)ÉZ\Sxr¡'S'ÎÜôer¯n#™)í–NóHʦ’¶op)FÁWø¡§ÇIU’Ðy ú´&Î1¿ø›h<Û¾~?‰s´zÞwNX÷y`úm¾ Lˆ8Í´"˜ª³ì&§”¯¹$ÏÜ#ùÜÃÛ“ñÒ®ò'-î$IÎ ¦s1âN$õû{‡ßÎL8`®ÄlTHòo¸j¤÷K…d”œíðÈT"š4?ǾæAÚðÇ–Ûµ8}줨ObNy4Û8o !©¥¶êjO5H¶ÿÄm—ª¹q$àxú!KH¯²9äC2Œ½Ì‰3]sµø‚TjÚg¿k9ضœÞUõY§²Šƒ`€úR®¾ý4çˆÄ~º¾‹`ãÛ,Á óý5/]Î ‹IÑvOêÚ} ¶™[-9íbÂì*³„ò˜w/\ò-‚åo½E5Öró&±k? 
yq:hd¨TÌ+g‹‹ì´…Å—F¡ßV°æá› 5t‚“Ù×Ò!H&Ö=ñ ×Ý%žJç¾Í( ½ä<›`Ž:Ù‘û®ÍÐÙLÜ;6Ã/Ø 7ÔuÀ‘Añ¹'%^)Ò%Fè™:>Šæ0ýpCDåÖ×@Óz9À¡m=Ww¾yk]á—T5HSá~ï>ÄÉ/ý/«?Ñq¹1aº|·N^è½ÕD Ôv¸ºÎÃŧ_ ¾ËÎŒ9{'3ÛI†¡‚  wÈ3ïzío€ãü É#EÊ$Ÿ2Ãöí!ï!Æ–³\߈ YœË/ovñ%9kÚ¸V¾ý6ü.¼Ù„+÷›*>?º¾‹9ŽÃŸ(åÝ)é}N±%ÿ¾~¦Ò_X²|™kè½Ó Û>§•j|ËJ!jd ¬®ü6.Ô€¸¯¯¤éÖ±@“¡ÌÙØ £Ãz·Ÿé ¨·‚Ãß}³Ó·™Wr/æÃhßÙ3²Ç]¬ÆU•R1ž#0ÿ…‡>ñ݈`³õ 9ÜI°=Ç”cý¹u÷Š:`üç¹Ç{›pqç¦ -»×öd¬þâº]áÛoýǪçD2Ø{áÙ¶ò 3&˜’RK=³G-]¬îWbÛóÍÆö>šöókuç‡Öü%¿²Œ—å6Xx@±@Wȃ³®$åk±ëÕnœ üÇtöbÌŠm^ÈÓʼn½Äã¿ñGßg; `@Ro÷Ч0»øú™wÓ1Ñs ÅÂp¶SßW]$ÅüÍ›b½péE΋dÓQÈz'+ü”dÙ›ádÏ ýÑ õv.8|íVöˆLÙ}zâª2F2æ—¼¾‘ýšX¯Q3¯ô L¡~fOlHèò·'Øb eޱ~Aúžìƒ4n?‚êU¿µúê"Á!='æøð5¬‡F5{½%­i~&ÃeòÅH}é[XI2LmphêE–us/† g¤÷ŽÙ–q(¿ý+zF¦æü3Ç>ù•ÁÀUÇ‘Qo覬*†…±)½¡¿a|ë$õ×ð@€µ¯v™B00'ÇXV Owt5˜†fóh õGé@ß|»êQd/Á¸{õN3«ކ:Xÿ{d ‹›O6Uù0Óî{ØòS˜Éôz°!þüÇTpjSÄìo?ÂîDÂ0ÓÁëZ{¤0{óëbµ0c\¹Û9ò?ó·7ÿ9&õv-­Å$ýËSοi¸¢ª»’»_'ùM/s´^#ç“LTMTIjô¢Ú'¹y’á59Ÿ¹Dâg«Ç]³$ÁXijRH!y•ö;oI#Yx ùŠJZH¦äÆF7vìzW¬û0}­Žbßßi”wâþò“º¡<¨Á¬7Zk>þ:Óyâ!Nîÿër‘®µvñ8¼¥HªšwÃÒâ’Ùå1¯ù1ìj“i Y?ƒŠˆ,ÆÂûal‹F„B=Ž…Ñ¯¬JÁ¨g|†ï½×8y,U~Js#ÁÌ7vkùv”^>Sx w|ÈÙ{)üTXrWöâžyõOKŽô8=†úKsbÖ§ÿÿâ¦cpŸˆ·Çå’Wîâ÷´qÚ¾·³>Á±óá·ç°Uù´³~C!vË9xx=´Çö}ÝË·åI˜ñ:>œ[‹U‘ôuL…²X%fø_ ?þéê jfÉ’ü˜Ý'kïF'©%¿Ë“7¸á ÇDWý%i\™L¹ïÝ…«N»•9¤‘i7ÈW¤À¯  gÍ~±¿²NBr/j…ò\é Ù"X·åbž˜L¼=FòÞ^4”¯ø ¢I‡; H®KîïÐZp|eÏÍýN/`f¹¥_OÀgM_t 6ı„pq· èq ·æÿü»>+ò?àS†i'ƒçŒyF½ŸZv—Œ‘þòUê¥Á ‚º;¢ŒùÇKìÎ ~Ñ]†J[®¦»ÂõV.W“)Ìk}þZW³Î;FW€ÎípaÀÁšy§z¶;aÅÖ_aF÷ŽãßÏÛ¡If/_œz÷{~Þ.w\Û÷aë«ãû ;Ú?µ‡ÃÇ[¹öF~ó$Ç—OCÐ…S„«iöoŠÚAŠ^‚xÁ˜ÛÅëp õó)Ë`u9t¿ÄÿP%LlztvŸä&l³`p¶œá‚Bþ‚­–“ÈvþX~Øf ÛmD&Ob±IŽArç®fœ=“pr¤*nôSÿ‡#•/\Ïü.ƒ†ÊÉk‘mÐn›uÿ¹WÁ@èX3ÂZíõ'Û—µaÁpÏDÖëk0©Ú|õÆ©.‚à 1¯¡+éâvE; õ`JpÀ«Ôv‚Ò«’tò,3´lÜ-xè?E‚é¥ûË­am˜ÐÖyª2?/ó¾‰ ˜o¤yTú± ÍwËN‘ÈW0³[q1äÐSUó¾MÀ®¾"›¹ý0*®-«ÕE‡Qÿ`eÇßfЮNTi¬¾ÅyŽÎŒÍ_àèžõaüâ? î¿Kßï ”k ì”æ ‚uö’]ú:NU2 œÝ„+ÆÇ”n•Áy† î ¼Ø³Zp÷¿)¬8ê€á@÷)éÚIúa·ÊzÓÞzG’™î™'±Ý zß>âOÄâ°ÍDQÌ3cèûZlÁ|…[Uwj­Ì8áü3 Ê£T­÷ÖãÞTóö„­0ëmD“áálj$^‡ãw‚qUljE(B£ûÇ$>ȑؗ±õ]—/6L7=3,Š™eƒ;`œ±+“q…¤¦õèE ÙBœ9fÃ/@ýæÀx>¨¾ÝO¼>+BrKnH,ûûŽ—wξØG²-¬Ï?þõÉè2+sxgÖ^ݾoIÝo^\Ð2´_ŸÉ¶üvîN?yÝÈ“ tù¿na#™FÞúÝ" ±¦£ª´X¤?^,·ÛLR9#§®œ©GZÏ:˜âqœ._ÿ®ÉˆÀŽ'Ši‚LPþ½ðInÂè >~¿)X(œÊ¥¾‰ nLw^t¨€ñrÆN`€¶¹T÷}º0ï¾wþÃwslŠ ååj–€öï8]Ä,æL°¨ÓŸ›GLôØ%q¼fÆí§),gX¬ËFAOðÍ/¡®Ø9y7éÁæ#u¡1Š rµzm» ½Æ&G[yaþ.Õ3éN1ü‰óc½ô«\ÝhŒ}0z pvSÄÐꥮôáÔ߇áÇèØG‹8!Ÿ‚+7C÷ÊH¯'Ö ÉÞØ>!m©NºzfXp¿Õ`)7Šuüþ‡ÞΟ ½/@Wbœ¬…e »–XÆþzŽ´_ö‘cîãXàªt™ËˆŽõ_/wœš;ˆ¿îgsdÚcãý® ãe\2âhÎß‹bë %`ú„ï7—BCÒ :}€Û¯l}¿_a/,-„ªí‰&˜6úköù±aß#“áÜgy$£×‹.¹O‚Ðxæìíd­áµõÞý'—öFÃßH¯‹Æé×rW{oÔGß‚ŽËÌÃrOù]ùK¬$ÕÇÊàöé>’Ñé  E •1Ê´™G2Ü+å(· F›¶jüù‹=µ•ìéiY8“zÚÁ£ú~ø%þ]!(KrÁRņØ#v^öŸc ̋ùè-gf&ÞЃg`¶»Xü„F&Ž—nsU<ÖƒCž©7LúÔ°ãž½¼ÍΔm»²çŸ ŒËÞ9üþñKXÝÞ\ n8Ç夑+7YÖ‰ŠW/ÕmÏåœf°:lËúg1í]||p´=É=ìQýŸäÉ%B$åýÕŸsÀw¼Æ:À~‚`þ{­õ"£ ¦Pî,f锕Þ/–W8¤qmèÌàÜÙGeÁQH)S,É4cƒòWãL[è$esØlúÔG¨àÚðß“Œ9±ÔZ“U€¥šT‹ Ÿ’¬*Z´”4˜uŒZ#LðɉƓ[°â/ mb¾†ûmýôg=.©Jn_y,«ç6²í̈́Ϟ¹ájª®Ð4kÉUD]ÀÐNшÖ÷è¸îÿFöÉ4¶Í›KPÊÝgOšŸLýž@TLÖy¬¸Ñ¤%-Ž9²‚Òûs]qd}y"×sMX*^LQÜ‹×.ªjz¨âBÜΓÿ¹‘¬öZ¿ªø„I^}áp•WÐI­mûNrø|ß[ṟd,I(5öܼ¦³ÒIB} 8apú9ë=c\:»%ôy×G\qqP²2$™ÊÖ?‹°}M2È-§º`ï’©ƒM´ɨ $\Prh jS›á?®[Á‘¤9.ôéˆ*·aââûrõŸ$[®ÌÎÂ03H¥Ñó‚ÕÐÓ_mƉG^g¿_?ƒ¬ÿþz‡™šÑN/|ªœµîÓ]ìLªo3]ô…¾_Wÿóf€…ƒ"Ç"N”¬*Ëví{\<èR.ÇÆßÊ/ò¯wª÷cIß.Iá÷M{‚žÿÔÂa.‹#ÿ DàùÁšŽ9˜;sH¨ ú͘Y †v\‘kÚŸ3L*Ûßo Àe. 
ŠPãžÛ]R=qÎVwÛ0Jà´“·’%Nó~ü÷ßÃyhüÝç»p·Zrû3ƒÆ¡[¯Çe '? ZÇÊ`Å%zgÚ½zØìm±ÀuFOv¦†nÆ…›‘¶%ÑØ–š¯kÿd²tS.°;‘‚ôÒuw ò¦R õ1‰Hè1½ lÕ³ÓÓœfîM8Gôó1\? ÃÏÐ|»¨…ãú¾ãRÑ ¯•²íYç¬T˜|sp½ß(јö%ÚL^ú‹E&Ë $aQÌ¢®I»2É ê¬SÐp"÷Þý.öQs‡Yo܂ВA_^l¸ðá“dº"LɦÆïNþœŠ™†Þ;qné¢éÆÞ£X~+å­ñ{1`zþ˜¾õ¾0´í‚IO"¹þœ4ª\6÷ç_ó¨åÇ×HPøe'ÛYj/‘à2¦ÍÇÌŽåîd†ù’(¸¤À&+L‹¬*þ=w`œ%.^ªvË7%,xŠ6• ¶âÎ-%3eÿ+’‡ÿö>Jº™ŒƒÅÿ G 6äB—ÿnVÌ)lRË6ÃúcG~~[PÀøÄ)?zšºvÙåMM̧¶…O¤À:eÅÔ£ž0ü3#èîkqÄËæq^츖|qQ„Œ5ßµ‹©l±MIìlx%VþÕ‰ƒ2ÿi­ã¨!ÕÚ÷aôžMw`)y³7>nw|Œã97ŠôUù ,?bT7H :¿åÝ¿þ_*t{·p¹ù^Á¦Ûzöý…µÖÖÎ]ë[|hP¹Èn  í©…¦}*òH˜÷òÊõÄA//§î¡CÐ-}ÖùÈœ{þ·uià"ÌœêÛÏïK‰¿ãY5”¿@ ¾MkYý&_\jµa%Ò¶ÝÖ*ƒbúWQ\5(ØHzC¢»@rçŸ4%Ñ&‹RS~ƒqÝûý7ÑÖ•%Ø~’„sOy[¯Ö=ÁæÏ÷'œXÍaø;·ðÏ è*yäüSFþzÑHù¼ã."õ'K2¸u&š9rO¸õøJ“ G0tm'ö9”µ¡w¶ãÒÎóDløºMÛs´Òx¿â¼n‚óJÝ ¡[}åÁÉw°äÕ©%Ú4ìjaû«¶Ä¥R‚²-Ø÷h =vÿWHj»s>6úêåÊ¿0²¢bòèI¨äuÑÄìøãƒàÉt>ìOà™ZSU‚^±²>ö¨£†Fdc¯5­tù© 0²ò¨zĆ7ÞÊ+tN¹áš¥×_6%zìÈk¥Vf5aÖâ¡¡D‘Aó@Z¿" È÷|wzß`‡%úlÎMÊ&8egU‚³¾ßû²ŸW°—2%šËÇM¤Ý™L¦ƒp:IßgϘ±a’y_Eü[˜×Ö^Þ\Ýòž·oÛ}Ç6o‘¥¾Ê-ÿ Xh žÒ ±¯æ}ÞcØ® dƒõÍ·ÇÅXq&Ëûõ!ì×߸~}—&¬ï.¶s>¸õ7ç…Üt#Œ ŠÌ@ƒÅ5H|z¦JbÄÿhàᮎˆè]~%¶®"™âÅ}èõ "Mð ¶# ’³;m„K=Â<á0zÚ³ËZßz/­FÚ+jáç+ó?Rx8M-Øœ0q«$C¤Ž©òþèȈ½½ %ƒ· ƒ)Í*U=;îŸ;tL·#f²US>@ãKn·žà£øW_‚ùë70kåΰ®ŒeLD?qOa,qrÌÌÊ„¯úÕ«úAo¤0ÛÝÖØs[!`ߪ8õæw½Ä‰Ä² v¼›Ë»e_䰼DZÍçSlE€§ÖÞÿ±†‚¡f¤¸`˜AûÑ¡V?Ü,•/𼼆YÛín‰akÑOÙ¼ƒ0¥ ò˜~f¥þ†zÕÆ«ì‰OÏÀ`¡ÿ½8–$lz38¯>lŽ_"úiÔƒÅw²\Ú‹7 Ñú(›ú¥.˜¶¶0ôhì…Y½qõ° Mæ:]qgÖ½…³ä1ïˉ‡cOpúððýíñO¡ê€«v<–oÿ^ûAéü.ºZ¥=ÑZÞ: |X~ÛzetŽGw sj<­¿87ol󆡮E VÇ0‰­tDô÷qø}]Þ,äm+ ñZzNÂÒUý“²$p¥=KÆ´¶3‹¿e^ìeÆÕW¤ëCH «T2ñGÊ…^›;gp*•À|ð½+ÔžóŸÞâÂ`nÓêK  ¦©÷^É…ÙÛ !‡Ûƒ ¬$#K4à1’˜”½kX2€ºóäјªÝØÿ÷÷¡ï;5`²Ú3¤òZÌû¾§SP>É{„'E §^ÉrÐßÃ>3_.ïŠKÐ÷”Õ_åg‰nÛE«}!8Is[c¡È‚†iŠÃw áÌ['ï´"UhÊ Ñ+æàÂ~ý»!Ù$š‰ª±ÁÛÅà÷úhÉSÓ78[·/æ—„0,„×ä?»‘Ëïvp³,Åù°ñ‹e[<òôά’/–/É5w¥IÆ~îW ¶å–Aïäu½h¨ßtc&ÜäLCnZ\f®Û)¿aÞyœÙBLš­Œ^r–yb•vÚeGy¸¯Ó^y ²ú^[ž5‚njé[Gé4ÌŒc`ÿî“u›iOš= N~±"~ü>‘á/[ã·qd¶õ—V„¾éudž © ­TÆéRÎX¼n¾Ic{ßžÔÇÁÓá>M”Ø¿ó @;ᬚQOùU:@ÿè-sÍÓà÷û‡²ûõ`øxó€ÏÕF˜¿v¨);0=QÎÇpÛ ]m¼wÔ «ÛßÔ~ÿº­.2›@ØSa  çDÛ2Îëb^[mø÷—?ñÍCÛ ;TÇ$8ëqÚõì­Ço¡P‹  ³@bt«t\}y”OUJÆ[ir|dKOÓc–eåAft ð<­ †êMš“ö× ŠÌö÷{(SyÿÒ=#Ã|_½×$ãs˜è"– §¢Ï®m4šÒDÜPI¿d™Ö‡ þ›ǵû›5ybØ–0÷ž+aÂm”ƒáçcœkJâ9u–€#Žl»Åšw3­I/L>0™`ÐÆ¢+à ÆNPþÏQ/BЋ¹[~nåãÆÝ?lõÇt`NXæ]b‘Öà¿‘Øã—ˆ´×W§ÅšcåEy7‹S$¨¹àíÊ­N } ÔÞ5í;-ßÑ&@÷DóôŒ¾ P¤Fë&b¾ømU–žN轘u´Å×pùí¦ƒÉ¿W2ü^¡×¢dPÙð=ôç^RÑÆ´X¥VIYV ,ÞŒozŠ#¬3 ÇÚ¡LÑ[ÀiÅ3Õž?¯s&ÃÄ·µÉûw; {°¾[õœ*Æ(Jt ;Ü€^ï°÷¯g·¸íÅ¥ œóÐJ PÌÅ–’ –šP»1s?c:&ý%bì€áácß.«qA#]ò±…äLœx°û¿ªÁ "íiÝ%·óï°E:Ÿi§‘1ݤ{÷8‰€3ÚR¦ÛÄp“&Xq³½ËÞ‘x@^Áž =#ë³¶ÍÝЩïrß,Æqá‡õì•ß¡$š7þªœa†p§(8 )îFšÁèXeSÄp{ õ¾/r†òÁº×‰´”´Ðáðiì³t>d!!3cåÑç­; KêpÂÿ°Š‹Ê¾ÝÈGÿ{¯%ò;ÇMâ.£‰&ÁørÅû œ•öªnÇåƒÚE‡Å+q½$`ûtª ÙpÞuö3…ì{³yÏ̽±~ãÈ+ÁaèÆ–§f‹pvòt³®â¤šlVêùèC§øOÏH]lÝikBõĆS¼JŽCk¡þ-æXI”ÝÑ|Þ¤8…çw8Àì÷Ç?ENÃhÝÅœ#Û 13HâRâ!?l‘#MÝ7®ÁºŽâ;a…ϱ÷•Ù+µWz°¾>¤4D 3‚ÑŽ8©=_¶öé:ö œì”c…‰âlr€2F•Ô½?í®')8ãú­Z³ÖÎ8pŸY2rr Æ>»j žoÂYÙ8þý´®Xuës}å)lyöí¤Ûwn˜ÿ,bÍ T—i× éPU¶ã¾ 3,$Žó-œ ‚þ;ªok~(bâ?îÛá÷¬¶ø8`ÄÈWzÇèiŸÒq©>Tôb8vŸnxȇ_ôn„1ãçË7è$¼H8pÊÖNZ »s†UO| ‚ÙœoŸ>”à%ªU—±ç{—»x^lƒ²D;•÷Ï]¡¦øT;Ãh_æpˆ½…Tqùa—æm°@¸ìÙóävŶ©qûhÁ}⚆û]`Sy‘|òŸ%LD6²ã¬ ÝsΤN‡¿‡êŸpƒ|\`¿æÚ¬ƒZÇkþ ƒ”ÖÍ.ÎÄ^§7]'óac_tw6ô²ô±dí!5õ§±™L2nôç÷ ºâ ÃLJÎâ{\PVÌr½rË;r\a)„áGû¦Yä0gDQ3hì$¶ld|Z¹ýÚ¦df.,ÃÚšÚ0,l¿'q_ß&}ÜìíßÀp ƒâvèÄæ„î…ùÐsrA=±7šDS!¢-hªs-¥ÞJ¢Ÿ¬¥ýàcl]éœãÍ|ý?u>²´+¾¨ ÆÃ$P}'úq‡ ÂSæVY\i üPöä+~¬Öeß ³~ªÕ«xX¸.»YúB ÇKB—˜ÝÀ&öÒÓ{2-¡×R,ª¬ΈðÏ'*õÃJ~‰‚fIV÷ù˜uia÷=êUUèŒê»^ºå÷»Ø‘Rèí3hR =s'>1Ü冘T}Æ-HšSÓ-¥´á²DZÅ·Jh”—t7ÐȪüv¡œƒŸqðÅ÷Ÿß¢Ð·ô5JãZ$¶Çì|ù7ȼg2ß~Üšß4êËiÐ|m»ñ¹Æ "Ãý=$¶òxïXîMC!Ì]ïàwŸŠ‚™‹iûˆÅ8üÙ|³x}ÒÄ—~\3÷ÉÅÖ¨Ç@)y½‘9m“»Êžê‚ÑݵGÀ!þÑíu¥T¹?o|}k‰³¿„®ßg‚µ Ñ†ŽûÏ¡=Ÿ“¹ï. 
FvŒÇ*kc]v„ê ÿñY—ÌNºb%»e d‰8è^žUànt€‘Jîæï¾2ð瓘qã_!hŒ¬jv?…”¨¯å&—`tªÊ—l™³ÉgUaúCŸ”1Ç.˜=ÈZÍ΋JÇÕ\+A?=×=²ßü<Ë»¡S)„Ã.ü'ï8ÎáÚ£ BmÆægÁaÍÒÆ ³­á‡¦áO…¤®ï-Žx‰tvâ·Ç^ºÁ!kš0wÇ×Í'÷òÙö rêÃñ™ÎøL^-œé§Þz±ó6Å^xíÔU…žvAÐÙW¢¾œî}ÕÓBCi ïöú!å×X—1²°WvŒÄ e–«¬ÆÍ"Œœk¯xqi§‹oEèóîúÌÅ¥ ¡//|…qK­øî+84Ñmäø׸âZq)bÖ;þ7JëÂПðûß7ì1Ûí „Ã×o{að;Ó]béwádçå[:mØ«ã3öl° (¹vÙ1ĸؒòí©v˜~eèãÆqÖ›#0jÐy; Fr º5Â’±àÅ Ôþ­\Ï=éù¹Œ£âê[ó¨G[8Ÿ…š3°ÚõÀ䯂™ÉÓLãzúˆ–͇Þü“ÊÊ€¾¦Ó ’9í0v‘­´|ºœ-Ÿ>%N¤±ÖêQTÕ…æ°¾ K+°Vwª7°Š CÔ DÓp¡Û¢õ÷·­þWø`zYM ëmØf܆õ´'Œæ.;as•-ÁD]GÕ°ò‡ ÿ'] yè-æT½=ŒÓ•ÒŠb:$NÆ«ÏnëáLt[þd„}†!£æ8F›u%Fûoí‘d\õÆF}>íEg)Ëi5 ÓÑÁ™›«Ê%%X*ÏåAÜW3”¬—.&$¤ÂIG"«‘àÒÚÐÚ±•Ó§óβ}Àúýã´ì%HÕøFnxÝõ öÌmoð{ò„šó¸ölÚ\t½õEo~ÆO€ÉÆ›¹Ž]dÑ‘¤é tï"ÆýbuÈ +žýäz’`…õOíº/pº‘«3±­ æ÷Ç=­^Úóø#`›>Œ¾xÊNYþ ŽghŸñÒã§/öÒ®üœmºSʽ´ƒD€} ÿžÅÞæýÇ)Ÿí°ùÅáÞ«’˜ù:ºöÆwfˆy!E]âh„.W~H™ZLÜ5%ÎL ùï>îaóÀ–_|>!f^8öǰa‡*ÌùÓ‰öWn'‘õ§Ò_Üqwú× 5.-y^ [BªËÌÎS¯ò‚Á•ÅIÂ5al5yIZ{sÖoV=7(‡ZqÖAZQy¬)˜«i£1%—&®ÀrhÜuQ3"ýïùó&¿“€j/¯¿mÛ1½~v_ô«º±Våè5žìl\3z+ÙàdCŠ6ŸôØ9ø±î6ÁYsc(n…jé•ýváX—©Rä£öï„|ö{ï í\Lėp6øˆbžFt¾²Žkº¤ˆSMõ¯Æ¹ ðå³ø“óØŸ´ÿƒÐû  T«'䥎{nåð pœYò$Ú½zÐñ¹€s¶´æ–ôE çZ€Ô$i|JÖç—b½|žäaÛÒÁÑ€o®@^®:ŸF…ùó¿olrß Ñ;t_×»‹¯ß¤¦´^€Éæòšd‡Ñ2žº–^ô%irI¶šA·HDÒ—–dÚ»z ÷¢Î(Ú-ÿšeÜò˜n¿{Øþ4Ú¦ Ä å”gL-ŒI»Ù7˜Æ~cë&† h~Ͷ«>þ+ýÙ`cƒM"Ë9ž´Ãª±>.g ªwv8ßó '4þÖÄ(&Ñj›Iç>2†ú‚‹Ë [^W%_תs6#K´yk‘âáEºô̧zÔÔšÔÛ¡õù3‘k–0,ïíÑìáM¢§õ=kc‹K|WŠ+ÕLp ·¬>S|DedÞ{—;º·ê^nd`mÈ¢mŒ vè¼™§Ñ¹‹޼yðwŒÍîPç·†IÊédXk:äÂæø†ÿ3ìOYã‡îôrÃ'uÐ=EIfHÐú‰;bP®[㦾®?ƒ]ËŒá²jbPD¯Y­;Èç:__ŒMÕGF¡/ôq㇙”×F hX…í«_„åïÄ:]Äk-Õ]°L’}’RþœDãöŸÉáIèz5Ê’HŒÇºËfæq¬YüGƒƒ1P§´Ým¶‡>ÞÏ–‹ 8ô3öFe+'ç"Sì„Y°ï€ýáï‘r°”ÈrQE­ É Ë—ûl. î•wÁÿqÃDõà².¯#‘féšTIÅA-PËzȵÅ/¼ú‰%Dœhúœ§0*c4›å`¡AëIo¶%ÖF®$oX¤5³„ƒ|¶jïL:\¹‡_Õ94àæI¡ßóa¦‡]¼râ<6¼òµf?‡3ŸQË ¹q1ÊýAãy)(Mg˜l–.Ç9†wómÜÇpµdeoÜI¤düS}s}&O$hªZc¾±ÉxùïHÊ77˜ 5e«ý°Ž¥œ¡ÿUK¡õƒi©Hh)üM¿àº3¦Ý‚6ª®™æÌ®šp¬o>¨mxV¶…nêôx ‰s%¶HLgöLJ~‡äaη†|¸L©Þ{n»(I›Ê©¸¬@ê‰øŽ£Gì ïŸpÀ¼Ö<™[7¥0çeÓç¿åq,[„l•xoËGÂ5× 9­QòåûJì½~[Á’Á]¥ÚYÔC¯°-!SPFâý×þûÀ¿Õ¯»÷ïf?…ý:'lëÐâLÅÂG·€0øqètN-)<¤ùDÀùp¥ãÚ_NBÞ¾ísÖßÖѽì;ùï ‡Ÿ&i쵋[²…b°(>/{û\%–î¾+w#6—ÃÁ®œ ³ß¦—ˆïaªäDî°ú}i–¬pÛgD÷ ÍŒÂ$ÁŒWlÎ)Gšå°º¶6«þü”r^sò©0|»"\®w/̬=ÜT¡ƒ}¡Eå¹iðôuûƒ{ÓPèfÙº ãb¡ç¼íôà륪ÞËDÌùÝðæ‚n×Ι`«ô`W)Rž˜L'ð1)çhb{7׋¬êtÃDOYÓ§b>P~»¸²$E¤ÿÅù”Ö€ûÎÜ`ó †=U-øì.ú§³qJüñƒ~Õð¢Í7å0Ðézdê2L pIܸœS½-Nû³¾áf¯v†þ‘ŽzÉYî;ÔïˆV­g­ƒB¦Vu=~LÍÛ!šðÄuî>h£ l±ÿF?­µ/izqqSÿg°P©²,†6@¾GéãÅ èùòÎQ¯GÍ¿ ÓÀ Xæ‹k?-Æÿ.Zí69…dFsmÛ9PÚ¢v«¦æ;vºŒ\g'l­ÏÄý%ÐöüaÁÏ@WÌž&…¥•„ÅÈØûõåvlÛ>~ÍÀHGç”ÿP̉åÄ6VÅUh€Q•R×+¸JuŠI=¬‰‹Lg¸=jš€ò6èçåHæºùºwg$,zîr›’€nù³/E q<”ÇÙâÁ[¼sîà´ýwÆÚºz°t²,z}f/e¼2sЄ֤'¿§·ævòºO3™Ä ä=÷+Ž1¡û-¾wOg¶ºoÍÕÞŸ¿;f°HÀùŸÇŸ ɨ¦LcjÖùâ4Ó ˜qõh¬?üû >¹Gƒº×hz$.àäÜ:Õjì:‘Î"?‘åa4ÝJ¯å[ðÄ‘n§í%‡¡‹å·²·4¤Ÿ½jM†jMNs®çcÐ(ÍrëNo¾«%Õ܇þõ³â»_²ÃÆÕÿþ\Ü6Œu^µI‚÷¬±”ÍìÈýisX\ç[©¢“Æ|5\J'Ò6-iggï„ùÅ'ÔÒÆ!äeàÚƒ2Œè^cs²ÀÖ·¹jc”/˜ÇÅxËÍD —žê¿Ó1˜©ÝªOûý°¼'.Z'á.&¼4©ÛñŠÉÚ„¡š)e¬L«yrM§“D#¾öÍûÎh´L~ï-ä‹ÓÜoæ ”U°¾hŠRòIÍéâqñ†Ú%S‹Ñj]%« ¾u„>Ê©Wûö•õÙéwX»Dccz€ûþ8Ôi†Ë=›R:¯i·<`B.Âø"öf,ì«›pª>ãÚ«ß4¸\æ“=x6§O±ÿw¼a J®÷êb5´‰òH¿ÊDjRZ‡ŸQŽd0Ie@:³f½³1nÔU¶¸Ÿdƒ¹§dëHУ;h)I€…»]76°7{lèë°*ŽmãnÍ*†‘wQ+Œ:Š0ߟ8¯ýÈk}ØâÒôæàoäÉ0'½5 ÈöGžÞ„aÿÒûig8°ý?ÓËÑž 0|ø€Û]‰XåãνÃáGžKއ»Á´¦9‡%;Û»êkŸA_ôÝ……&ì—8ëüäª.»4òá”ÂÑqò¾|œPÙQj¡ ©Ås.„d9s²åaž9ˆ²jA5Ìȸ¶L b¤¸eUenÔo”+:ÄÃL¿ÃW+{Ø%ûñÛgÎ%HúŠyÿ…oèxÌ`Ía/yå{IÐZr¿Â)'é>ÍÆÁi«%I‘Ÿ|$º¹ôNód>ØöÙÉ·»ûiŠN†µ¬æiýo˜{€¸ƒR ýJ%'þˆÄŠ)z®¯{ƒ ÊÔØ­§„ç¼w¯k>€îÒòÀÝ=¹ÐzÉù©üðç»o׎AèúÃáá`:t~˜uüÊýžkŽ±ÆØdдO0à.PJÊ{‡gqÈzªÔ€×c—뺵p¥º*É>Mçs½.޾ÀÚ[/¥?­éÁÊjð¡»{(0^vcEì7 šRÕÚ 1içëõ‚o$–·®°_HÛûÂúYÎðA¯þ³ˆáÿ0ûõSëó‘DBÔ‡ q·0›Ú)™t]Âαy0fÐCùûu?¡PIÿüÞʽ»Îs*' {SMZŒ`ážAÌŽÌFèÑÝ;йÏ*Þ¼ ¤ér…_µ{ÔéÛq¬ÐMPö_%®å|kŠÏR…¦+ŠÓz "ØÞ(¾Ã2ä$öçXEÞƒéœ+|-¼ øJrgWÆ_œ´<ÙPÌùh:óx¨Þ÷ÛS„$…’$D)-Br%I%I"ŠPdOÊ’$•R²%ùÈ–ì[²ßcß÷}ß÷} Æþóýã÷ï¼Î¼æ™çÜÏû¾®™sžƒýÂÔY6?pf8ó¡‰Ð…î½ÿÀ—¸”Bï$Äà&‹ÅÁVƒÿ.Ÿ}^Åy/­*`OåŠMÍyRt£¾{(z·rb¢€¤Ô:˹ï6.íêaÃ"G.é²e¤„Ú»ú½¹‹­uï.Õ}—ýÊéHÎ86õ§:Çõd_)ïÆÖÙ½©-XÕûÊÎÁIùw®ï³üC9“ ƒ—¾ ö8?ºÇæ÷ ¨¤)ãìInžF>uXæº^b4¯Ž¤ OãS`°Ì(f8Ü”Hk{üƒM 
ýÁÀSZ+sBJ§Äj¡éÃÍ+†Æ¦Øz¹´YIH¦6‚³lÅû`ô÷¡ÊQ³rˆãžŸtýFt3ÏÒæmçéQËØU{¨·Ãþ´é)ó˜.'âx¨Å¯±lkèýU¯ƒ÷QÌgUÚq³çÛ›ÿñ]½tÍé’j)ǵ`=¤ëöY]lYü¤jºç ¬–¾L× †Šæ—nOˆTÔ±Ô«°XtðÓýjZ¨­¼ïµËªs;™ŽÛ•ÂМ ‡$ÅÎŽ EhŒéï~c`šýiôáþ–i|°P5Iåçî… ì>ÂIïÏ`Rñå†o­ wƇ‰ƒ­¦‡´ÃûT°ÃZ•ûè-¨ïÊi1rUÀUå«[ZCDjƒ÷Ÿ‡è°/*7¸û¼6vå^9ʰm·ˆQ¾¾XÕ2¶ë¹i%Üzã(#†"…%¾´X3¶îô· Ë>àÔ½fš–\eØ¿:p¤~ºë˜Á’µÇîÉ~WZ‡:yÃÒã´Ì¼}ûq;‘´¢ Ï_X2>ÈCrcíNÍ%hº|­PéÚK^ŒPÝRƒÑwåÙã{ üSN’nái¬çæiØ|™‘¬*Iwqíü_±Ü˜ l hóÝ ÊÂÔd÷‚îÖØž—Á¬Xù:l¤Ãûa¾7ÛNøyLrŸüwÀÊsE'OŸ…ö£r’³8’¸»3éäS;øÉøAŒ‹÷(ÒThA’‘G¡k>¶ÞÙ'þL& '.æÊ^Ú‘x>R'Æ„=¸­*qœï`ïx’XMnyHáôrˆ÷°“34~V«- â`ds G„ NýÉ¿xÚ{IÓ?ô™…«AY—^X<ÜËY‹JòíÏ;raæÑü%\}n{QÏc»¾é«M&Sp¡î!YG| úÞÛ5ÇŽB±B«Ø”4º&â´ÏIÀüøÇ¯ Öal‡ÇÌ hÌ‹öé¾wÇ×çõŽÇÀ„ÙI­ùÖrèü¹ãÙ¡!ì“ú,£ÿ©¸ã3Cb\¬ô+õëpBzï¼Õþ88‘dôwÁÆý\, ›0ãÓ®ïw/B ¦¿^ì¾:¬ø#¥·ë¢n*ܲ`üÒsýÆw7öÀ‰/CÞcß­¬+^;`µùÄÇÞ*PvâÐñ·=]YóÌ,‘Fº »BÔ_Yn{¥Ð÷Égp¬wÃI;r›_¸¾Þ”yá%ƒ•‚¥ÙX.ÛÑ}×—ò%~»(auj¡Ùr×Û>ZÊÀôÅ9ÃØlÙ]¿Gó@¶è—tµ^LÂR)jMîÃ-ØÊgŸþBíA«9a¡Ê˜HõÂ¥ö6`ÛfŸ¢á®T¼ÇÃ*Bý–ö ›¿üŠ£i•ÿ¼aƒdŸÏúã!8üúçÚÒ­œ²Ü87>yP˜h<‰4ëk»ÍÞZ`;Kæø˜îW‰9öÉþïn´~Ëy³Ã?+y¬ôpÃØ,t¯ìè_næÑ-ÔÆ­×û{Ô“qxÀ”(ÞÕuc×þF…5`Ãk’&Kö;h~›~6ç°¶k²®zÞu‚Z+«³é?~á¨YÄ)ëgöÐã(¹z̤G—[Þ^=F­ƒÆNBªzºG æ ažéZ7•&´îºò¾ñy&¸J-¯óqÿo_›VëØÀž8þ!½8Oλ–ýçwó''݉åÔËL`°èÝŒW ÊfH쬋ØøâlPc3ŽeÛ/üeè¤ø¶&ÝúˆkY¶Ê¿±Dç(×dl*ν]õ/ú-ŸxžŠÄöÀƒÜtžšØ—×.>ÄE7›Žeîºß¦Í³sõ†qì{ô1aÏ,,é}Wj[„mÕbÒ¡¸ýýñüUé[“»’E6èÔ _jÓŽ£á8÷ÉåyeÒ_û)£C80³+.ÿV-ÌØq÷éå ÃØm&ç‹£§qèC\щ4øSÞ·H⯃sÖW¤kIvþÚÖÉáhXÓLìY‹¸u3mjG¥'°œæyâ»R̸¨3`ò¥gø^¼6¯¾ ã‹cºå!ØxIˆT -ŽÑ‡S¹Z0µØõMv9J•NîQüOú³ò&BÝn÷Ãû^GÁD”Æ™@ì(#ïTÀ–£²‡é.8—øûǼí‘Jç²Éý;p~Š",+³Â¹ïèÆ Ø:Ÿz0¶r¦Õèm^KcÃa«¢]tÆ0…q")äj•N¼=3…iüúD:\¿*oï[­¤‡üõ¬«a€áÈÁבj*þ3&90fe9±¶Ðˆ ÿ¿»}[æÑåw“ &;Y$*Z[ï¦ëd¶`ßcêççàë“¢×¥]pÒà7Ó¸/¬<×.˜ľ±=n»máœYUÏ¢4ÆD€†¾¸'ì_NÎ'[ã–é”ÓWÎüÄ´>9B{à²Åaì§„¾ ëa•Y< ïa!Öø¾Îr% ‘#CŽº"¥>Ø)†;ŠÜ”—?µa-…îµW–.,½ÞŒ?€ä«”Çßm°í•GâçЬcû+vµ é—¹=•Çù3\Q2 HÖ.m«Ù’ÀI£û;#9j+A¢Â»É %…ÎWaÒGÞˆûIƼëÅÕ õ»”²ýñŒªÈ=Gcj“¢ís‰´É Ô'=BqšÜ(z€# &~ŸNåúˆäÿ²®Å•cû»óGÅÿ ñꜰL| lˆ²5Á¨”çÅY É·Ÿ’Õ\e`%àŸ•á5XœæctÛ`Á± K™Óô§±¬éhµü(³]’¿™Àó;}xËn¯C{åø¥¶óXçnèA6ƒ6‹¨ ~™¯8÷“bQ¿¦ÿKp~xó¦lí nÜC½Ui~{ß7Á„Ðuñ­'ÓØ,l§}%ð6bÚ³ ªG=bZ¼Ç„_ Jú˜½¹á\ó¿@uìÖaÌÇÁþÁÇš pÙ4¤-•ÿ/Zyx|0ù4~soÕª¿]OdÃQ¨MÌ "fi9Ž^”ê¾>,îñÝ,j[OÜžX×øbyƒcxð“3°¬ð««ƒmçË"|wñ–à¼v`ã09 þ-|;̵ “b>ÃuR=XzÕ¯†‰Y†'R¦wÁ\Ú3—ï$°á\æ”ÿƒ½8}ùÜΚ8þiÇMS 5˜ì7NvÇ›\¶‰ÎƒÀŽ·_ý ¢ï¿å˜vk,½y ëÞýO˜ÿâ$uéKhó?ÁòÌð7†ild™ Cógšè‘^I8»ç‘a :Âb–·„¢œoR|Ò¡ušå‰9³t/Ó°Çê‰b5ñü6V~X¾’&ÂOÁÙ¯™ª0Õ)Ð÷ÊÖ©¿•†;ÝÛ"àÔM7Ël^Gè(hþü·Æ^²ÅÙãÍ–Ý¿NœÄšÍ°HW¥î“äøs çc„|nUÆCsõÚºó¥}°&Äq¥i·Î24|´÷ÄÖã³ô¾× ˜dûAÁÈÐг{±fâ>œ($Åi®žü$óXÃÝöå4¾²Çù>7—gòç4rçY”h`‚*÷ßæÂ*ëzXwì°ÅYë ?>?| Óyý#qNh“]oêlÙKf?:PC¤™2;ÏÕå2N%ÌŸÛñ÷m t)­.Ô]"ât3ïë¿e M𩬬Œ}6ŽÍ/£h°ÓEM¶~ÛSzT¹â7?½†åE‡áñKÙ˜øÃ…Ë3¶>V‘çËcîɦ{aÁ÷°‰õ2º¬‰Èœûþ|õ]¬ØiÖ°8ü¬NÒ9ŒÁ‘ÅÔ‚¬èwç>Då# .»XbvÎ@:…ÃÝõÑ.­|Ñ„kÌézÜo‘x1fϘ[7¤]“èÕ(ÒÂÒ²ê¡ JX%È‘$vI‘_(~tP5tÎÈ@y­0—MG»2p ç@QK5*ì3ŒEšØÃBóz`(¤îx¨¢5LÜr/.à…Vaé_Ò’c8[>VÓ²OIdÇèŽCñRóð\0…æ¿_¸1ž†­C´â†0>3s]Íü.Ç™šµša“ʘ½bÌ%‰Ù¼´R=§Ø§;H2,oOGœšàÿÀl‚+:ÇÄtÈš0±–ÒhLÁ¯$‡/Wô¨¯&œ´Ý»=ަ§µ‹`1áÿÝ7U©Z?w<Å‘Hû͉Æÿ%S}Ù]·ÛE®¥Ïa ƒö¿û§p¢]Js#’„­g3¥u%tp%ó‹¹?ô\`Ûm9JÄÙ×7”„ÔX`®˜¾ºôÅ´®¨>V.ƒ>ÇòL‘ÓÓØÇtk‘…+‚@ûú¹mÓ”HmHÜø;4‹ vc#¶vX_R¾M]ô0ìa6Ë^Š#Šm‡=ýÛpZTTóÈ iÞ¨ÿW…cÖßÞùþüƒóÅqwæä1«Øµó\²?Ìn­Í&e‰´ßsõ=€‰§ýu‚‚H¦Íw7b³„nò#á‚ÏqíU‚¥^H9zHˆ½!Iš±zˆLEd†œœg±lÚ®°~Õ›ëá£U(ZjY­þÇŒ”/ø…¨a“ÄK’­þkìji²ßÚ¢d/RÜ#I"U‰«º/C2ö)& ¯ÛZÙUW/; :–Â>}N¤þûfê‚ûš&|³Ï ¶¤F»O¨c­J>gsl,§V<éæ”!RñþÖ +–Åà§ïWm×ÿ[™*ɘä²Ð "‡ãtטpÎÚJð·.tÛÏk#êb­Âµ$ Œì[W±*oÒ ûØ1ºS8cá{ôãÁǸø=Çü‹"y ÇØÓ@G©N›£Ì=ÜÒk–ÿ|GM³X,÷®iËCx¶ùmA±˜Yµ §Å›èߦÂÓw Ÿy¯bçô—ñxú5]}ÉÁöyFC_jÍ» ¢øsÉWîá0Í5Wlžú8¨ç­œqñè>÷&ÛðG’ÝÏã=’EŒ:v³ÂÂð–Ò-š6\ÚËÕq^Gj¢4ë3ŸÉýa)o¿;Ök±‰¥žç†Â¡×©w†Ê0Mîâ ÃÖ[¸_WhŠÿü†3 û¯õAWjgÚì}œ¸fpøèÉ)¬ó½£­2XÃzæ¾¾(Ì/ì:=¤­õwÜØê¼ÂbgÓ­¦\ö…ÂØu² ¢“ Ãù)¼–¡?1>æ\Ü­Õ ¨£’žu  ¢=ú.ô‡á ûù(vìž<$Ú(ò›%ìXn\³rËÍŸŽöï°°Ñr§N—#¶GX†+k%B½ùÔ:ñø7,K7f }\3§E$w­^ Ð4í—jj€©–K»$¶4ç>‹ò8c¿–Ç|õU71~ËžõÖ¿œù|Äš@5òw:à´œòK‰Ê,lú–Ó½uö6Nÿ÷%\eû<µ]<nHA£‘qÔnfœ²±Ø]‘ QÇß¿CA¯Ê¶°½vmÀ;· WB®jøµËÃÀ†aOÉæºC”îÝ'î¨ 
ˆ1jè‚é®öd£«Ø_Ì“þî·>ô¤¬ý-dÀµ ³;ŽãÐØù3U |Ø)txrmbŽyíþ)ð$û;’ôW*a¨wã£Cÿ WbLÉW9Wu.æýñ;Í"SÞGýG bL)ò(·-6æ%×3Æràì‘'˜~cCQWAJx*‘få¦nÓÙ4èq.ÛYû·„ïŸ×âÞö­£Û݃éF‹ûcǪp&ïí¥ ùb\ŠPÉ •z#Æ;ŸPm{ßjËQ:9üyhõe>—Þ™Ò~"Æz;]£«±–íò‹¨S|3XO9UV‡ÃuBU#hT¯¯À¹'v÷)ò#…ñy]²IÂKEê÷Ãø˜JŽ ꓉§.tjà̵¥Šôµf(¡û˜i“e‹SBÏŽ"ÂÀ•GNÝýŒI™ †{"ôaJM!ãYž4|çáýÉk»Ó?Õ¥àÐfÑë»íðóTMm4‡MÒŽý¦PAy¥{þ­©ƒ„{†£" ïmÏ×é*Î~MÿíeœÈ‚¿5a8î˜ å:&y gÇoñ%æô¡¾˜6h¯àæòÍUDZ$‘ŸßzOÃܯX~÷H:$_¨ªã¦„`¾(cã)_>U[-ËÞõ'z/¾¡6ÑÇ­¦Nã7¨`´ëþ™kÊñÐNô#BòöøÞŒT‰ƒNú=sŸžFãÀ¯ 1¹žLhZ_^,¼ £•ƒûuªaî™d»½Õd‡šÍh`º¤~Î!òô‹ gÖ“pr¿uñ£sö8裮Ìóý-ö;ÕmíÖáÀŽ &ßTRÕÀ½\Íân”…ó0rT9Áø…OÍEeΘ>q¥K!Òçs"‚í¥Á˜e¥oÎŒí…UÒG¡WúÁ±Óÿ$ÿêÍNÿ0ñ¾oñÛ™˜;ñüB2ÿö÷› g^ïæsM•¡<=íeÜo+,bJçÏÅ%û„ì›Ç¨0j–«óx®nè>[Û©$½i+ë=z8-à¶kàd’g8œtïIBÇ!Öï5·Ï3X¸þИÅñ+g÷L]0€¢d§©ý´è—<ï~ CG u¦¿ 饎÷䣯ØzÈ·yÿŽ;÷´x;vƒ>g.éí}f¦)±)6OÓ2>âP)'Ë×ó˜-Ôñåͦ>ÔÒj‘rƒêp| E'†›He}ƒtt~¶šê˜×f ÙHbxBŒúÒΪçÝ…E> J/I§¡rñ”{è½k@8'þwàøXV1¢ÂöÇ› YµŽPíÞJa¤¶Çæ±´’ßé€Ò+Õ øœ‡.±©R„©?,}48Zÿ+²g-×Üœ›¹å‹BÅç×%-PèsêýY¬ ¡m¿£ˆã,0¯-Äá" v¬~ÊØmþ½þœ£4V\jüé©xÚ7OÔ –aáaKÁÿŽüƒãËÁ—"ÙqñôÉC!ßK€8ÙõäYH ›—Q•ÄßÕH±)X Qª|ˆË?:[µ â¼{ë‰f {çﻈӱ"©Q`Kʹy½ÉÖ†ó]{/ž‡öµº´ºnPÊž?Ù¹: ÷ÎÞ <ã}W|w b£þÃVÛñc8—.ÿúÙg.,=®%¤° ›g¸è×™ß#©Ö2.õu›ú=SžVĉ§fb{¸8¡®èÝÃij¸ÜwGÎ?À2B$åᢠï9|>Ö¶ 7X=¥s—±3¤ó¸Ðˆ þmy¿¼Së.j¯j B–׊]àj?nJ²X/1xÔø!Vý›’˜•`Ýés²5åñÆ@Ö»Z‰ñPlªÍÝ¥s] úoí¯JÅ¡¸M““¼ÂÐÀšébcd™ m+*ûoÀr\U á*àP(} -ÏI\=2ÏfºÍ£á™YQT0Òùëé£".&[O¼O‘’§ÀwO˜ïÕ¯”‚!.3ÍkY}%øÕC¶êGa_Žœ»ªzriŸ¿1‹{ ÞM?*3Ý`[Roì³-vý »œ’VÃÉ!¾SÊ.g!Žs¶ê<…­D m‡Ð!ìZ!‘sJRp빸eÕÀv} æGK0ÁÜéŒFšÄ˜“S\Ýù¦zoP¼ÒaÖ ®Ë©9 øõ µòj àÆuÆQLîÌ¢Ÿ?›CMJtœ¹8ÿ¢bwª #®¾åd˜Hz€¤g±R«©8_¤ôáìoÄ Ç}¤«G9ô”1ŽÇož›‰jÂå;労åÍøæô¹KzØor—®‡hc*:sÁ'ÑïH’ · l½ðTüù°š–J ?¶C'ù›Ex¨?öœÔ 7¨Ù å?þT`Ǧ£yxx"6?uû=)e³=ÏÉGêí±ÿèéŒÔ¥OÐW¡rCÛ€ 沟$\׃¡æ¹HÎ@3œº°¢Ã§–Å»Rï<9š†Cþê‰a+åXö2Ç©»+ç.—µÔÇV¦A¿ 2ú¯¿ÂŽÑ8ùZSVXý:,Ìø¹| þîKÔº‡i ÏÍÙ$Óà,ͺùQŸnÛóŠed£k!ÆZz@{I ÂnÙ….v‹÷øK8÷ÓìW"Eð²Óh}húïΡSF«8f®oøñÉ&uÖ>"“K`a•e’Ïù;E®Ëª¾ñÁù=½ŸÓqzgÔ)âµìôµ=`à ŠÅLãÆ-LŸ¡/àvèˆï tÒ%31ºÕ`K»d™8í*ÌwMìÆ}6:×ÂLÊjK?¹ &ûÏ~¹­«3_•ä~8`ÃÚ¯›Ï^b?D‰ëñÙBÓÔýƒf¿©uXç¿_Pù[”¬¤,DÍ.H˜¾åƒVÃE“§ca°öp7ñùÐAè6Ëä¬ÝMW©~ °C®L‡±(ô¨_eúç+ëC×wt¹èðF·ãe((åÓ¾£†%‚½‚b oÝKYà,L±Ì<`Ǻ®Ú‹¥º¹}á1˜¾-[8ÍScz§düœ„ð%‡SA’0ü)qeGfÚ¶wìVk¤7…TRÙká#^¸Ä¿j"û¼ þ*?ÙÛx~›GåþqϨ¤5»7߾‰¨cJ,-°°+Ýöj…ö¨j] 0µøÒÛÊ·s8)ÎúXàøK¨ß"?fq¦¢Õ%êøY°Øf@}èÃ}˜koNฟ*eèbøñm•÷0÷},‰=œ³å±'þÏía_ð]¸q?ßÖ¿0ÔÂl÷¤¡)—Ï8! 
§Õ·9*ã\Kõ¥ÐÈYkîrù:,*1Þ¼Š³žmº†Ù¸¹qaçàx ιßÐêë7„òêkMLŸˆ8æú¬Ý¿¿¦ŸM 3UoçØ,Ï^Ù$ýáíõ»õ®øH5>0‹†¾]ú?qnõ'“_ëöU¯ÿ6ÚS ]WÓî¼^u„ ÿüˆ‰„wPMûfó€}t8y-W¹™-ž>G UqÏ|¡*#*¼>ƒÛ¡IG¡dœj Çý«¼ã†¹qà’ëæìÎTœ úQ¾=¯ ×yòÕn©Ac©IØpA/R¼Ý’²J€D ìø­åŽK?Å8ZËÆa€›ýëºìmhyf¶í¹‹ÊÉΜw%q僉t‘E_ü6š×q’­^¼/%m{ÿ⟮vìÛ:îñZIÇþipì8‹ì˜ÖEqÁðîO¶¬8+u—?”ŒÁ¿k£€,šºNí=s%ÕOuÎ9a×›Þ—….¶8ÒGWòâF)ö5v]ºE [3pµ|\ÿìŽf*åÀê/ÒøÏød¨'™;¬â¼þêAç+¹ÿ¿OÎu›=ÝG7:¯*©÷rÝæ Æf¯oß_éÿÁÈheÊÚ&¬Ê¬dfxŽ‘U£ŽV>P癘ê‘;¥R¼’¡½öÈÌÖìÜ0ùlkèÐ#‘>K¬8öó?6“æì_Uon#òÖæëª/s°ßÀ^&)² ¾ÿ uÊ·>uf›Š9 ØTТÄu G3ý~ŸäÄÆVÍO÷{i ÙâÞ ºT¬–½±/ñÅÕ›[šÊ«a8$k4dÒtë<Þ­J¶Úâ‚WXÑ#‚@mq¡¼%™§Þži¬¢~†iÇÌŒÝãøpð½ÄTËólë÷˜à^±Ãu}íµh{kƒ}CÑýý8ë?Úõ ÀVþq‡Í$àØŒ–кoÖ4+Æ:—;bƒëÔý”ã#бÿ¡0•9+ ).éopó@­k:,Xœ•yJ¿„ÝσORõâüÜ‹Ûie-0i§züù4öëŸØÌšO†ÅÌ÷Æ=ß@C“àocn\Y9r§bÕÚœyjÔC 4A‡R¼$ p@ïÊê÷0G+˜ô«wçå*=߬¹Âj6ÇÑEã/˜ió¯óô ÉöyÔ•Ž<—‰eã3gOøøÁ¼ú¿\E+M¼p­ÌOT7žÌ;¾Q”Åù¼ÞÄñs)°bb rvZIjK+{ˆÃ`ûÛaE)c ›$sÅÞµ…&*Ê+"Æä—åÚCfl§¢³ Tø9)”ºž…Õ÷‡¾Gø¼ÀÉ·Gü¥v¢û©-ÓŽ0ǰ´wmÇ}\îþö‡ï¹0ÖMލ|¯dÃΰÎöOd±¼7mNÀâbp›°0¬™”í·¢ÁÉﵦÃù¿`ãÚê#ù(ÀYk.ºê‘x1OÁˆùjfºçÎÓÞ½­Ÿ]!7~óËê…Ùɰÿ4qÂPϽüô ˜msÒºËù—è;xUö­à¤Õ¯ý’× ÏS&ªBÍ \ÇšÒÏÀV¬‡·Üíã_VR”.Á¸e Ñ3Û}Öh«²û1.—^¼µ±|s\þc„Ö¥û‡fÏ?ÆCÇŽ¶«ÁÂx|Á‘ÒX™ØQëiºÍ“j¬·,þ5@ŠúØgòNKlУ~s¸ÌGí˜>?:#¥êwSb|áMWašZ¯$¬þ‹3>ôBÊè¢ëç9aÑàWmé·­°qoó§±Ï&vpR(’Þ@ôTï²>í 3Ò³‘²x±å÷L<å îb½¯+ñ6YjRK04ÃÌ )¦m(«êv¤‡†ŒG ¤ãô»’X„DpÐEH5Bhºè…_Ž…R`Vw–,0}Šê¤²^ÅAc-å{‘É ßãŒÖ‹Ý £‰ò­>bî‡b½÷l8Í-û'V(ç7ØæÎÃàðÓóñ RP9« Ïã cyTüeêiªý—ÈÕŸõa~'cA•…>ŠëGÚlá!h¨Š½ë7”‚åÇÊÃ-Ç6‘,pݳl ÿ ~~s3Š“í5D?bÚÎ$œ»ÉöF$LF'»ò?0Ƽ­ÈtöNN¨ÿ=J”ú¯;HÖ—öüo"Mâ>ŸµƒöÐñéÂã÷#`ÂqŸ›ÐÏLlÜÖòæ‚'Ö…óqÝy¢ C?ÈØJ Ø?;ËÇùü<´^ÿvïSvhü²k†@ãÿ¡Àë+¶ücH2ø7ƒœ“½o{þB¶QBÒÇ\sèŠ[ýÅ!H€–Ç”;ìžàÕJÖ|ªF…Ç;ãö·Þƒ>×/ž¡Á•ËŠlcB3õõi³ä÷&h¹Ãûœ>—u„M¹bùÏ‚M×å`ŠùwµŽÐSÜ(øã©¨¨‚‹y^OcNB'Ÿa¼y¬.í¼¡`ý7Õ„ù‡â"Mæ‘®?Áø=¦Yè—  OSËKeØôWÆr /má·g{,0› «Jq@C—š™šã<ö«7ïQÁ¥R½{•ÿÀù»¨‹ RA¿àï{ö¦¯pTÛÀÌÕâ9lÍS° ™¹”·m—¢9 ŸûrÍÌÛ6Êcwý³¡ÙâÍ]̯\ß°tú=6|ÖͶÖô‡Ž;>IgR`ꌚM• ¬7l Oªß‚¦1öãîŠXŸÝ4žÒÆ e߸ ŠÄpŒÕ U†¸pð©Øóü4ˆ”c>ÖM£……K¦AY0¦.òJ*ÑÛ—$Í7Ga¢ÅDúu¿n ¾L°–ÇaWËNYn~\‰î{&³ÏèþÉãÒ.?/‡Œ°/ú™I]Óvž} ÃêÀKƒéšOa:ºÓRñQ<Œâ„ÂÅG Ìæ¼»ur(4HÔR'î1€®â%N­ô»ð+¦¨¯º€ÃØä®'ÀR”¼¼Lͨã)1›VøŸÔ‚Gx„©Þ°]ÈF2íÛ˜A½§8Z{üº3íqÈbìÜ Oì )%ë›ý€6º¿þM€ÁóXŽYIl­ç0{.±í¡ŽÉªû÷BƒF¾Qåôu  ó‡1àVŒ^ìÑà^A“£jŸ„Ý$Rôóî÷0ð4ÛFžØëï3;k˜+aúÀÚÍôY9X”(ÉOÒ‚¶“ Ÿ,ËR žsØUÇ$×§×ü(ü°ùæ9Q¸†[˜ìfÍO§Ãh¦VÕC„'ç^§³æáh ¨°+féBc—y¹ µÀí½§©7‘vv¤¼·i dÞR‘š‚‚É7ÁCØöN;·à·&.ç„ °îŠ•7»k¾xäñ} ÞPšë²NyÖåodkÁ­âùvSAÓ‰•Ç’°˜K$P;ƒyV[yƒÐí¤eüÊ >Yö-GË2®VŸ·ºé>†#)â0f8ðJîÈ>Þ[Ð,ѩڸç†;ÚÆB>Bþ®˜ÑæÕ4œ?‘9‘.Nd$œ>nØóeO¼ŸIÂ\Z½Ë‹GHê»mÇÄj‰Ë÷½›Y4O@3±aÏ?cèÜòãU#@—âÕ,¥¾mž>aºH„!î ª†Õíz¹ë¤nÀq {¹ýÚ¾ Åêÿ´?:GÀbeZ^èY"êPöu5ð¥)ïÒ‹3–Øó,ý ¸ª·gtÇßkª¥¶ÿÜ¿*ár³™ñ›0*Og+¶ï;ô^;ûsõÂnœW/9ð%5ZÂŽî7za„dÒ‡|ÎtÅܦeøš}÷ƵílÃM‚rüƒ>ë³Ô‡ÜÃòεТëu˜Ù_òäw “å¦R‘–3—&bößÁUü´Ø&´„õb³Å2›ÐÜÐ0£f¶ §£Ì¿Üå›FÒä÷ª–›“à\U:ƨŒ™Õî>,jAAEçßÚƒ±ˆ¥1ïSbÎ ¼ú¼ï÷\gì¯öÙŠÌ/I¯¥6œãŒ×¹\Ñ„©~B_šC'0ϳùÔÛu6$-®%jÏÙ@ý#v­Š'Î=KÓ^[ö€Õg5}áßù—žw¿ÁͶ ß—a35…9Dÿ#„ ”lÏßòë='[:b½éÓ¿  8áÒð½ƒ@ÝBÿ²¶»v~°:Ð-oR©0”Àì ¦TŽIhš×^7Ùj…9ùÕõcu@¾°•(zÌÇO7¶HÚ ùÙa9»¯Éþ]„ó­8ôS}ºéÕ)€Ä¢ÅLÆXá“4½:» ã¤A{v)\Ø}ó\f$31[æÂÎ.\¿Äxô˜ ϶/—½¢  Â¶£'Ìx…ÂÄíI3Ϩç8ñ/AcO¨,™GÜn¦)úǯluéW¡ÏÙÊVܲ{Ã⊦“t¡\ñl‚7§l‰íOîÃFÖLË¿pÞéf€¥˜»´¥øG+K[ »nààRëWmÕ"• ­kxä.š5W¼Þ‹u5‚Þ_ÏH@íãÈ 6N|¥éJýްjýXÅ2%Sõûа0¯6ÓýIšßgØ¿­¢¸¦ìn­]8Cëmñ¨Ç—5FV~|ºA¤5ú°}G…¸î¤n£lDÏÇFž‰­è‡â˜Fã›W1ë¸QzwÏBgX‘‹0ûìÖÚÍþKa8v—æEë@Á¾¸ì%7[œ‰ù"+‰ ëoS'mû³Î¸Fà]XNn{ÊÂÕÝ·ØB Oàø‘åÙ±M\OÚyô RÔ?ÔúáÒ§>ÁÛnØßÌšµÏê6¶”ø+è9iÙÂÀöµâ"XFdPλàæÔZ eJ£ÿÙ³«áð5­†:»±áäÀÙ`b#| º¢~¡Š¾¤Ýp£OÂÕîq³aì¼g7¾®Ò…]ç̾ <ß$Òä´®¿{sµy&««ä°ç›ºöoUj‹Óù Å òíÌßpLyy£Òd4Üß™}h×,N³&<ÓvLǘƒçÖ¯µ{CûºäçÛ<Øä¹ÿSª=/ÎÝ&¹´múãÊÂÓxú,è^©¬¿Ü…3Kßçh° ZeÇüdC' Ÿb¨I â9ùY¯ Xü´ädÆ–Ó¯½&vO@w×Íi3'g\óŠã2€^Ýoë««0,C>ç+§~1ò]Ôg†fJ—] LfWPŠÍIX½£…ë<3.Óleà|í$¯»‹T×tézOèá’Uwáøë| ëX1k)Ü¢JCTìæçÊ%¿øynÛxÿ†Ä`'—lJÙcs\ê‘–a’pµ«Yûý_Ãêãéôÿú ºûkÅ`¯Ÿjþ¹ˆSб3·;5a‚ERPüsöa¢ù<1—m®*>ˆØ^ÿßÖ‰ô¼0GGuW{Åú‹œ’ìh¿cßk”ß©‡Øû±bµE뤽¾÷àE„,®„>qbÏñÀVV Q 
–úÊU¼*¥_â‰c;°íM\áîI3¨åM:šiâ#A't>AAÈé¡;‘$!_ÿÜ#·6/9~Z1KVi;çòîQ¾ZÁ–®‚ˆ8¶{4¹Pùaq‰ºàùñ¸EEͦ-<…Ï‹d7±m£÷üë›FØS˺äöÖúú8‰lînÛÜye¼pøÎòŸyΞù~ Hä\,Ãñ€AöÂÊ8ȹ2öÁkOô› ýD¤þò“UC6’÷þýl ¤ð*[QÐnâõ—¤M×R0ºØÞÖ¡µ Ê™Šm9Eëp%äi;  yx™­"þ6âËMÁ.}ßYå,– X_Õò¥Ñ;–¿¯às§ÇoI½{Þcvœ[vw=pÊtõcKL’§³íìB Ô纆à3׸Õ+\;™ö–Ô„}®uV5| °°¤óù?$•íÈ«ÈiÃÙ‰‡¯kë ;Xt_ÏkN­•<åb†ýÑû'O AO]qVc…Ž5öþ ¼CE3,|zϱŠÝp_ó ìÈÿa+ï äåàBÿ4hð¿iû1 ÓǺØÍ‚¡Ü¹w·•ߘÇYoêÀ ‰a¢´%C0c,ç!ÎF‚iQÞ—®Q«HÙ»ÖòóÙE_têÙã÷¿ë§´obIîZ€=q HÂJaªSmÐ碹¼z­ Ǻ­µLA“hXôŸ¿1D:žysºÅ4(žÖý^uó#tjüÑëH‚%}½¼PëÞ¾¸÷º?ô·¿g;¢~'_PÍÞ§š‡!'£Çá×aV@Äùh@ôÚ´Ò¾¤I ;D¿?ÈÁÅÓÃÊwï.báå ó¿.0nYìôå}¿ß]O\ÜߢÓ¯h%¿øˆ±{ÎF1bLÓÞŽ2}¬|Ôù#úCz÷›þªæ-×B8ZØ¡ú߉Ã~ õ䘪èL`ͦ85†±ñÑ3sºˆE\1p—OÞæ4ú_«Ø.œd­Ü•V,} ÜærÖ TYkqar¦á„§‚5’=JlÏ}øã<ÿ˜ÆV‰ŒOS\ÅΈ`×QÊ!éŸ3ØGµåGmZˆ>ehlÎAp>Ÿý£¯|ØcñîÚÁÏú8zÄ…Å&PÈ/2c“;úaõë©Ô5X–ù0T惙w–U0õ^vu ]# pŒ ³Ü*‡oˆrÝu0"ôï¸æìCX1Kˆþ' µq/®®N®@íŸO2fJRPÇäбÀ$‚¿cb—›Mp¬öfŽtG ¶¼Iš('R5îsì?L‚y“}‡%Ä£¡ÖümæñïsXª\ÈšéøWÞò2\qýj¤³ú(Ìå7yW­*Àø@UWÃ?M\{0æøè6'W’¢Ý€¼©ëSØ#n7ƒŸ¦œ‡Í¯{;¨ª±Cn÷3Ë#ŒðÁ XëjŽÉHlî wÁ…‰ó. 7gpDÑáåA‡nM9›žM~E Éˆý°êúÌŒ´^›W"à¿¥Þöƒ°aä{Ū(F+}åP€l—z Íæ ÌÞ4Ýý·×; D*-lašJ“hs+†~6´‡^Àüêf‡½üPßrÚåÁ¤4,–L™¨ƒÅä¥Ò'NBßeëe³2œÊ’—ŠlWÆ9Á?#_ÀÜŸ}ãñb@>BPK0‡ùç?Ÿd˜¥b·æè·0\¿à!&ȈىK;%y YHaj™ûtgÒ ˜‡á¨L#Ë«Xxbxm‡4TÊä?~$PIËÕUß ƒÆDmFÁ\'1Û/KÞÿþüïÃ#;äY0j"€ós%ùM`âéý͇ôÏ‘r#ÔIFÀ烦ﺹ<ÀÞˆkæTÏJ2Caâc:#&6œ“+‹¡t…ÅSêr?3¡{ñ1¿ä $‘:r¯d¤*þëÛ`—¢…µ¬ÛÆæ ß Æâ£¬-07* þ€’¥Ç·÷¤"ìT¨œ”:¬)$úîó0ÕºñSsqÚ™ÿ‹Œ8ä)ó²k«Á¢Á·äévœˆ  ªlõO·ím:`š;½lÎf›¯=ÙöðÝäƒ~ëâÁ_öNXÆÛ(xâŒ>v«>Ævr 7D>¿/^ŒUŒÂ×N˜<Ç‹oÞoÁãSk`£ Þ?CmO« IÒ?Ìé×qaGÇq…ìFh&lîmwa€¹#*ÑînX±yØ9¾ÿ(ÔÑ_ä.2–†Ò±Í€4þ8ÕvF1ò ,Ø)¹ôæÀd|ƒ‹¶%tæõvæg=ÀÊ—³KúÔHªVz{ Žîü+֏ФêùGD6\½ýAU˜Z”ŽS:ÌspùË—S†2uã¢dæ%rËpŽ•›û··lžt¨îÁÉt^±ÜW18‘Må—÷*ÃWXÄAg8Ò¯hS"øm7ßvÎöù0”µü‡³ÑÅÁF,5Û\´îdõ£ ×½ÜÙÙ[倲úqšUVRø LÔ_ÀF‚c˜Ë‰JXo8ø.Ò#ÆÔ¨‰ò‚ 8Á~2ÅMè Î •³žœ²Ãß·ßócò Ô{ rñÚ‹í>–)ðôüG·æ¶mŸU’ãõ…e§@è«CNe§{ÐZá´'R„°ãtÆÍ#ÁPÚÞSÈÆñGµÙn´úBv`›Ä<ôFrÈ;Æê'æ!mÇù¤­3·7::Oäµu~ÆnÓ‡ãåp°FõýN×$\´Ù½øöÁg,d÷l0ùÉfN:¨pô‘Ž^jYƆµïuâ§‚ ©å™w†á{o­R ÔÏ¿Xr‹ìÞ¿ÝK ¶ýªf©:ÂjÌXÉ‚ì%ØÄã8A¿ŠëÇ’Ê öÁÊ»z›œh³÷Ñý•ˆsG‹/ßĉÿfdú+¶yͳÃ1íÕm$‡ë~q„H[$¾ïë}rM”ã„îw¾åýzøKǤõæ›`S²âð0ÝæÎŒK&’0Ç0;›®ŒäVÿóžï¾@W{eCš¿'n¥æˆ{µ]²Ñól)oý“ˆd:ÇC˜R4k¤¼¸€ýb†G9éCãÖ›söWÐØèȯhGý§J $T °QSñì¾›¸^JsZñ–&Ö?:z`DM ¦HÄãzS-8Çâ.Ç YUwû'ó ž U’dåʼn‹K¥»·ëÇûÛƒ½«8!–wíÑ–ön>Ý |ÇIå Bž€¤¿b^†á•Èt®«Õ8Í@nNg¿ƒîÏ1Gä:Ö÷¯,Q7ƒ!dž§7’@ß1®Ç,´ŽX÷îÆˆIˆ+Œof½ütØÀà˜6ÐFõ´„¼"››dá´qæm!ÂS˜d?g=ÉX ýe9…ËS¸¸2ðwƒUëö¿dóꑇÁߪ5¬Ç¡dÏ·IùH˜RRe܉½÷·º-¢qÔ‡kG± @¤l¡óΤ³e,áÜ=Bâ`'4Úùôž îÄ ³jnÿ*lŒ`Hdhƒcé.V˜úótà‡4+.ªþöì†òµ> Ù60Iœ¹)õG#+KtKèpÀáÆ…Ëÿ!‰¾`Nð8tÒÔº“ô9¡½PI‡q Nw^uä‰x]ëü‘;ÐàJµY¿½G¡Á~Û/ôÒ[äa"n׌úT/#V?…ý_SÂuÛ+`áŽÇôØ*këˆ2Z“Ø‚?^ÍX.Á¢Ò>§ÇÔF˜²™î–°º}Û®én]Ǽ%{úÊÇØŸzA@«xJÙ ­9êâXgIŠ+ ‡Ù3&¾<¤Ç†5ÅÓ1v<=±„]%êšR‡T°3ɯgž +Q°6¿(ŽãÿÅ]OV®ÁU÷›×©–pÐò›õ4Û>¨ó¸;Éæ*ŽEö¼ÏϬ<†º«'x“WqIJùO½’&NdÈ%øê©þ;&þlc;/G ‡BŽÁpòŠó}=\§~è(›^M]• %qI08çeÁj’%’–aëP"ç{cÀÜÇ2ü¬ôªA±Ì÷« "S˜ÐüUc‰–‰L‡]fEªpx‚áÝ•{¿±oá¸ö{6v(¸$X°h¬½²Þ|ñ­.P\ôDJ®äNžüðËù´=þÊË(åq‘Õûü4°ï ýÊPVvü[ø¯çx4öè¾ …Èøá“9~qG(ž ÊnŠ@ÕÝ;*»„[\‚ëm ayœ‹òDkGŽ´V~úæ…䡇¦|"poóu 讼˪°ÍQÌZw; Íx&)O. _åÌ0ážÖ†bwQhµ,ÃóÏôæPYöžý8*AÍ}åG νç>Ï]‰KÂDÉÓÔMP¡Ó“´ÍƒüŸî[Äa}]ÞZÙ:3TG ¹ŒÀá<ó£ïf`ñÏçš_ÑD:›Ñ!½}›ÐsðÖÊ{‡D*]£®Ž£°ñTðþõif\;xøÏ%h5½ä)’æM¤>Øa,®PŽs5æ¶nAÿËá'Ÿ•0`™™póH½–µGpÛ7jh깬ÎàâÇßî“ïpò h¯0táòꆴx4 sÝep4ØöÿvEò)áíºÍ˦»O¤:bRK;ÿŒ@+}ðƒlüê¸!'ƒweV»#¶½ Gãû=¡í>h$'’K„…Ý%Jlw?a ‘*ÞÑ¡…údéãì$yfO ‚ús)Jùõö»c¶3´Ú½noz[…£á{õ8K ûæÛzË8y¨øEõh¶;6?äö…©´\‡÷§¯ât¢Vú)nlq.uøüÇÿ°}\jý‚äãáÂUk…˜'Ão™Ü¢u¥£Ôÿ»Ïj¼/ãAÔÅ( yŠÙ>¯Ã€\Écáç—p*ºJûà È^3?¢ªç¹#ÁÏÍÅ‹ O|„Íd÷cÙŸú”af ?^ÜõÆ¥óï>f¼„öÃuš¹œ8äÕKzËü çߘ7UÕ±öS¸áFp̾é#®B·xŽíCýzœíÝ}±(Ä[Ggº¹a!ïØ`àçšî×}’Ë@Ò;ÐÞïìŠss†ñÝ‹68¢p—“2† (F—¶sYb? 
‹Zìv.šºH•Í@ŽÖÝÖOœý[=S¹VÏ*Vïàò Å(7§·;]dcÁ~펄¨SXÚ÷ºƒ7Ê;æeJ,Ù.©¸sŸ>5CÏËvðaßž0ѧºÚô¢“#Æ «(3Óž¶8}ɇ=%öÔÞúoEÞ׆©ozT,âÖlù›o¼k8\]ZÒߺÍq¡ÓÞ?B¡:8$Ųô4I(Ñ÷!ñØÃàÕPøg,—ô÷‘7jô[ìr¸*=¡Œ m} ùÓ ÇÁvpO…?Ž*¼ºòªKFJbo‰àÁí¼¬;DW‚Bûvq6=†ÕE›1Cþ(#øó×[¶áœ—-U¹Æ<Œˆ ê ýƒç&FAòüI¸ôêUsÀ½0@eRûèåiœûº/†°WÇ÷ŽœÊÃÁ%y#½§Þ0|˜Ä©%(¹¿uöLÑÀÌ·÷÷76Laä^&“Õ˜.Gô=›š½ûGŽUíÕÂ>guF=f?H§’Õz‚-ávAO<õa±cnK:+[ ßÎç`µ²ÔÎg£ÒDº€ÁþÓÉ;aZˆícõåe"]䄆ƒ*Ïsø×ïtœ=×~°½º8Ïüaš¾‡?‡Rÿd8ÃRª“ï:Nž÷}:amrÑ'#_àHÔË÷‚oqæÍô¹™Š\ìu؈ZÇ…˜Å/аÿØ—þ#©°LÏÞÈ;sGx4ãTE)8B» tã2ÅðRÖà„Nç«è¥ª6`yT‰Ð:×Y¬|xçUß |0IkºéO°ø³t{ãÚƒçWHÙ˜ÿ¯ìÕ­k$huxaª½‚C1Ãlë£YØ#G¡¾失—{8 Éú’oÀWœK}³ØNrÀÕ+²âöùÁêŽì©Š¤›HÜÿ7´é& ›Öªòï1„ÁŠ£Ò—c´p(AÌÆQ¬ †nj+œÜ—”ý6Œu ð»zêh£)޼öô½óy¯ƒ«úì?—ž©x£ǧÎQ¬e"i™˜çè„~n4 òØoýÚþ؃wºjó ¢Þn ç^Ò·>;}ùÍßF­aþ—ðîÍSÐ¥0 þè–&”¥<=»/î ‘ö¥öëÁ&_²hµ‹>¬HÞôÌ?˜K Zhÿ:”Ò€Ã/7£iæ¶pȪKÃèÖ:Æÿ1öô>‘‚ÓZ{º"ý©¡çwô²ph,1ÿ®½Ú†…9LfimØôqµeø@ΕDǬ¸‰bëui_YV_\½Öü],1 È?$?ï¿ Ž Ç ¦s‘«f©¶¾¼ß|jmg Îsi žÚ`·?ÕÀ^ „‰ù0œŸ`ƒ¢õ®|g°Œšõ“éùwH’VŸÑ Ó  ¾ÑA§Å Éø üó½ÔY¹Ü„-BaUnƒž0¶§ÛmÄÆæšN–ÎOCŸÓ%+» ‡±ƒXÓX³Šã‰êÒ×ôÞ`q#µ1Ÿ6‘Ær?uOÑ9l;²Æ¼ðPâÏc«Òã èIôÛaCÓL¤‰íûÏûëP%pÌ]Z%ÚT;Žp'¦Â€[ó]z*œ<5MsCÖ‰R-œ~Š›Œ¿÷ä|t¡ß'–æ? Â|´Èá]ç’qCòº“LÚ×y±Ç\ÅQ,GRP8Ô90¿Tñ#Ðf/x¥Þÿ ÅugÕÏ:ûÃ䕜ãM€ÉKÊ*ŸepfЊ‡ÆsÌݹ®íûH;—ö¹T.áüßËCJ)0£ÞÕ!$Ö·ÃúHbd½6ø»ü~gÖ]žÂ©Æƒý"œWñ ¯¢ †²lヸ9œ£²’óqN‚¹Á‹ITgÖ±ñF©]jŒ0:?v:Ê ‹+TÌÁNoaãš„¤Ðk˜aJëè³"Ò¹Rè´>)vЮ‹®´´aå9Ÿ|G$±¹²<¥ÿ 4ܾþ01ì6½Í÷A†ˆCGîU³üäï¦všé8¦éÁn Æ+¼Åw}Bä6ù³¦¯Ç-|¾¥ô ù¬ÃÄ[/‘az;oU²Í£·2n½m†Æó†}[W)0X&u¬+( ßì^ÜÅÜ%Õ]ÅìÐríypZ ô^öÌM|ç†kÁ{ô?T˜À¢th…ñ¡lyG+=¨…#Ï™šXÃt¾aŸŽ©4HªFlƒ^í¸§E‰qPœò{`ô4Ö¦»/X" ˆEöì–GJ–svÞÐcœÙ û÷`<§~oí6Ò À’mƒšÇÕŒW?–Å¡š£%]Þ“_ ¬¬Ð{ÞÂÖ<ìVeG]¡û¾e-Ê Gbf'¢uõ‰¬qmQM|=ØÅÒ\sèââ6ßi¾Jõ¾} ï%úâ0÷ñôÀÈ  ÒÚ”{A ›?οã9¨ ®5©MÏÝKG¹—pjàG­TòvùĶ˜…SjlQ9þûx/ÅV©<(ó8ÁüHîBmt‡t=TÔ½Œù96Gn_ZŹ»u‡ß§ˆT^Ò‡îÄæBßÜ]ê7FÛ¼eHm­d®Œ½Žw«·úôÇÉ®bã99–Ûl=OÇj…¤ˆâJ†±6MðO'þ"P[D¿,áÙ‰C)ôݹ=PZè4z“Kîdì zp½fÎ m¬!)ul×—4"TT+í3Áø‚ù%wh:ßcyÖ'ùše6Àè——é_eý¡Ê0áÌ2ȳŠâ©~}¢´ç̤D`¾“P!~F>¹Ë™_…ñ;ž©U®/° cO#g)w«)1]™&P÷§ôÔÿч$ë…J!#sìW—;ubËÑŠý¯þ\K·$Òé-lxZ¿ÀÂ>QÝϱWk´My¸C±0FþÚë_§p Ù½+ñInobzd=Hçißþž}½Õ»w<ë#P= neéîÀгÆ#.Í0ñªçßÙ˜}˜qég³M~“²æ¾ ºMÖŠZûq¢Nâ¾q÷œûïGTÂÄ!œ±ÿ îÙ{›ïÑ·§µÃ•‰„ŸÉ=w°EkßqØIÆ)?:Ò…Œ~¬ü÷c8ø½ÔÝL9~øU<.»^í’ØH3F)Oö}„åumO>ˆž¸»ö± æ;¨[² 5„ýºÇ³dpîÙ;E§˜7jA©ç¾›ÎÛýtî@‹‘êÙ“édÁ"œ»N&;\“‡æd!¥‰< ÷¸wýT ä‹ëìÔDkœ>t܃vjÛŸâ•›¬Vq!ç\Û! 
˜wL¶…eUoÝ´Þ9²%s^¡›‰»¹ï^Å¡¿ŒÌÇó<°üÂRŒ_Åi)Ò$…E'à\âë‚é!ÌÈ‹w¿ç^êǪ‡jܨ°{Éþü¥JìZ6*j†%{]Îüø[ ‹æ™Î7¿èeÎTi‚.ßQ­ÇQlñ9sTÙ:†xi¯5å¥ÂXËŸH¦÷Ë8$™|iã©6 l1d.(pBý7•ç†w8±t×ÇÄH"Td,Ë3&ÔY;ñéªe4οòí0¹‚s6~³Æ0¸Çnå”°Ö%Æ6=eæGÊßÛŸ¿Ÿîƒ>^3bEÇ$ëN‘Ä `JÓ&ŽñèG\U4¡«Î?ý[׊‚NàZä\áÃrj¨—0«æ?*Ф[WÎí×7ÅÉß;#N%vcÍÙÚŽcG× ãrTŠ”¹9 û·½ýÎ] &×>XO Bg#îðc0 u]!k¥𬫷‚ùpØ +—àÜ2+Í']OX-œ ,ÙöøKîøaÂ8íi@笯Hðgõg€ñ²¯”]©VDjÚÏdáBS:7wF òý­âÈ€€­ÝÁUuŽy‰qÌÎhw9§)ãàÙøŒ¥$>¬yiö (ú»dWØïBUÄk¥E—OØÄ0ûÅýv(n $½”¬i'0”<ÎHgÁÅøµá‘¶LÛ?4bŽsW•©š:¾iÿËÕŽ¶Ð0×ý™G?ºï}ì;­Ãnr“ Ú±Çï~ʇ_9ðô,Åg|ëõj¶Ç9vùÅ“ýXÇê­ãË-ˆý5fÍ•'-·ùL®¼~ÊÇCgŽü\:„[Žõ¬‡ÞáD[³°W(ø”–hÿ!©ÇnœRàsÃ^–§ñëuúÐ߬×ê¢|¦¸o½x>µÊ;„›UU¬s­‰!û´ôZY Ì2âø‹#<µ±Îªùàî綘bõÌ7¨ÔÇøXƾ.dÂT ‹@»è0l0æT1qFÒê¹—é$U }Áî4&_èò¼»õ-§ŠT¯ÞÀ6\8¨‘×ë`Óœù× a®ì¥öý iüTçvåBôý¼ b·wç:¨_ºÓƒQoV'ÞÃ<›C“¨,ˆÅwJ}„ºÒÆqIa˜Òw;çeñIç¥N'틃Ž4G®ÛÈCÝn;KMahPsýµm/‡¤cR›oq±;G©ä#4½ûñ÷e-N¾•pnº¨_}±ýAA¢þ3ùí1šg…GÕ±ï¨Ê â¹lŒm_ã€Ê#“m캺0aCÍZq€™43K’¿ÃP³ý?{Ugˆˆ¥<èÃÕ›~%"RY8ûš­F’6x¿ïøkׂ‹–SçGrÂìˆfâïièñ:&êÀ¶pÛˆ\x&‰KÁ/BE¾‡aÎÇû!t°XÊ+¤çŒEsE”É*ËmÏ—¨:Æå+#nžL‡¥ñ¤ˆ…‡ŽÐycvr€þ%îÅ4Ëb}Ÿþk+w¯¶8ÔÔb€©ñmb{Š¡C·™îh,Bܧö4+¨æn5tò:€#35gý|þ÷¼js¦[Ð$SóV%æ¸þâÀHn Ö)™n–~ƒ3Z7kî*Á$ÕÈ Î¦5ÝùÓ¦„QÊ”±tü·†]D4‹MÔ…–°óYáÅ–8×\ÇAç ŽkëÓЉ¼0¡vOŽÀ EC'þÌë{@çÁÅÞý×ý«ô\è?æÍ°û5´Ë=®<×І É5Eª°ŽB.Øú ¸šÝˆ9¸°¤ò"}ö*L¥šÇ¤:®þõ^bÐÍÒÕîUgp£S^Ù¨òÔŸ®½ÛÓ¡}¾B꿪9!ÌØDãG/²~\ˆïaÄeévEÁD_ˆ/pnòÅf7\Ë\µèpùV\¹l@(nRÏ} ËlÅ=ÿl5O$)³RLi7±ýu·Ûï'Æ8ºÿ̯ݲD¡Ý-ÝM\¹aw™?Sˆ*%—œ”¼±àýT¬Î– †×dÏÞ[£àðßäîÏ`þ,OŒáUì“ûIê8_-{ª ül·ëÙŒêaKµ4ö­ÐWœ¿ŒÕFò·é]q^/éMÌ`ަœ¶à© ‚šA›ëßÂíaàð[?#œþ]{ïë„¥ËÏLöÄäF_½ 0”tŒwSf ÚϾ5{ë‡+g",M~Äz¡=ôp¨~H"8¸ VŠÞ¼LÆŽ¸Bësÿéb>­`[ÌœN·x§Îÿ¤ÆÑ‘Og*¼¦péiúÆÝÛ|(¿yüî®e\øÂ®§Ó ‚NÅz™8ÕZÐQrÞ*¿,ùXs0céQæ«üX༵Ìþ*æÎÈäßH·ƒ~™¬ê&èõ³7ÌIÂùZבôú8r€û¨ïiX´£hïý }—‚Ò8Æß¹"+õŸÍ&üxÎF6Ú{$ÜŽAâeŠoŸ. ‡ù(÷_¼MʧE_« áúWI0ž!ÉðnÇ Ü8¥á®ô\†DWÿ–ÿ¸Ã)£ S²Ðá{Í~EÃJïù¿»ä÷ÛoÄiOxoá"±ünÀ' è-w#i_‰…ʆ"n…þOÏpä¿IÉ[uÆX5yËâéK.¨´{FÞ=ˆ£¦fmƒiY0ëÖ´õôÅÄ>kUI;J…^Ï×îÐÕQå1­è‘G¸2]ý“/4´ÍsÉöÝÀvÊW¯ª°³@p<=ägŽåä›@²8|ÁaöæëÙžüýWbN.¦8³©G)¹öŸp žuæ\;.î‹§©p9‡sî=AWNAÃtAPøW:˜Y4žýsÎÊ¢üNñ„çà³ÛAÖw®¡ãˆÙä³+ÓX|ÔyNWét¬]>žœ™@ –¼þ•qÛã 9®ÒøáÜʦ¦H4/ PÞéÒªf¸™#’E9ÄúU^àð¥2i^kì<÷¥ÔýÖIØÈÑ&eÿ´Š]ÂgFª°ô¦Ô1͆m®-ºufß±8ð‹SIqó=ô/EH§>Àè@ °¯ ¤ß—/#|z¹ÍŸ)}ÎA»‚„á·”cXÙõŽIŸÂ†K:ƒO:T@Qåu†ˆÏœ°)ü›‰è Ä?›7E/¿€… u3ç8ráoàðäÒt(LØ_{kù‹.èbt¸ óMžy_Å×aZ0áó»Ý]0µK$Á¿÷4d ½Ü8(&Œëwn)?Å‘K•Ÿ7ÈDÚÚÃÅÝèì>çóç¼X²ŸÞÃÕþ qýæS8Ü@1Ñ1L„ùuTÝC?±[®äK“#Tš=¶¼Þ1ƒÃ®{Ž$ÚèoR~'à€ÙÀkžèËç2_Іs7rC ÖõÚsï„lM,NG[‚ÏgÅö­@è¾b'N#Š•:Ü ñº’8º+ÏPRôx–Vö­>ʼnƒÇ–^ÅLû’l™º8h6sÜpÜ„QŸg|¯þ’`UJd÷ËCÿA¿JN“Ç œØ%pä!j‰3(ìä(ÔŸ+©÷R€P{I¦›Á8´’æÏÔ{·Îžña3ìÁõ!ÓëûÿkÁþÔ~8¡QŠc¿ÙÃì¦qãí8®ÛÃþëȬ۽U}%  Æ¿¿×r'ni:–Y¿Ä*C}Ѫô:9Ø ¬y“o8qÜi‚ö\æIèá¹4v]*~ÞÈ‘XrõÍñR˜¸"qDÕ»‡ÝTø¿äævz!ækê¿îòÏ-.M{îÕ«7·ZÙq¼ ðÌûeHŠ?áïxÙÊÛ¬¦¥Dg€8“òœa€Œsóûô]õ‡â 7CN¨Iµ]ºˆÁ@öu JçÌÀ6¯ØÐiÒœ/k ÿ;¿XR¡ç”´…OÕÿÜæ©Eß3L¾ÉP©tí(ý† .7Í›äøIÀü¾­A :"ý.r±%‘:]þê+{¬<ôêÒôNUË 4MdŽ"R¹Z3&w0C9«È ÷ÀN åh±jeHC¹Û(ÕÞPY÷ÃB¶¸ [¥ü®<W=K˜.0ÂànÓ–l«vX¹¤xU™…D¤}c_DªÀ鑸ϧ é0¡îÅ«A0ù»(Ò?Cæß7Û?~»‰®A'ŒuÓ {qѶ«wf,*x7‚°•)~çäVT9ñl›7ÌÕ%×ΩájÆ—Ñ{­Ky­Ùò,kZíDÉpç8\k}0Ÿ¸ý~.Ç Ùñ¥8d陼 L~¿ëG¾ ¤v+ͳ¯U‘¢Ûµ‡Éø ®›xgÓ£'ïh[5 ¼ü#Û® Mv7(5éãx §Ð©ÁD•ùå7¶¹ýFÂOBßô-owvìëéP݆ó>Ǥ‹‚Eƒ=n«ÓáðG›«Ü+=Ø=úGS”ê2‘ÅF÷1ay›“-,ô*J ½’aHnÄ:“.g”pIâ—C^W»vÖèu¦ôÁª¹.騿/Ô9_î¯À}ÜÝë¡@ö²w”}¶ícSisŒÔD¦ø—nñc¿@\tØãóÛy»>MŒV‡¼½Çêèk(ø7äñh—žxZÑn¶v¨ß— üÂA¤Žz”½ÿl#ΟïHn~™M jœ×v+~U¦gÿÛ…ÐJH1ød‘ûÍ}BMDê]ÝaÂTâ# ýiÖŒ]“U/,î¿;üÿtìUÕ.(1@Êm©ƒv?~n×ùDØ“~#tå{—;#ôQÚЦ-⥈|;®ØCŸ†q›ÇÉ6˜YÞ7Ò¾W ;ùØx Ôp>v‡W—® /P9Âyu˜•ý÷žó#Ô÷_Ò.Ò|€ÝûžMÚ8aõ‹†Èј"R•šÇœoÂÜRа&×ý•«º W©½5EÒ·®·òµ“0B7òòa(Žè,ó>š(ƒ2rS¬Ië=˜j_Ðü¦ˆ:ôÿ}¾„Q¾f{k±±ôñÏcçƒanåMS2§0ÌüÜä³p„UÓÿÞOÁDGÌPœ‹^µqå„–SÊÏ×a8èÛà Ҷ÷”÷+·=Áž”¸Ò$eþðò5ü•zµë‚ª0dþ–LJ|í‡u&âlF0w6©È¨g„øúÞœR…њ⌫ÍHbß+ÅöÛS.}>J#ɸíu-“ÑcãÝŸ1:µP_ï}‰Óe;—ÙÕ&Ö`…^ìç¼ Ô9Þó;Å ËŸóBß°ãðdÂÞ'ß“€üHùK=ß8¢ÿqñG(Šïï$„”FC£b6#?´xßÑ®@‰G·Úo¶ÿýÿ=mªà ‹[§M]åe°Ž…@‘Ø'‡[T]~¨ŒmÑ´»d+·¹¹ø‚¦.cqYªH"r.…Ü:üù Ôø.|@] L#!tXG=«—Hú«¹É·òÿ…Uaª+'U2»ë®/?ôÅöUs>å×>0xmsß‹?ž¸¤ò>ÏeêåÝ ë 
[_•X¸õC/{Å4ÀBFFÿ0’3{:~ÛúÀÜ£´7ŒO’áU8kݸ… RÚvU&‰­a“F†‹]d; _ß/ù3BbzT|4”ó0»(u|æ×uø>u„kÉ> ñ×'qêÉ…Šáÿa:Ý£‹”W¡hä_‘ð(Ω²ÄäTœÁR‘Í œí´8ƒµ˜Ì}ä‚Å?œà|0é³ëîçéB¿ªöϯWç`œKzágº+~£–yjkÕ£fî5²>f¶¶öSS`!ö²Ì=b;´lRmÆÉB;{XÍI¹hí78ø«Ÿ w)ç¾4¿‡N’Q-/»fzDøÎ;aOÿŽ1›=e8Âè®ñä,»-ÿò,ŽÜúyKåÌ8yÒ$($"ù ·ÔEÿ°z¿”ÚŠå)Ö~8,ݯ³zO³™Hž{gÃK¥‹ƒ÷w‘ݰɠ*Ü¶É º#ãË’)XIaÜ g„Þ‘£AºgÑâ½–”[(Ñ´ÊùÀª-žŽ[Zª¡_—q ü.4‰A»ótˆö,•Ékðï8:ïÐľInó¾´Ùuwõ3Àä÷å&6lkQ¢úç•K¤r®x*üÚ˜^Ï«\ §½«&ºÈ;†3餃tqÌ#¨ärU8ó…¨jc„:HÚ½,;‹•·UÓ…lsNvkÓ[è»Ü•¿ÒÿŒW÷ØÃÏØTî…©——ï%Ù«@•y¼^´<©ç=™ò/4áÔ‰¯.EùH:cÈ?–fÚE¤Ä MGç¦: Ð’OÿÓ ÀH#‡[7) ç5ê»ú ÛžN›e(ÞÉ5¾˜_Ûž''F§Þ ¨ó𮍽Մ+§²Å›¿¤Á|ÃñŸ¾Ü¦8Ou)GÐÆ ûÊI>‹ïl±xö¶­^éc*×aÚ,‡ñ®óÓc«ÿw}u—K+ ÑZo0Þ7WÍØÁ¹N ÓƒVò¨t½½Yëãyª26±^I.]~å;”Ó˜bHå=¸ÅxA<K=ƒGÏm>ÂÓ'b>ûZ 7é¶xyW(6î\Àg]­~ÆQa¢ ™Ð( rÌ ²~‘ó°eü‡Kg]—² ‡¤ø}OþNO¨uVxOëmÇά].qÀ•v­ ñ·Ç‘¬œûS"@bºébæ`¾ûƒõnuI}òˆdò¦Ç ‰!F8W_Ù¶¼oI]m—*'ùa9Ü\ºú»lî¨ü H)qáE¹6\y3$Q$É áUüFUU¶ÐBó׮ɱI”þ‡nÐà\M·Î|N9J¯þçÅ =þ¡K>wÿÀ&™-å-ç+Ü´Ûï«‚ÿ2ïz>\\q|ÔÓ„$&ZÙo`éŽ÷Á×Ï`|£÷zó>wî°x}à4fg8e×kÀ¯‹2NK¸ñMÜNÜ) ˆJïý§…Ñ<ùºã^&8žwXsåûÖóÁõߘpRÜlä:c,‹©œTyõ†Wlx{(\H¢U2P{äºr /îA»Ó1á,éªLüýåM(¤‰ ,ðEä æ|ý:@Eä|uÖ=ù KxwÀç7~Ø&¸ñ'Ém*ú¼NíšÄŒC1™ÿªp!Iÿ÷Ü0'7ñò8 4-¿­¿x0û)½œ;o`êŽ ?5j`5uÓÈoT+Ý<âe`dàý»4j?XÊIÓ7ò À‚ÌMç±—|Jëȵ8–jÿxWq-N½Yð¥¾K]”¥ÅpSümLɳ¤ëÃ1;µWŸkZqêŸA*GP mxÍ&ôÁ\¡ÚèDèލPEcùŽ]ižÍv™ãÐôÐe­å¿Dêʱ·r LPþ¥õÒ ŒEýÙËÜ…÷©Í½þðÃ…©Þ<1W'¹dsÝ‚éÕùÛgÿÁ¦cŠ«O¢ê`”ºàxÞ¾hã;y6xÈ6>)IóÔÂ/©[ä 7˜ÀŸ°{ö°uÌÄdÉ¡ÉÏæ]NÀ>çü›/…e`þcGÒ^˜¿ÿ52º :%~«¿zãK zœÐ×RüªÕö¸kÿÆÖCž±¨‡ëgm‹8E«±3?K@DüŽ Š{²uBhW<ëF· ßðÖq6üCÕWþ³Í‡­¿2j7a͵q'hm¯ûµ­wF€dm°rç¶$˜ ²¨zÀ:“Äô!i{ØIþœ~xŠ„w 1àÆú@V‹ÆA¨åŒ¤ËûŸHL^ÅšóI½ŸÆ˜Mžÿ#<ÛsŸTx]¹xñ×ÿÊq´»xëgóNwÜüsHåq½è¶¶ lèøêÊîxE¤:¡¦é»²ˆýüY¿œéŸb‡dXìϰU˜—gšPÀ†ófBf¼8Ho*,DvÀ‚/ëgö¹öa…Ér*1'„@ó zñ”ÂLë’g­·×ÿV¯Pn4vüXnH)…ÜÇ Új`¢x¨fï d¸¿=„qþØøpõÔVò¹$SéâPKPÈ·oh^£-š2÷~{ñm ë#£EÏçë¨_J~ë}g‚'/Ø‹˜BÿLÕ»3¶0bí&‘¸ç Âp¤Í+[¨±£ç¼/;°™³áV•ßþVy\æô»mgg-Î7´Ûl{;aÓÌ3¶Þݧj«®·©¸ß(wÉþ,¹¿§ÙWBÄ\¶y^Ýúó÷X·WúÀÙìPXºÖrYd­6ßú†‰u>Á!7WÓ”Pq‘ýáÏtXP­þ›”ŒÑ ^nO_”`¿²Q/±þ+¶ܿڱ‡w|‹‰v¥…éáÆ$[…m¯V3:y¾Ë’ÀøÅ\çKT9wì÷?Ië®`b¼5†k;ên?«’‘›x÷Ùhç·3m Èä[˨ßÝ銩CrÕÔØ!jlýsú·Únsœ‘г¹Ì‹õv:3§ÀPöÜäD¬9¶EjÇ~̉€Qz!ŠÈŽÓ¸pä·xýT±W $º]€"rñÏíõ}àÆÓ7†eÐ]Äó%JæÎÜ6ü¸¤K¤Úõ’÷ Öc÷FŠÁìqKì?ÊQ|-Ɔ@ÃkÆvåŒ- 4´wM„ƒ‚¶ßkÈò'wù4øôŒY?{sæÕnÑpm¯ÀåÇÌmEø¡Tê(ÞÞÜ%ŸÆö˜5+­ý¡PG¼zÿ¨0–Úld©‹ËÅK©n°jåkþ˜Ç:_%z’„ó5Ô,*`1NÕЄ‡[_s,ƒ%i±/ŒûÁÙÓ?pŽÕoÃm!ì$ú1*hó-’“—ÂâG­ËxÈ8¶©ôþ¡”?¬ «·µa“AãXcNtÿxöq|iýz׈< &v‹wdãº[XØu~S˜îΦ™?E9['vÚû‡]L 5.Xà˳ûDiÎ*ÝWkRó†–l±Ù¹à߯àD)uæh/„íUœ†¬;;_´ CÊÙ]¥»M@ü¯Ì+Ár.͸S}¢ç3Ö 3°°ÃÑÖØˆÚ 4 d—ßõ³sn¸âðŸ²F–WÐ'Åk½CIAöOùâ¶×•Éå˜ hl®2ê€Í]’2é0M}[Ù5§ ûþcÔðZÇ1þ8:zÙ\¤J~ ŒË™6žÓÿ¾Àè†Ó¬ÐÑíñå#Õ¶½dÕö¥6÷Ùß —‡ùªšú_{Jp™ìâªõø<Ö|ñ¿ÃrR 7´¿ f!éâ{êÎ70ôWÖJ-˜lçpØúƒ ÷ëµùðȧÕúýÖÿbX!"y™#ÒË\zL—®.N@Ç…HÑ{—K]˜Õ Çþ¸?ôäS†¾sêé¯J£þŸ8JóaX¡¿`Úø\é>ì6¸¶¨ÊvÈÞboýKÊ¢¦?ãÃWØöDÆ£Ò΋°{½GınF’ÔZœèå±éýáÔX[õ{®•s{¶?÷!Ò˜a/,òN¦?}7D { Á$f›3ÍÖ?¢y`ÈÝÛ1É~‹¬šGDýh ŠSè©ô³8ó‰6ótn„î´ÜsgÆ–ò9è=B–Ndù•Q¬›-—¿c-«fÔCäh ɃÝ0Âøî“¤'ôEffž’ÄáIÑ/~æPô{ßý×qв>Øéc1ÅvÆwìûÅE¯æTUa¦{÷ìž'ãÐ00üñíVê¶'îÏZ‚žË´îÎPÐr"eI]«„®œû„#|J3¾e‡ ›  œŽÓ}" í/Õ§4`z£”™žž@}xsK¶úÌņ¶}=m†‘oƒåg9ϨvÄšùXÐI73ÆóÛ ù÷ŠRH²#©8ÿd†±É ví†$ßè·:câ”_ß<þDÊÃä?io{øÛµ³ƒì $r#‡‚! 
®ïÎȤݻ‹­#u/Wf±%ð±`BùÑmÎw¸K¸½®uQå­Ö8OÏ2 +³Ê’J ¤n,_ s*z” »–¯b8PoüZµl7Ušý=`ðjm¾ zbϱ¯?ϱ`´NZõݘçô¿çô ¦–T­OrÈà¨þƒŒïGý¡ëóíCg„’-úcÚÕ4÷¾,>yÚ3—f[¤l¼`öP9¯ÉwL?+*å‘ã‹ò#±–¶ì0¾BÿôTLg0ÌnÀ©é-¿†EXÈdøp¸ :ϾLÆÚùȇêM16\¹ƒ×¯Gè3ù4ó¤cAŸ¢¾Èµ]8s6'}Òš ÓmÔŽ_i Ä™S=Çì`ƒ³Bb«u(xËNçƒæïwÞgqµÀÀšŠžâft¾’>Ú<õ§|“‡¯º|…¡—ƒ–ŒtØ!ôáÈÛáüB°–ÏTŸÂ‘ëœ|†‰0êØTó‹xT/ò¥D@‚}¤ãõþ\{~Q˜—?šè̤%P—…ýú¦à‰$?‘Ö£¸hÊïÿ€GÇB\²]ý#Í…av+ñ[Ò¥÷ƒ­¸T/"k33Ãê%w3—¡náÌ¡:_k2šà冦ý“Z éPwÒ&epNSǪǪ m̰‘ê¥24»‘w“Ò†…?úÕu+wq¦‚è:ÆÈŒå¼3U×ÿh©ìöª?¼ÿʆºLÞ‹ÙÃL`öOÇ'† »-0@Wøñé~¸ÚÕ%šHmg¼v‡Ë殦1Þûu£rUîÇêÊàX“šp%/4–7v}ÇæÜÇ/݃|›üÖö_IÐ7ýÌ(Õb(ªÇnîƒiÙÛ6{Narøh½Ì;û¸ç…Ìž.˜»§ìê0b@ uÈÍ=k¯9"qÝkj¸”m=¢³‹á4¿ZUB`RœÎ¸’ ô7=PØñRþÝÔN´4Ù>NhÏÔñœŸ¢™äý‰ ²/¨sXéaúHÐC#Ì\“ÕÔ=1‰5ŽÂ©fÞm°rغ•q5WîañµŒ[¿9×bÀƒ)Mk©Ç, ££•—÷l$¬SÚò}¶óù[Nª;ËvÉ„^gÊ#Î;HÜ‹ž¼33Ä-J|wî q„º×m”7— ;mv3<¿¤ÓI+Ë0óî![—¡,v’ïlÅèa‰æE©nK LMöÿ½,ö柿?l?9~P ð¡ð“€ÛŸBží ÝÎok gpvÏ$v©£8U#FZk¬†éâqÍg)¿a®?+j¢ŠKÎr]ºÎ¹Å§?D^Ù@²Ò»‘ƺ^ ŒÑmPp)úíAÍÿ=ǧ¡dÍÒ7 ¼ÿ®çŸÿÉK ÝóÑæ¸õ:”ꈽÏwZ€ùæÒš°Ã8Du0¡çö^j³? öa‰Y…çqAÒ çÌñz¹äîþÅÒ^ñÿñ†EÛ¦Þ7ÿl±ÿ?§Ìo=ºn´²‡ò´åæÿ‡ãS5þñáèô׃J&̘¦óàkØÙ´;«prc]E¯  †-ïŒoèÀ:óÌa‡Ã0Ùºzò+t{»K«’™q<í“Õs጖’îî:Å¥þ‰5_wEå_|@QH*ׄ5ŠÁÃ÷«0xŒÇç±ñ?lõÿxâÛý$œÑqï¬bÇÖÕë7n øß}»â\‹aRü²Š{*L[(Ê >Å–†±E8lSÍcCÑ9Ç>áò(÷Q1qèÖ9µ~OÖ›^i¦>…­Çò…§"5qq­teD>.-«3ÿÇ\C `|”k, í;Ö¶LÏ!Ì,P}{1æÙgn2ó³cMy˜2Ea;ìü:ßgA 2¯XѺ Fv uØãÛ¸‚´n¡øÔ{ÅÍ™ˆ+ïUêÑÖãÏß,ÃþD¹9Î7Ø’oêúá‹c¾¢²“Ïv@êí¨Ef(!õ^öÀœ×ÛœByPˬ÷ ñ.¨¿xϯ º?÷´»Æ½O êw±ÂÜýÀ_Žf×ðo¹±Û)¥÷PH#Ôjé· Öm'$gvZâↅ{îÃ>ã\Ö-àÄ9½*¹†çµFQX<«&” 9ÄŽØAÓùŽGg¶½hÜKm*^j†¼6Œ[…ò dy—)sN´Ã$½§8™9î—pDKß>_¹‹Ë !ÞI8a£ßýç=.y0–»{yá¸èEÓïDpý‚¤óhü ¬åRŠ]ð%R}Ÿ )ÜCnBÖa½Õ@ÙH`z¨“íïÙù<ÕÖKå¬}þé­Ü¤ŽG†fV¿Ý_Ï@Û»çŸ'¼¢ßZ¢D»-¼ÎÚ¿iâ…[É å3ïßÀôœ™Êl$;O³¿ß$‡a!7§·ð “]Hlô0º8)\ƒ« gW ?b»s‡ß“çQ™Þü¤z˜‚sDC_’Çqë•60µüüÔW†h•TÀq®~ LøíÀào‹ÕTËO¬ `Ãô֌ɄnØÖ¢ÉÕç¿0Y­o¯›¶€¯Ü Xñë…ór7’Ÿ¾x¦®t üF­gêhKÄÆã“0&ºZ#¬:*Ôƒî2ðÝÒ¡„® ÙS0ßø<ǵÂug<.ý‚\ú+'ïcý»Œc2‘ÂÌåoÔåKÿ^í—ƒ1ñ&ýVqXÐ/·œçÆþ©;<½Ùqpžv_‹ˆì¡ßt­Ã²©¸sæ|®8 örã˜ø?œ¬ßa—kq»”%nÓžð!P‹¾|ôßÅ30ef-y[ ãzܸ”]Ïà܉þ;—oRAá±þ3׬¡¾ô”“ÂÓ`œ ÈnàÎOÃY¾œ³]÷ Ëeå¥ÍõÅm¾j”¹®òN_k5ÞËŠ…Æ£ô¿Z  çéÐÝ6"ýÅã0+@öIi:zUX b¾ž q Ã_è™ÉKB³™üúñ ñ_2>âà­Z¿Ì{X› ±ãЗ I˜?¿ëøg Xý Õ3ãü–w_ˆ7ro²ŸÏ@kÒ&•ŽMö½ÅoiÿûÃrOD.áO•g•-V}Ð÷-µÒÐGn9XåÉÿØ~X&³5â ‡¾s„Ý#ؼf—$+Ï€«‡·L|KÔ°í’ÃG ½í¾]ú’ë˜úöë"ÔQ±ê8¼ËzÆ}Š­ÌއNyþ ÒòúŸcrùex8¢Ò%(ÇEièâ~cÛÞéxýí\™Óqf#˜`Òƒò¿÷Lp|Ki‘ƒñÎèSÏ¿0Û„ùÔÊÄWK†0V|Úòs,‹“ÙN82á¼vlDíf .•ü(¼½Ó»_?6ìu£ƒ†­kl…–86c}öóN\0;óÈHÜ)VëÚbi@fŒÞ×a YŸ¥By`+¿óJDõ*ô_I;@Ýì³ x϶¼ Í×Û×+(ÚÃä²}AO ‡sÃ.(xg¦KTÇ1oí¡÷qªçþÀáùìÚ<˜kÁµäG'¢›Þ$@¯åâ‡ì°?tHÛØ W^Íм¼x'uµULK`i÷Ÿ\‡›XÀú÷Ïïk@šó—õÁ6zßžýrÙB•˜Í—Û«a5ï€ ,xµª+´–áh ¾Ÿ,Ñ8I·Vp–eÛó έÚ}è‚*Ž9£ýNš0/­ŸE~€1-*­Nà¬çGY¿˜Xðžàf¥^…ó&²”uµ¯Ûób–Åq™Fo¹¥Ì$*aü0yóå:‘ÆB"Ñ&É<³üž7“؈-b÷‡*ŽÝ†cåe—µ…`…êзçnq¸Ìr»†[v6l׉AóÜàzÛzsWQ}óg‹‚[aV§ÿ)sUTî.¿%Û\ƒÊ)ì§#†ˆÔVÎw—±Þ›u稽 Î;/í¸T õU}+Ô)J8ã8‘æóÛG8¥gëÿ¯™ûŠjâû@Š4i**ˆ6ºŠÄŠD,(¢ÒDD»‚"JQDÅÒ‘²CïHï%¡wRèõò¿ëþ¾÷²0YkæsÖLfåLfÎÙ{¿œø¤,­uVÊcÿÊŠÍ›VäÃxÌ‹Èæi*ŽçjíxNš‰3)“ÔC¾u±\ûÂzéTìl—v>ô}šï¸=®Ð…æ3óíJÌ£ (#áÀE*V· ÊFyã÷—Ƕ¼Õ—%rÜ•aãv†!ÏÎJ2X¬î×/öè$Ðóî=$ìWúÂJ±Êe3ï‹Øƒ-ÂH Ìúèúò–ŽÝ:ÕÎ+Šæ2><‰Ë Y³ŒËh´ 7,Òëpk´ôä¹!Ù1èü¹Û›ÈÝW—é×aøËš;a[VCëÜ­\+¨÷ÓØ¢³)ÞT¿9Àž'÷Ýù¾_%ÆŽºzaÛMöÇÈPž¸nz‹ù%l³ “òéz½¶È“{°k|E`ðh¹r,Z§hõLÜTºÔÓ 5Ôúê{×A‹Û£KyS°0þƒï´)4L°ë’ÓMr°_çlÝfì =YõÆä& E+q< KÅý'žg´AK‚ùï„m3ó·ø]ëxèŽc/+ ƒRÃv/j9‘&vþÓNÛ^H¸áM623Á¼ÎÏ_ô¶ï€×K-§u©.0°µŽóÃ=H7^ò†ªƒ7~?ý¥~ öT” ZÌÌ -¥­Ö»¹ÀòâÊö½é¬,,^MÐ…|kËO—óµ ­;øváfu('F,H | wsÄùÿ ÏE‘§å—¬ÓW¾e‘£'ã.ÁT27Õ:~7©âj—cþ²¥kBóXʧÅG±ÙDÙo;rAÄÒE‰ºº$Ì}¡-÷ø)L#»ï’…i/“OFI„++Æd»´ ‘c£î6NÎ-8(tæY'ÇNìæ7^²@d ‡÷ñ}Ëë›êc*@í!K±ˆ="P–IÞ{³`´%ËÝ~½ê8§n¤fÂH„˧ÒKN8Ü—][ï”Ú§fÛ]ý Eœ[ße³Œº~ªÄ9{6oݤo€Ê¨zá)´ƒ^úfáÃØ*›W¼™„ô—Fíl1Íçût°W2©xYa±d46ØÉÖÇjA„æZ®«×.!UÓNn±m*ôRNéX¨DCëÂú+£&û°z¯åwöm^@Oªê50«ÀéØÃD"8°9uE'ÿ£™ç›¡U'mçiB”Щ©H‘Þ§¹Sª“ÚkÜï-ÛŽÿðö‰ÃàËzkûz ~=õüeRwU›ð Nc‹Æ‚('å‘dû27òVZen×Õ „IYão‹¨rÝd±Bzï¼'|©8±T›L– ‚êåÏ[ÛÇvßî–J¤,„?Ïôâˇ÷öSûƒ|1rêå×Å&¢DÂñëŽöY0ý£&üê", ×” ÷­ÃŽCgÏËC»ÏàVáƒ8IÊð×SÆV CÏ1¤ ü 
J‡Ègfæ9Û’”Fœô•j|Õj„5²—6¯Ãá2·þ%¡Dì’ó.ʇŒE‹×qha›A¯“ü:4=]Û=ï FGM/ñYFBí’±ñ&þhã¾g`/ Ý=£¼úgkfâ”äè³2ÌwŽ)ú0“¿4l(Vvy@ö¦…{Þ&AÍ»6Ñg°µEB¼©t3 ?çf‹áó¢¾×ïUÀûHÅ6[?ø#ÿ€?¸ì¶EFü. œŸ}U·Îª©A7WY{Ê£c0 &uk ø”ñ\àIÀ!™÷ç„Î~º°Ñ•³¼kÀ»ù¥šaŸ T’¸Ëë©ñ˜òbwÒJ&ôŽ;G9c^nÖ=Ô#˜éÍ·@æÜcl'«8¼=^ˆU‹L„@åñ5²|÷z±kŒxX¿$NB jåªFïù‹ã´Æú¾&ˆÒŒ^§å@ÃÞ%NÑ$»1ŒÚyS*~”…¨ÕÔŒ¶`›ƒGBœÃa¬pãñí/[-[>=ª²‡Éô„$ h\n›Q‹‘Ír¹¬ríÂ0m5Œô«þ3þ:/“ü«Û1ZgÍÖ;GÀ€ôi†8ïL«´Í2™{GzÓï>ÐÅÁÈÛ‹+ž!¹’§¯Ø÷#6¨Ú°‹ÒoâÔVîÌxè3_¥üXiêÏRJ³ rÌ Ýâò<¨ÍÖ5Ù$g†E2šÍ²8ºoÑñïm0Ø­èÞ(õn&Ÿ½c¸6üóÌ<ûáÒ»usò±Æú+Û&joÄÛ¾ ö7Þ9¨vÚ%/õÛˆÔ…êÒÇ8µ±`û+ÎÕ628òµÄgÓ‚h$3´Gccfþ?Å嵿W}aèàþP³•[¡¼.ùNä/.¤Ñ8…2müp¤ùôû’~; O }aWˆ•ê/>ª¿{†‡Ï©Ž.Ú‰½›û‹&ï‡AÇÃÆq»´€|uaÈã߀3ï6©¶GJÄo_í༆ÕBò¡d7ìXsÄ;é£- -ÿ¾Òhá)* …½|‹ŒÃV; fú/á¯8S¿xY¥»:õðµãU»a$ˆc‹¦ü^Ho\è Ê8“ÇEœ”)6Þ•9ólH܇ÖYŦšCë§6Í_NâØ’Ûf-ƒîPì@/yüf5TXW/ŽOƒAƒåO¿KkÌœŸUh4?}ÖsJ|KFòÉëž'Ù·@ßU!ë$ ˆXuû{èñt(H>rׯº×u)Û»}ÆØ#UËŒ§ÏAQŒÝ–§#0,`ic'Ebksðyw ¨"·E»{¶BYÉã1J LFÈR³[ ÔÓr…û÷chzÛÃ-%ÁXí?ò^í{ Žw§_s²„f£3ÓÔýqp‘9‡ÂïØO YÏ#oßÛy¬T‚F6Ó§E€q„ÿpø÷lè2Ó0ÿ?¬7}5Ü Š•·Õ¤…s"€nÚÑY¡ihù*Ä úÆòŠÞ‘AšïÜÃw¥¡}O}î¯Hì¼– FUÙÎå4K[|Ááù.1”0¤z}<ŸàéÅÙázKêV"YA‰"–Ü‚Cäöºíå8üºIp©¤ ÖÝåÞUäˆå’ƒn› CáüVCNkQ˜Úzê–§‡9Ò’tF?‰žÄ¦FaÉEoq &èǘß# ¿µpÙí-6ï/†ÇùcM(×<ˉH¹R úqüúµ/*z£ôjY¡|›z.˜|b˵I´K”ÔñTí€ÄÞßuw©²ëì/zЕæ<ˆMÔoóJó!ÇЫq,µ‹ìvZF˜AǼ'²üK{púÜ€à‹³±gAïÊ´>,Þ¹²£.•ŠÁGçEò@Ô˜™ëµDüX'\³Æ9¿™»N€×âPNÒ‚ñ-þnÅolwnà2††¾å7O3ÂqÂ0¯¡R/(™™…ý«¼AÿË<¬xlÀ¾-; Rœ^ ¯ #Õ×tip”2”o3"B~ïrÿUoò ÝùJ’çe˜T=½HÌu58°q-]aN€®7w)¾u$œÜ¥;IÎþ ä¸Óú釹‘qäÞð2žlÝr2_´ØÈk’6µ\Ã6˺¶ývè?è›ë]±rš×áýê™ø~ñm{m2~¿¤b<Ú&=N‡=»ƒ‘bº~Ëý…Póî¨W¼ÏoL¼øûAòIE,kÚÿþ=N™×Y‰LÁ裋óÎtÄuPÊ(Ù€þEKE©øéLü^Qu¥¾ 2®–½à8 ÍvMŸÞxBšHw¼DÙGlyz]ÞÔâÐå ƒ\ÅVÀ°­·×jK`,N®lÁöo²…clØê#\•¨.Š6·úÚ¯ÙÃ`kxUI;àPjü<}Áýà™êpˆº‹ "ÅI;ª†±¿¦ÜÅ\é4£Ø„…ÅnÐ÷WžçNÁ ‘Ý\î’Ø{ÏÞïbë ø)úAÁ0!û¥..ÉéTÂÔ¦4ñÚjœà›çy¹í> é߯:¼ñ:év7m?ò›Ý†l¤­‚¡Müw½»‡9žØ¾âÏo,|ƒcëO³iÇÛã}ÇCáÆE0.HßzÛÿ ´éž/Pˈ›ðä¥Q¿€>/£Úø1hí¹l”__‹á7€Â¡ÊX»¸"l§Þ:Z#Î MI£Œ¬ƒk±œãÕ]õã8y2…€·îÙ-Dû¹ï'{Šòþ‚žç//ÀÚÞ±•â|Ø&IýXòü´Up¯õÈ<ƒ5çÒ(»CÆ;¸¤†·§2û"½ã±Þ‘(~ÕK ½œÕG`RÌÿÏZ‰l¨7+c‚õîï'‘±É:Q~Ó ‹ö,0-Ä–ÛÁzUìZ˜mS]¯>ô‡.dÈCw™QöÍÇìH"l—††;OÙ‚¿§@Åù¸ò¶{a@¶ã¢d“Ø*­ GÎË’¸ÎÛh}8´ÚFíÕV¯€N³“Yé§HìKSýb¤c±ÔwÂþÞoh¯Þ5¯âg:DÜÒnê/êžwÛ¥]¡¯^ô¨ôeAã8õd, Sdµã+ø1÷º±uT»JŸ=XóØŒÈQõçkåàM˜ÉT²oǺØNó½0¢òøvÜm$6kqÑ}´`ÀçµUãØnÈÖüU½±^µò/3qL+{l¹ÙÈ"¸l9Hþ4Û­Îñ¹8Þg`^î~ŠõÖ9o›GGºža£ÑqÅ™8<ÅX¯J‘k½q4ý(ÏÝÏØ8œ'Þ5y+Ç<…óy¤ 7TW4«®߯ɴ?b¡¹sÝŠEþ1õÕ pÒ†iŽÉ7S÷gžÃ<¹}{°PRí¹Óg¤M¥ú&ïÅnS‹M8”©­q±uæH?–.ѧÒcú¿¥®eêâô²û‹yLG‰œ•§M®Ÿ‚¥àÕ­ŠƒØ½Í>pÓu6,¡ç˜Z¥i9eœ“EpÈ &ë¶U"Òý|>U<0†¢³Ü~q…°R²ÿ¹ô‚JèÛ2ßÂxŸ'~¥ @í#¯¿$4cŸâ€C¤êU-ò}ì);—×>òï6ï•Ú>I©ðÃòGÏw{ó­Ôp]Sœ”ô¼3¿ š½b?¦´áÀÝ|ûµRºX°yýö…Ðë«ô0°}ûâ}'Šé.8úAÇÝ2¯Ç7÷™<¬ÂñÕ|¿IArõ¢URKbïZSõ Íϰ {ôj\¯$Ð:>º¸û€d!ãkáûâ 'ü»õ“c`Úòå|ëëüЬ4vÈ7!›ó5.:ùAË§Ž Á¿„ñ»ö@ØT¬v_8“«C5FöwápQElúô<5–ï°‹LÍ/6zUÚ(c³MªN¹4JßÓP¢Tc$´r=4Ä¿ZzLFÚ×ñ8^Æ??\ T¯í…’K'Ú æaýš ¹ÌC¯»—aÆ7M¤½Yâo6X¤ãNö·HÙ½™]cô;u×}v&`>§Èº_ÛRð*ßÿâb"4¼ó»0ô`!P>®x/£w7ˆ»WσzòWëLŒú ïV…^ÁºG]¯“wÂ{v[ÍàzdĽpÈùÊ ”}yçnœ£BÛ¡¨¤ª8½}¸óº­.©0{梎äQ§…c‚Iê?«f@ÕcÛ”ù·ðKcÊÛãvHËs 2í€n¯1Þ›!_pTôÛîó ªÐý-Çxíót7xí7 ï¿Uì/‚Ž#ÅÙÑ®Z@½ç^Õñã3Ðv­jtRÄ6ÞÃßJ®{%ZwOÀ ¤¿;½¾„éDÇ%ŽQ';9Î~ëq¶"ö–úž ï£À×Åõ¶²…Ðz*>óìMGLi6¹$°kÔF¤X«šÝÅìŸÑöÛoFᨭ ð e_Lë·Ò—% -ûäfT¾@É5¥Îпôë[º6KXZQ6/ÚÆ•ç;oøýB/b|*R-w”-½Ô¸ëNÜ ePuñõÑZé,dHïW« ìùxÔÛÀ³[ÓMÕø5¾a)ßõÞíDvÛ@j>Wˆ°ýåŸyHÆM~ÚêÌ¿ …• 7•–8Éã·&Á`Â(%É%Š&×œí»šu2.‹ŠÃدüŠ(гE\æpëÈÊL”ý<¤ 5ßR†‰óª}z·ÅTiÃÍßÇȧ°omþ…ð]D¨ü¾™º(¯ÒòŽ<–0VÁŠëG.AY ß=·))¨å´Ó<ª‚¸íW­}O2=&í—2œ*¿yr2±:v~Ì6ô¶ÄÂá’ù–®·‘þá”Ìi‹Tètè Éj3"q\‘fp…@žéÍiµäœ.—ÙïuÛ( YÖ §wc*çs*ÔWJ®xç-ä{õ¿R• BåóVWüB$L– ç^@êÓï[î{CÞ¾ˆ3¿mÀoªeØ›6õh šx ¦XmÁ®ÏÖÝO×>ƒ„- ½[—nÛì&…`HÄ2å’òÂ×!н`Åžðù%@>ct Iy;ö—ÜK~BœÀ±çí\’Å ÑGô—û&?µÛ½.r÷~,ëÓ³Ž<ó‡ùRnÒy~b'»ãmµKPcr_lU0Ž«+·&߯úÞ\ćIØ<Ryòon¬:ÝŒ Æ‚Ýçk#]"~EbáRÍ"Deª Ýú´ÁÈ«oxÅÒÀê´;>)¶©òÇ"Åœ«×Rý§ \DFÏ ãÅî¥CGÙ`l×íoU 
è*§sëp&¶žûYbòõNPG]‰Œ|yUJj#”ŸZÖ&¢…ƒWÙ÷^Ü»èRïø×/ñÄgû“üe™‘8ž,Ûú9¬•­bÒB£}÷ÝgêÛK![®óW@Ô3ÁUNù^дùÞ¹ÅVÂÐrö˪Ñà“@9ÞµšÍÄH¸Q-W|‡SÞÜs¶$CºeïS‹ƒ¶Phâ±§ûF6”>i¿!qHèÛ7¯–wÆz>ª›‡1îœÉü:ØÑÁ%³#ã 6íã=ñTZ¬ Ü^눔Æ#ŒšKÜXõ¥wÏÁÐ)Lä}Xœ‹ðv1Bô`´L†¢úpôm¾xh-ß+è¬è8ט÷ÚËÚß½ ” ­e:z@£R¸Ã„CÉÅc’{÷Ó°`­úb®¾D ^[GtÜ}ó 6ž8÷f 6lI«Ë€n1óE×Côï;>Ë©@O‘|ŭ逕bÆ>…V™HÍKö;]š#œBßÍß#­Mz*ãØw~û…´Ço>\©J&²¨µ‚˜â›q&´±KÍ"þ£äyH_:ÿN…*NýÈ‚ù¯è@å]`£¶óT{xõº¹È¢O͈J¼•DP·‰Ö{¥;VAY~¢—ε˜.õmÊèæ¨¸y,~Š9Ÿ²4“¾†‰ËvnnÇœËqZZ8±êöÞ$çùØœÿT·jPºÄ6HÄR |þ"^­ÚËȸÀHz(u ú‚MÄÃÓ.¢‹·kÛóëUØ·9úP6ù<‰ínhLÚ» Õit*¤¡â`HÉ[i$Û} jêkbŸEHp£¤)2ê}¦Ïê¡·©òÙ¡äT ÑÞæáúòeÜ>ßö(„/)Šª™^ß‘Vg“°¦—zŽþ.ç)¬ÀšÌݼŒ"läߪ²ËCÃù5R5!má­®£ÃìÐêá'hÅ…C +2uiPóˆKìÒqD{>+TTÆ}ÇðÉðMgôx£ªóè >T¿}&ìQô³ÞâŒÔfRÛ©ÅîØXÖnÇÁe€Ã Ê5NWàŸ°xξaŒo¼§sä /q>7úóŠÄöö™À;Ë÷XÛGó9ûZ:×TV܆ž·bÇì%0]ZäçMÁëØzüDêVm µ­{os\šD(¸žµ¡{&o^°Jt‡Û5MJõèÅ¢[;—8lù†=K|ÿù lÙtíÙÕõÐ×rmÚ_r¦Ž04\fT€C&ÅúåY0­ôŠ_V0)uâ}‡uư÷›ØÛÌã¤|ñÖ¶0†˜#*ä#½ 3¢wi‹ÇÚx¹ƒ›{ýØÚNë=DÖÝá^ õÙऌݷä™ú-%P=cíò"hñßÖê×ÿ’îØBŒ‡bJIîèC$í9‘Í‘‚µ‚÷]EžDªûC߀©l¬8WÑåî ©Ú®{‘†íláÍK Y³Hkâ]PLDZÂ}Æ Žz]‚Æ/ÂßxUtÃ¥:TòÇT:5”b—®ñŒÞ ƒ=¸Jò@ÑXWS˜'¶Þè7^*b w\cé6>Ð÷Óý†PfVùi)<”Θ‰K]¶™ü(éç±Ï+ç]ÀŽ/»Õ; $±‰êpxCÝ juX^nµéÁ=jMZÕÀPð«UiIÁöÐÉþozsÿUœÙ̼šçðë!Œ\Ì&wÈ`çB‰w»rͰkÉfWïGXqgã=î›…@›¸¡¼eü;$Œ€§ 4©zœ8 ¼ʵ ¾Ë+7Ù×[Ø]$"y3‰÷êú­0‘! ¶â¼,6Kn|-ü&˽žO¿»% y£ãEŸ¡1êl½ÙšÏÓu$Æj¤œÈß,y§ðR>v½ßk¼)æ<ô§J™Ê[a»þÕ–Ü6oÎèîZ¸ØþœáË œÉ+S&Šh@ù–¾*ÙÓ,?>I+ðÆæÝM×·ê bóçË–‘ÀA¿Zcw3Sd3:äуv- qP}ÿOëJh}w® êºö¤gÞò ÊWM’eCÒ^]ð›~–4¦nèMˇb¥ap÷](±Ðßù’t3Žß »¢pbñ<þÑ)èS·ÒºáÖíµù§ ÍÜ7ù¥[ës¨ð–1ò$qŠG¨æñáЪ Þ«†?pà’ÈÒ•h¨‹Z«»¬ òÓ#-pÔ›ÝÄüÝ4ôXåõ4L G,Ñ|Š8üé88}™_(ÏÙzŸÇ‘ÖƒΊ7`ªƒwæÖpð ÎÏ^¡iõÖ”$Å>özž·þT´žSqÜ %Î¥m1ój±¥¯‡*â·°#}=‡Ü'­cØ—Ú=$_»{/ïO=A<7È{!€lÞ8úlñh}}9Ħ.)Šíñb)=`Ô·ÖÒæp}ðjTï÷O킜¦5ÔÓ/¬ ÐfÒO‡‰Ÿ±ÄŒHÌ,:سkwø˜;¦nÅž}$q’Ñ^èì¿RÜTÙ‹ÝÊæŒ£.é8å8ÿž‰‡®¯ì¾ã¥ŒŒwªû¤¾µCº£¼Ú™÷÷`Ò×YO›¯èœÐ­Ïƒ²ƒ×{Þ‹¡ç’ßI¢¡áг<ès½Ù^¨Úñ~‹âVchûY;šb}‡VüþȾ¿È~¦Ü!ûá…YÒ¯¡Kl‡ÞåÌýغVü»Ë{9è{\ìÌÓ¼¦—^2 Jn€æh‡û[¦ƒ`Jĸåô¯6hJ7ËÐ^ ƒÖD©rbåB[áRîFh°`O#V„n»•ÍÚõË =e£yòj ð°?Ö¶ãÓg]wl›©oàø‰»–Pk³'ÿE4©íç¹Ä'Ž”À—›¶|E±š’_UÃów›Žþ:ï5:eÝ1¾ó`è[{ æîc8üh‘Ú¯H=ºÛÜÄ#âÌ<í´÷@¡ôçG|Ðz…¾v§c6ÝÙªÆ=y;‡®½¨9~špQÞ¹õc8´<¢D9ÀiùQ½â^ –”^r+» )H«cï¶}ÉSëáOÍKì¢0¨Výk¹\Òq¸;Á,Hy¹æãø2HóÔV-Hº¯ûÞ݃Ôw²•5Wû¹G[âv*©F†Ï‚OMÇ¡@¬‘öY©2’‹}wœÃA’ˆ$eÜhIAòôŽ#0QöýÁ«[X^]ûJ=GƾðLå,1À¢Ÿ_%¬1 cÝsÙ°Ý©D.>ÙšØãóaŒm¡nN÷Nèýx¶ m¿‘p³+øÒ¡"|ÐqͿɜCóé­³:ê ¹Ädh)ˆ‘} igL¯ŸœІ8ûµÍ» 7ÌïÉay¤ìÛ˜3ŽEnÄS«j8I\ËoÞ~Éý zýª/Ý„n9öDs)Eh™g5r¶Õ“h«M~ÁŒ =;«‚)hsØUgâ.fÎÿº²x¡ —¶k„BÛ¦Òë\^£§æ5òNìm”ÎJeû EO÷(ÈIžƒ¢·NDî”Ç®èJ¼xt ºx$Ĥ͵¡4+:ï·æwU«öáñhÄ|5ÞSµï œSwHñÌŒ£€‚Iãd G59H>5†ç †{3uŽØÚ³¾8ô]NQ· SHß&f(³¢‹l–‘ȵ@¶N¨®Î·‚©à¸Ú v@¹u“wQô¸m:«l‰uD{oŽÀ]ª¦†]´\áy3étºb×Ll›¶ƒá~·8 a°®÷MK·ô .ݸ|/¤,‰,[zëæ;ÿ®?4‚äÃ=Îj“Tìôq†ßé²Î;BEþr‹i‘)èÉç&Q ÃaØËl¥f3äÐýÆöºÆAëÃÍ6Náw¡¯¾'Íà|öfìï.Xa­]è½Üh§ï*×)ýÂÎL‰v>,O^m{… BâM¤ÿ\Óƒ¶iÁ²e…²w”ûæ©—"-úé†z~öå9õóí¨"ZÆ(vYHÖôзñÿg¥nù˜úFÄÐVkGbË ¡~À®ÛÜ_ ÷þú¥mŽÃ‡Ñöó¦(èQX¡¶X? 
‹_þ>0цíßôínVƒjÆ~Fóѯ8\3²iú÷OHh”;.;FäØ[è9õí;Œ_éãNÍ»'•¥Ž«Íä­µ·Rñ4‘Ý£ô…Ži0CúV;ïÀRk¹·ÝUS8X˜~¼1ãvœÿÍ›áÇ Tµ×>ïu[gÆWƱMd"»SËÆŽ{üÐ9ÿDf§”Rª âÏ«Ò1T“í9¢]é]ñöÓ …ðÈyùq)¬}Fø¸D‚ÁíUK›±Ðcqßó‡Ø=Ö±îMå¶!õò`ƒyiÐàS¼ö£_ò±þØ&…=3ê£W§rÙÁøŽÜ€‹Ú±"k]è§<"&\ó“ëK„ÎúsS°æ­µ ×>þ²£º\^¨wN´8¢C¶•2"ë|0Jöq_Éo¤›ZX9ÝTgí–i[¤_ê[ïH±Äö´ªC4¯ÕH9šMPï$r|’ŒH§µàpõ>OµwÎ8Vßɦ¶Ù é?)!òCH/ìn–Ó­¶ÈÝù»öªB5ß"ç#_°^áÂÅ„ÍêØ°ÕRX5TzJè{ páàêû/Ï”„Ž×µü!¥jýùP![ ´ú¸áͤð½ö|úü;V{cÅ„ñEµ¿SËø°–ä}öqa”Š ­sk24i:v…¼\ƒe×üÄC¯>ÇŠ+ê„TѽåJNÝé¦&‹6&õSý{ÉÐUšãzXh ´O±—øıÝ<.Úx¾´¡;dEêЮޯq÷¡ŸrQé—½'v®¯Õ4E™V9Ç>m2ýÎ[(bÁÒ¥}ûþ/]N>nª<¡Ü̇ÙbДT²p_©D{ÿf—‘—ÆÚܵ%©K-‰»=Þ?ÔƒbíÈñuÇ=‘&n,5f€´mY ¯Ô!;¹è#Ðb¸:–Ãr· ›ãìÐ|>¦÷Z×U ª“îæòÌäõËT—Õ×í!rHôÿ™ôÏ[ãé¹öTÈ?¬0å½ëôðþ>dóŒCw'¤5öBña 9C¨Uš÷(ÖS'–ˆVeäÜTü° ÊZ5_“Ëø§Ž·V^vÆe2n¾\¶ÆÒÚQÞBQF—hæ2 –¾,‰‘‘‡‰†Zi룱*ªœvÍÌ­ÕõI'ú¸Îg»éZÍ^Æî÷åiü¹üÉgSPÿTíÀÎóð{åÕo畃G­ô™ô͆8¼óþöÑ•—±SqÜùxØ*ìîéb¬X¾;#­Fž©ôbgrpÿ‚Iôá·Jì,üƒ¥/®þê~[¯«=»N(¢ÀH‰LŒGG†Î;Û4ÁЪƒüËa‚gunÜÛVLlx4­9 57.jž1AògÃâ¶„œ³Êjj;i}Ý l:SÕ63~)‰BT9¿HNô0¬ý¹˜È¹=ïÄÒ ¬iiKp†–'vBn¼'›t²m Ç] ƶS ÈXÕ5;Èèî ~çC5ö‰-ÝÎùœH°kÕ²I”ÆÑ÷‘Ê*·aGàý<þ!G~và —ÐL7¶éj¨6 ºù´³ƒÎfûe¿nÆ #[ÕåPøãÈjÓGV0*y7èÙ hé&4-oïT7ŸaÇR a!í$¯wýÒLuêÄ%Iaè ´ _S´%„½Ž8A幕¢c?â¡uÙ…³qñö8 ªl½˜FbÓ·OAY÷î2.h»È¢{T6nõüåd3¼ä¸¥ºWŒÄ¾öQÃåè_Ðé§CÖê´òŽ×¸©"χ°1Ÿö8»6öG5 ð'(I&ß»‡S¤—äƒÔ™yjÓ°ŽRöJèŠP‘Qƒ±ÆKë·vÇC´*7i‘›ö7°X²cÊc-¼Žhe CxáGE=Gèßo×úvõm$YÉe|ú|ÊÎ74_ûe‡CGÝŠï¤B»ãWÕ+Œ¹«v:ß*›È¡É6P¬äÒd:P1{Ÿ¸ Ó?ŸéŠëB®äçþÇÃÀx°­rüÂè.=ÓÇEÂNáóÇÊi0Ö.ú§Õø(´ßýš<õõ(|—?ëó! K<£©l|Aâ@Ášû†Oph]XÛçÃêPú*ü!ßS-˜z­«—úÁï8>4)@ÿŠÇM¿ß‚Aù¥W6©¿Á®œS’Q8\éë¹X,•Ä+uâø«˜$ R ®øP`èß ½ ×–­TB†Ê½8õÖvü§Âòi¨œ2ÒÎÊTÇ(>Uú¥aÈÓýn[<­WµÕz<ïMRΛ"(‰>Qèñm Ôç<,ðt‚¶L††ý<¬½£ó]LqV¤BÀ|íìNjáÿòÝ;ŒŒ¿äJMbGéªÎZªVõê 'v#£©6Á±.Z¿°E+Úãø»fû]Ž ØÔ8š—°ƒÄ–•{âD—(ÐJõ²¢·ùŽòåßçl!«èÇé`³µ¸÷\ƒ„ˆ°ç Ø”¥oú€¤z¯ã)8va%¡ÿ˜}¾±ãDŒEg[?¶‹»p¨®À0ÎÇñV!Ú0\S÷äL¾.ô–]°õ5""Ù£ ¿çhN°JØVßuöÞ‘•ÑÅî'´=‹2j€¡.ʱE}7væ>¼»&;Ý7®}:ß)p­dlÈÅôGMbm Æ'ÉÞ«: %˹Ÿ»3Í|Å™¯^@#­zÑÆo3y™ßfK¢'äÇî´~@…ª>uYçÒ|¤8¸Ÿ½Í~ ^è°k úi"å[„Ì]5œøîÑÂeËÀÁV…ÒÛU iù÷¼ÕßgêqîÊëo6ÞÆí¼£ qcØñ&¾êR¶’^ÛÜÿ½6­ ¾ÆòË×ó4Œ`mâÞ¼¥çªüåèØzì3~¬Íö}_ M:†¡•áÀM™©#/K”h‡¥ÙÈQJ\-P¶Aã‰ö[wLÉ·°ú-Z„ä&á­a²®òð¢SHænŒ3 ¢`›@Ôà˜~ LM¿Ú »¡>ÉÃëm”ìjá=Iš÷$åø{NS ´¦~uN«ƒö'òao6ùbˉ“¿n7ÇŒ§£„Õ_r!­ùÑäËÉ>h‹ñ··¶×šÌÕ±d,³©ÄÇ®àÕê½2Ælr¸Ô/ý‰=ŠÝ[§¥9ûuaªS²ÿ‘:P¶±©;êt`çÁÝÜE aGà@ÌLÜZXjûû%fÑØ,N_‚¡u§S•ÿôÃq| y&NK»«¿1O³}o¹ÚA ¿ï&¥6BG‘ÛÁl'/oi¾úYÌpT5÷;æY·$;nþè†æ_^x\ò^r®cTœ§/„îˆx„#{Èäæ4g _5«ëÐ&Ö~ê<=î£AQË::K Ã»TøK˜8´îq.R~ã/3¬†Ê±³þ“©CI „-q ÑØ€iì;ƒqM7RÍV èŽ!ÍeEþóêØ¾û:íÛ wè]ÇLô#ñ¼éY²?šŸîØ¡'ò~ÆÅÖ/‚ÖL‘Eá«]°qDjOÑ50¤ì×®¹½W%¾Á‘¯[ï€И mOƾ†óa2ì Xíåq©·°’Ÿw(w¿éƒ–o\ï†öþß:qø'ùôáµå³IbuxÞs¢ÌéüOc \ºæZh]ñaµû0Ëš²¨ÒúóPØlbéwnÿÛö¿˜æ¨ÖE†â/“¹rë}c*‰×XÖž‘Wáã f5–A9; œÌ4)‚*ʬjæÜцԠo+bXUL^:e=øhVc=C½Söû0ÍöMÎu9Åžs-‰Ýýì7gUyV¦–]•Yn¹z6±ÐdWJ`$³„ô#¥sn£Òï‹Ö*–•fz'ñ2Ñbvçsï ߯Ì,±]¿ÈçúR»¹(>o9–ù›UÅð,Î|¥²†YM’ ’¤ИæààS™£kæZè\6Îñ”c)Ëʨlé^Ô3›Ø~\;~:1MߟZWÚžï˜s‡O äUq±±ªÐÙW¹õ¾©Þlb[ˆºFξ5̺”ʵ·¹çZ´"è7›˜yêÝþ) ™%Ф‚ê¿¢çÜæf³ÆWI,kkM6ç¸ZŬmZÉ׿Æ,±Æ\÷ãZ©Ð9·³áOè'VÚE—ؽi™Uº¦FÁª5Å̻ռüô"ÆçܧØÛS´"YÖ®·o^s,ß0›pÏrYó¾CL³éH_Ö¶þÑ9—6ñõøs=ËÚ˽hÛsqåY¹'èWüG‡i6«†Èkq¨Mg­¯âCV:Zš‚¬š?«£¢¡>‰&¦Ùê&7¡±JuÎ’¾¬½I“UEŸ…ž¡' ²³Ú2ϽèÛ“_L³xµ¾`’Nì\ ôÕ?ÉG%³ªDöïJMÄ Kgêø%¹Îw0ÍÆo>›sÉs-ÒÏWÇO¹dUvfÝÚ=™í³J]û´ø%‘ŸYbýRÝäaæ¹È”$Ñ7/²YUìz&çò9M`6á×ÚÓm^6?˜fG®É­Ð9·úæõ_ ÊXUغt5ýñlB Å` ¼ô³ÄâÚb§‹—ç܈©ŠÑ¬ ],ëãS·üÏjôÁ Câ~ÌzÉÁ–¸ÔϹ´°O¦e¦«YÖ~Q¶(gµíXÊ©N!~¦I}ßsåžÆ«¹+ ªžTcU¡õÃ×µkgµSfÍÁÞ2 ¦I›ºðXj£Ø\K$h,ÝeÒóUZÏüøfo¥ÙÄ‘‰E¡y{˜æÀ†Ç æùŽs- ì¶}uäD4ËÚø´pË u±YmºÚwO¯“Yb“ßPÒÏ•9·¨mºlÉ´'ËZ\°€ô j6¡#ð5gÇ/³Ä&Úꣷ“ŽÍ¹5´÷<5ž¬*LÈœõg¿Ç6«E©gŸîâfšä/«¢ê‚æÜéb1¡û3¬*R>¹n8¦sj6¡{@ç^à'¦I[Yêt–®EŠQЇüȲvâÂ>úrÙ$ž¯²: ˜Ä,qúœC˜Ù·è9·9Dnß‹.–µö’Ò¥ÃϬfšã—mÜ:Å4ó¿]þ¡XÿlÎíZ«\”w‚UEÊèaûX¾YÝ·qÝù’k˜fCIÂRó÷½s-­“š·-q!«Šá¾KDÊÎMÌ&t]zšÚLdšAZAßÜ|æZ¬è4Š]f¡Â²×0ÂW9¤ÍêÓÕyú‘ÚÌŸÞäÞ/ý1×b¹Òkuo[ –•òÀ÷‚túlÂત¸gÕnÌɹãŒGÆ\ ÔO©™ÂIQ,kWÏØ)‘‹%³‰/×Y©Žµ3Ͷr•Ÿ7×z̵À ì¿°vsËšò;;ÖV8›˜<îüaM¦/Óˆ¬”|¼jîmKldwfY)®ã¼çLºgõÕfm›#« 
™f‡þ¢‹zž97ÛaÓçß™KXUh!´má{*6›HÛÙ’l<¡Ç,¡=Áhíðm9whƒïÊ×.A¬*æ½Õ 5N´šMÈ‘4’ÝI:À4»ÛΗë̹ݞ›ÃúÞÝfY«:§?^+–ÕÚ±§é÷˜%fol* ûÌ3çŽÛ3š¦ÇóYÖ–Í“™µ³Ú#tä,ºÍ4c§5­ÍÆóæ\Æ"‚¸vŒ&ËÚðLëôäô×Ù„OAknzê„1KM̼TDù=çÒ·~=ß}Te-4JÚú.dV+¸ŸòŸ¾ZÎ,!ù×›üç»͹4N˵«¹«XÖRÊÚ›ût•g‹C¶{®r`št,4¼¾çZœwƲýMÖU"s‚ÇBnVG€·På³Ä&6ÂT6yÎ¥W„g‰?`Uÿ['®±ÿØõuÚ³Ú¶BŠ—Ö»ivl[PúÙIrÎÔ® ½å4« L÷y4›Ø³àÑz.[¦YŒô-OÛ̵@]øöÄpÃFVóØEŽ¥ÈhÌêT¼Þ1…L³~ûá³ ÂsîDpbT}>« uÓ6KžšöÎ&‰£D±Z!~ˆYŸ.G­à®÷s.eZèÑ£Í[XUlÞ³îÅw1Ú¬ÖR¤æ…œfšÍõ‘r{¤çZ`ÜšÒ(oãdYi9÷„þ$½ÕéE6¯>ŒÕ»Pô›eH©ýæ7«-Ïm“ªÎ…2Kh¶|‘âén?×bÓè“©ýþ¬*‰à½{Ò9r6¡_.Œo2~šiö­b8ÿë\‹?/Uxž}̪BÍÇ”w ?øÎ&N‹ÿ“4gšQRѹñÑsnÉ©÷R ã>²¬ ²Š?£k^Ì&4ßÔ+‘«yÍ4ûö÷±õŠî˜k±†{>îœÏº2ŽŠÇmRy1«­[RÑ f ƒ»U²DÊçÜlÅMòÙ¦¦,+=P±Ÿ¦g3«E"=®iÉrL³o^—ü\‹ô†_¡¼Å7XU(æsUÜÇ0˜ÕΙÖúÂfIb×—VˆQ+šk@T2"ŸUEÊþn‡ܳ }Ã×2 2MÆÁÇÕs-‘Íc©9¿Š«ŠÔ4ã×g÷ÙÏjïݱàEwb˜f®å×t·yQs-4^Ù3_x#«Š)› áú¬Fÿp}e¬¨Í4Ç·$Š6lžsË·ŸUbh°¬CÇò®X }™ÕGÊ£ÆÜgLsª©ËìÅíKsnß²g• þ*,k¶Ö=ÿÇåfª\oZøê“™fÝ©%Î̵H·tÜxÃMƒU…/[çŽðÍ&}JþjÅ4iZ•O6ðεÐ!Òr##“̪"]Ç'—'}`V»w¶¢¯0KhuйúÁt®Åš€t³=º/XÖ&ÓÔ…¥³ÚU׬ïÝ0È4{ fR_¤çZ¨zfÚ½s¥«Šªù®ütßYmV¿«î«Ê,!é„?_ô°öœ;D¸]sW¯em”lûvÚ~V‡â}–èa–ص¢0¨»u®jWuR/«Š¤ÀÀ7ãv³ÚI“˜º[}”i’Ùå •åæZhÛöÃlžË:rïཧ¿ f»OûÏø©Ì,aÔúÛü>eÎ¥½–w08'ǪbÇšÞÈ´ïf³ ¥ÏeÞÜx—YbÐ:ÕQë*s-´µÊZ”±ª$©4Û#ÍfÂ6ÌŸV6[Ì,‘ö2¶ZÂæÄœÛ]¾ßêœÇ*V¨rôES¹f5}R‡k™Y¢WØ5#ïaÚœÛÿpûæ`ËšÔ¹ü‡³ÈlB×öÔÉya©L3Ãí×:Áªs.¥}äv³2Ë:ÒæÑ¸)a`6±?õBák ̺,{¾(½º=×âÄ ª÷î¸nV²;¼çoËšUÊ…Þ´|“WÌ»‡Õ,DeVιõŽ²Û£Ÿ«±ªÐ¾jû×ÜGA³‰ƒ®ò=ûœ¦™æ@€¥K*eÕœK—ßÔ÷’β¶êåà^ê4›DBØ.e‘:Cf‰ý« ­LäæZ(2µ§¦žbY+ô:ôÆœ˜U’Ó®¶èUj̳Dc÷Í‹˜sÛ?Eö¤¬gU¡‰ý·´áG¯Y¥©õt)bš=Gž¿Èîœk‘ºøö;¢©7ËZ¦²]£êŒÔ¬Ž>É×s÷,`–¶ýTLGÅ\‹•_ßÊv]Á²†nQªÙ¼sÍlB¦ÔòÇ5w>3MêSéÔAvÒœKþšr|uËÊxùHuƒ«Ñlbÿañ«M‰ÌÞj³­¢ç‰Ì¹UõW…µ5YUl9¨“Ÿðûõ¬ÖÇJ}ûuÄ‚i¶]6žšŸ3×BËÅ®gÇe¥Ä|_±¸yVË«È ¯ÓìoSÉ>Yd<×bßXóZ6ÿ(–µ}›w¸ëä·Ù„Úhàß³å³DÆþ«Ê§æZ"Á*kë1úËÊv…Ãgìñ¢ÙÄÖWýÝw³„žÖgR~ûKçZÜœkþw£1« S‡ÖÌï¿y`VûÖå,ôجÂ,1ÇÚyiÙUƒ9w(øe}Só$« -9<ùòrV)U«yË·=f–HÞ.äi]è;çvŸ/ðyÅŪ]#e\»«t6±×ümÎà ¦YC(0àO4ška"úéVÎÏXU,y ¯D-›Ušá‚éò*¦Ypåøhãƒs-Œ1qp™Xϲ’V{~˜MœH'>ê#J3Íž/'®æZ¨ÙSs&mËZ?+­â´u6±üiå3ï¦ÙÏw¥>û…í\ ç>v qI±ª8²E§gUÆÐ¬æýîûu¾“YBúð_Åé¹»¾¦8¬eU!Ý«Îó Ùp61·û =¢˜%ªU{q­r®%¾MDŽä³ªÿ['¨„ÿ÷3›D“åùG °¸B©“ÿGJ°¸«ÿÒ5LVú)Ëâ®ÿK72Y¹¤‹«ø—*1Yå¤ ‹»ù/ÝÂd·þ#UYÜmév&»ã¹“ÅÝõ—ª1YøGY]—¿s7ɺüÕ ,®Ëß©A`².ÿFM‹ëòwî!0Y—ã^‹ëòwî#0Y—£Åuù;÷˜¬Ë¿Q›Àâºü:&ëòoÔ%°¸.ç“uù7$°¸.ç!“uù7êX\â_êÂdIÿF}‹KüK]˜,éßx˜ÀâÿR&Kú7X\â_êÂdIÿÆ#—ø—º0YÒ¿ñ(Å%þ¥.L–ôofôq£O]gô·FŸ4ú£ÿ4ú‚Ñ-F·}ÑèËF_1ºÍèv£¯Ýaô_F_3ºÓè.£¯í1ú†ÑÝF{¾i´Ïè[Fß6ºÇè^£ïÝgt¿Ñw0úžÑ÷4úÑþÛè!£ÿ1zØèGFû1zÔèÇFýÄèq£'Œž4zÊèÀB ¥öOju”Z¥VG©ÕQju”Z¥VG©ÕQj›Z-¥VKé|ýY-¥VK©ÕRjµ”Z-¥VK©ÕRjµ”Z-¥VK©ÕRjµ”Z-¥VK©ÕRjµ”Z-¥VKéJ#«§Ôê)µzJ­žÒçYM¥VS©ÕTj5•ZM¥VS©ÕTjõ”Z=¥VO©ÕSjõ”Z=¥VOé Œ¬¦R«©Ôj*µšJ­¦Ò_5²ºJÍÈj+ÍŒ¬®R««Ôê*µºJ­®R««Ôê*µºJ­®R««Ôê*µšJ­¦R«©Ôj*µšJ­¦R«©ôuFVW©ÕUju•Z]¥VW©ÕUju•þŽ‘ÕV:_Wºw•ñ™þáð¦Â«}•y¸:TV*<:w(ÞnÔMo7Û‹|;”ìÅì\Löbò%±¼v‹ûáøv(»µö§ñ¯[<Šð¶Xš|ŽÏ‡ßÅl?ܘ­-ª—ÅäÅøp°µX<§þÛ#ºÅ-ßgïš?¿*{;’+¢²ð”Á«²*2W¤‹¹rN_ž?ÿyLü¨$íç‹Ý䎈\íçõ`w9i·'ߎ$qÍÛ0ÚE>4Ö¥¯1è&—ñJn¼’Ó©­®_̾²våe©ü|,7Îy<íHA¿?E8Qì)6»ápøòºQÞör¼"¼çcY4¯²‹üé†"|ä¿3uË·ú/šë†U­Kú•.ºµ&ó¸+è×ZËŸw-IŠ)ó}þy89ø]²ßy†}~~lþ{çü7Y¾g2ÆšRNÖ—]ržüZä÷¹Ž~È­å»ípn¬7×x‹üÄÛ>×1äkñaØyszzrñtâûô’ÜZìÁ޼ïîkÈåcÛØ8쟉Côûr-s©·ä(õÏCÉÁïÿy¾Ü>3O,zr>öº,pAÎ%Vùüäåa1ìÏÙןãa=ý¾:“öã3ñÏÛäb3è<¥œ¼ÄùÀ5ÈùÛŸÄö`N±‡Ÿxå1†ý䄸‚7ìa.Iâ$IÚqL|Á+k‰mÉí̃[â±$iÇüùx"[süùxË<ΩÛÞ\ §œ‘³žœ^pœæô€I­WêÁsÙÃÞUtÞü,=Kÿ´Øw‹géYZ„Ú®ì÷o™½hvËüáù³¹ù™gvç—mÛtÞö#O;çÜí³>´äÔMnŸ}ó6Yßwæ–M²<„žµéÌísó—š$Oó–É×q¸‘oïÏ|gãÆÓV\ôžŽ¼i%?3_ý§?|êÞˆGè}uÁyjI>°Ÿ|5Å~öV$ŽŒ/?GÎ?%üØ;-ù›;[¢Wñ|ì¬H«öƒ_vãǸȧ_•ø‚KÍ_³ n–‰Ü¨Ö!OpŽðœDò…~üÓúŸü†ÿ׺Ôv5Åî>±¯hßgº?¢O÷ÅŠÄC÷/ôT æ©Å;~bGCp‚È©Ij'ì¥ÎˆÇ€è…Oñ7%|ŒWÄ>­kô(Þ'%uÁòÙ·4ߊÏìËÔñOz¼û&e÷èS_º/éþ5)ã ÑÓ?ÔÏ iu¿[%øÁÎI‰³îo´ä_÷™Šô—Î?!ùÅîªø_ëñ„ü*îéO ‚»é»´>á'îCíÇ»ƒûšìG“²^ãUm÷;á|kõ"-ãuüQœà‡¬›>=`¾.í´ÈYµs¡]I_ô6¥ó‰{±Cí—~YÆ—Êzôä5d­Ú‰u×xbÏráW»ëÒ§^&¤eqn̯ÞÙݯؿ…¯&ëÕ⇼¦Ì³n%íÎv?à#>KeÝ2±oJø‰uY„Câ\–õô‘«õ²Zæi‰3úªðïìîë°Có?þ*ô<~ì ž– 
lavaan/NAMESPACE0000644000176200001440000001540114624603671012745 0ustar liggesusers
# only when we have src code
# useDynLib("lavaan")

importFrom("graphics", "abline", "axis", "box", "hist", "pairs", "par",
           # generics
"plot") importFrom("methods", "is", "new", "slot", "slotNames", ".hasSlot", "setClass", "representation", "setGeneric", "setRefClass", "setMethod", # generics, "show", "signature") importFrom("stats", "as.formula", "complete.cases", "cor", "cov", "cov2cor", "cov.wt", "dnorm", "lm.fit", "na.omit", "nlminb", "optim", "pchisq", "plogis", "pnorm", "qchisq", "qnorm", "quantile", "rnorm", "runif", "sd", "terms", "uniroot", "var", "weighted.mean", "aggregate", "dlogis", "qlogis", "optimize", "lm", "setNames", # generics "coef", "residuals", "resid", "fitted.values", "fitted", "predict", "update", "anova", "vcov") importFrom("utils", "combn", "modifyList", "packageDescription", "read.table", "str", "write.table", "write.csv", "packageVersion") importFrom("quadprog", "solve.QP") importFrom("mnormt", "sadmvn") importFrom("pbivnorm", "pbivnorm") # AIC and friends... if(getRversion() >= "2.13.0") { importFrom("stats4",AIC, BIC, logLik, nobs) export(logLik, nobs) # not AIC, BIC? } else { importFrom("stats4",AIC, BIC, logLik) export(logLik, BIC, nobs) # not AIC? } # export ordinary functions defined in this package export("lavaan", "cfa", "sem", "growth", "lavaanList", "cfaList", "semList", #"fsr", "twostep", "sam", "efa", # new name # old name "lavParTable", "lavaanify", "lavNames", "lavaanNames", "lavParseModelString", # "parseModelString", "lavInspect", "inspect", "lavTech", "lavListInspect", "lavListTech", "lavResiduals", # utilities "getCov", "char2num", "cor2cov", "lavOptions", "modindices", "modificationIndices", "modificationindices", "standardizedSolution", "standardizedsolution", "parameterEstimates", "parameterestimates", "parameterTable", "parametertable", "parTable", "partable", "varTable", "vartable", "fitMeasures", "fitmeasures", "inspectSampleCov", "bootstrapLavaan", "bootstrapLRT", "InformativeTesting", "simulateData", "estfun.lavaan", "lavScores", "lavTables", "lavTablesFitCp", "lavTablesFitCf", "lavTablesFitCm", "lavExport", "lavTest", "lavTestLRT", "lavTestWald", "lavTestScore", "lavMatrixRepresentation", "mplus2lavaan", "mplus2lavaan.modelSyntax", #"lavData", "lavPredict", "lavPredictY", "lavPredictY_cv", "lavCor", # API functions # lav_matrix "lav_matrix_vec", "lav_matrix_vecr", "lav_matrix_vech", "lav_matrix_vechr", "lav_matrix_vechu", "lav_matrix_vechru", "lav_matrix_vech_idx", "lav_matrix_vech_row_idx", "lav_matrix_vech_col_idx", "lav_matrix_vechr_idx", "lav_matrix_vechu_idx", "lav_matrix_vechru_idx", "lav_matrix_diag_idx", "lav_matrix_diagh_idx", "lav_matrix_antidiag_idx", "lav_matrix_vech_reverse", "lav_matrix_vechru_reverse", "lav_matrix_upper2full", "lav_matrix_vechr_reverse", "lav_matrix_vechu_reverse", "lav_matrix_lower2full", "lav_matrix_duplication", "lav_matrix_duplication_pre", "lav_matrix_duplication_post", "lav_matrix_duplication_pre_post", "lav_matrix_duplication_ginv", "lav_matrix_duplication_ginv_pre", "lav_matrix_duplication_ginv_post", "lav_matrix_duplication_ginv_pre_post", "lav_matrix_commutation", "lav_matrix_commutation_pre", "lav_matrix_commutation_post", "lav_matrix_commutation_pre_post", "lav_matrix_commutation_mn_pre", "lav_matrix_symmetric_sqrt", "lav_matrix_orthogonal_complement", "lav_matrix_bdiag", "lav_matrix_trace", "lav_matrix_cov", # lav_partable "lav_partable_independence", # used by semTools! 
"lav_partable_unrestricted", "lav_partable_npar", "lav_partable_ndat", "lav_partable_df", "lav_partable_labels", "lav_partable_from_lm", "lav_partable_complete", "lav_partable_attributes", "lav_partable_merge", "lav_partable_add", "lav_partable_constraints_def", "lav_partable_constraints_ceq", "lav_partable_constraints_ciq", # lav_constraints "lav_constraints_parse", # lav_func "lav_func_gradient_complex", "lav_func_gradient_simple", "lav_func_jacobian_complex", "lav_func_jacobian_simple", # used by semTools! # lav_model "lav_model_get_parameters", "lav_model_set_parameters", "lav_model_implied", "lav_model_vcov_se", # lav_data "lav_data_update", # lav_samplestats "lav_samplestats_from_data", # estimation "lav_export_estimation" # deprecated functions #"vech", "vech.reverse", "vechru", "vechru.reverse", "lower2full", #"vechr", "vechr.reverse", "vechu", "vechu.reverse", "upper2full", #"duplicationMatrix", "commutationMatrix", "sqrtSymmetricMatrix" ) # export Classes exportClasses( "lavaan", "lavaanList" ) # export Methods exportMethods( "predict", "update", "anova", "coef", "residuals", "resid", "vcov", "logLik", "fitted.values", "fitted", "summary" ) S3method(print, lavaan.data.frame) S3method(print, lavaan.list) S3method(print, lavaan.matrix) S3method(print, lavaan.matrix.symmetric) S3method(print, lavaan.vector) S3method(print, lavaan.parameterEstimates) S3method(print, lavaan.fitMeasures) S3method(print, lavaan.fsr) S3method(print, lavaan.summary) S3method(print, lavaan.tables.fit.Cf) S3method(print, lavaan.tables.fit.Cp) S3method(print, lavaan.tables.fit.Cm) S3method(print, lavaan.efa) S3method(print, efaList.summary) S3method(print, efaList) S3method(predict, efaList) S3method(summary, lavaan.fsr) S3method(summary, efaList) S3method(fitMeasures, efaList) S3method(fitmeasures, efaList) S3method(pairs, lavaan) S3method(print, InformativeTesting) S3method(plot, InformativeTesting) S3method(inspect, lavaan) S3method(inspect, lavaanList) S3method(lavInspect, lavaan) S3method(lavTech, lavaan) S3method(lavInspect, lavaanList) S3method(lavTech, lavaanList) if(getRversion() >= "3.6.0") { S3method(sandwich::estfun, lavaan) } lavaan/inst/0000755000176200001440000000000014625553503012501 5ustar liggesuserslavaan/inst/CITATION0000644000176200001440000000120514371002473013625 0ustar liggesuserscitHeader("To cite lavaan in publications use:") bibentry( bibtype = "Article", title = "{lavaan}: An {R} Package for Structural Equation Modeling", author = as.person("Yves Rosseel"), journal = "Journal of Statistical Software", year = "2012", volume = "48", number = "2", pages = "1--36", doi = "10.18637/jss.v048.i02", textVersion = paste("Yves Rosseel (2012).", "lavaan: An R Package for Structural Equation Modeling.", "Journal of Statistical Software, 48(2), 1-36.", "https://doi.org/10.18637/jss.v048.i02") ) lavaan/inst/understanding_lavaan_internals.R0000644000176200001440000004433514625553503021103 0ustar liggesusers# How does lavaan work? library(lavaan) # PART 1: from syntax to matrices model <- 'f =~ x1 + a*x2 + 3*x3' # parsing the syntax lavParseModelString(model, as.data.frame. 
= TRUE) # creates a 'FLAT' initial parameter table FLAT <- lavParseModelString(model) lavNames(FLAT) lavNames(FLAT, "lv") # lavaanify() # - first creates FLAT # - needs information from the data (eg number of groups) # - builds the initial parameter table # - the parameter table is the core representation of the model in lavaan lavaanify(model) # if sem/cfa is used, more parameters are set free lavaanify(model, auto = TRUE) # example: equality constraints using labels model <- 'f =~ x1 + a*x2 + a*x3' lavaanify(model, auto = TRUE) # alternative for 'simple' equality constraints # (will become the default soon) lavaanify(model, auto = TRUE, ceq.simple = TRUE) # explicit equality constraints model <- 'f =~ x1 + a*x2 + b*x3; a == b' lavaanify(model, auto = TRUE, ceq.simple = TRUE) # multiple groups/blocks model <- 'f =~ x1 + c(a1,a2)*x2 + c(b1, b2)*x3' lavaanify(model, auto = TRUE, ngroups = 2) # matrix representation: LISREL-all-y model <- 'f =~ x1 + x2 + x3' PT <- lavaanify(model, auto = TRUE, as.data.frame. = TRUE) PT # map every parameter to a matrix element MAT <- as.data.frame(lavaan:::lav_lisrel(PT)) cbind(PT, MAT) # alternative matrix representation: RAM MAT <- as.data.frame(lavaan:::lav_ram(PT)) cbind(PT, MAT) # first create default lavoptions fit <- sem(model = 'f =~x1 + x2 + x3', data = HolzingerSwineford1939) lavoptions <- fit@Options # create 'Model' object (S4 class) # (often called lavmodel in internal functions) Model <- lavaan:::lav_model(PT, lavoptions = lavoptions) # another representation of the model, more suitable for computations Model@nmat Model@isSymmetric Model@dimNames # the matrix representation is in GLIST (group list) # GLIST may contain the matrices of multiple groups, levels, blocks, ... MLIST <- Model@GLIST # MLIST is the matrix representation of a single block # MLIST is used to compute model-based statistics, e.g., # the model-implied variance-covariance matrix lavaan:::computeSigmaHat.LISREL(MLIST) # PART 2: lavaan() workflow: # 18 steps in total (many of them can be skipped) # 1. lavParseModelString + ov.names + ngroups + nlevels ... # 2. set/check options (eg meanstructure = TRUE) # lav_options_set() # lav_options.R # 3. check the (raw) data, or the sample statistics # lavData() lavdata <- lavaan:::lavData(data = HolzingerSwineford1939, ov.names = c("x1", "x2", "x3")) slotNames(lavdata) lavdata@ov.names lavdata@ngroups lavdata@X[[1]] # the raw data in group/block 1 # 4. lavpartable: create the parameter table # needs to know: how many groups, how many categories, ... lavpartable <- lavaanify(model = FLAT, auto = TRUE) # 4b. compute and store partable attributes (ov.names, ov.names.x, ...) lavpta <- lav_partable_attributes(lavpartable) lavpta$vnames$ov # 5. lavsamplestats # compute sample statistics (cov, mean, Gamma, ...) lavsamplestats <- lavaan:::lav_samplestats_from_data(lavdata, lavoptions = lavoptions) slotNames(lavsamplestats) lavsamplestats@cov[[1]] # observed covariance matrix first group/block lavsamplestats@icov[[1]] # inverse observed covariance matrix lavsamplestats@cov.log.det[[1]] # log determinant covariance matrix # 6. lavh1 # summary statistics of the 'unrestricted' (h1) model # with complete data, this is trivial (cov, mean) # when data is missing, we need to estimate cov/mean using EM lavh1 <- lavaan:::lav_h1_implied_logl(lavdata = lavdata, lavsamplestats = lavsamplestats, lavpta = lavpta, lavoptions = lavoptions) lavh1$implied # 7. 
parameter bounds (needs lavh1$implied) # only used if bounded estimation is requested lavoptions$bounds <- "standard" lavoptions$optim.bounds <- list(lower = c("ov.var", "loadings"), upper = c("ov.var", "loadings"), min.reliability.marker = 0.1) lavpartable <- lavaan:::lav_partable_add_bounds(partable = lavpartable, lavh1 = lavh1, lavdata = lavdata, lavsamplestats = lavsamplestats, lavoptions = lavoptions) # remove bounds again to save space lavpartable$lower <- NULL lavpartable$upper <- NULL # 8. compute starting values lavpartable$start <- lavaan:::lav_start(start.method = lavoptions$start, lavpartable = lavpartable, lavsamplestats = lavsamplestats) lavpartable # 9. lavmodel: create internal model representation (with GLIST) lavmodel <- lavaan:::lav_model(lavpartable = lavpartable, lavpta = lavpta, lavoptions = lavoptions) lavmodel@GLIST # 10. lavcache: compute some additional summary statistics # only used when estimator = "PML", and "MML" (for now) lavcache <- list() # 11. estimation # - default: lav_model_estimate() + nlminb() (quasi-Newton optimization) # - lav_optim_gn(): Gauss-Newton optimization # - lav_optim_noniter(): non-iterative procedures # - lav_mvnorm_cluster_em_h0: EM for multilevel models current.verbose <- lav_verbose() if (lav_verbose(TRUE)) on.exit(lav_verbose(current.verbose), TRUE) x <- try(lavaan:::lav_model_estimate(lavmodel = lavmodel, lavpartable = lavpartable, lavsamplestats = lavsamplestats, lavdata = lavdata, lavoptions = lavoptions, lavcache = lavcache)) # store parameters in lavmodel lavmodel <- lav_model_set_parameters(lavmodel, x = as.numeric(x)) # store parameters in @ParTable$est lavpartable$est <- lav_model_get_parameters(lavmodel = lavmodel, type = "user", extra = TRUE) lavpartable # 12. lavimplied + lavloglik # store model implied statistics in @implied # if likelihood-based method, store also loglik in @loglik lavimplied <- lav_model_implied(lavmodel) lavloglik <- lavaan:::lav_model_loglik(lavdata = lavdata, lavsamplestats = lavsamplestats, lavimplied = lavimplied, lavmodel = lavmodel, lavoptions = lavoptions) # 13. compute 'vcov': the variance matrix of the (free) parameters # this is needed to compute standard errors VCOV <- lavaan:::lav_model_vcov(lavmodel = lavmodel, lavsamplestats = lavsamplestats, lavoptions = lavoptions, lavdata = lavdata, lavpartable = lavpartable, lavcache = lavcache, lavimplied = lavimplied, lavh1 = lavh1) VCOV # prepare lavvcov slot lavvcov <- list(se = lavoptions$se, information = lavoptions$information, vcov = VCOV) # store standard errors in parameter table lavpartable$se <- lavaan:::lav_model_vcov_se(lavmodel = lavmodel, lavpartable = lavpartable, VCOV = VCOV) # 14. compute global test statistic (chi-square) # trivial for standard test (=N * F_ML) # more work for 'robust' test statistics (eg test = "satorra-bentler") TEST <- lavaan:::lav_model_test(lavmodel = lavmodel, lavpartable = lavpartable, lavpta = lavpta, lavsamplestats = lavsamplestats, lavimplied = lavimplied, lavh1 = lavh1, lavoptions = lavoptions, x = x, VCOV = VCOV, lavdata = lavdata, lavcache = lavcache, lavloglik = lavloglik) lavtest <- TEST # 14bis. lavfit # store 'fit' information # no longer used, but if I remove it, a dozen (old) packages break... # 15. fit baseline model fit.indep <- try(lavaan:::lav_object_independence(object = NULL, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, lavoptions = lavoptions, lavpta = lavpta, lavh1 = lavh1), silent = TRUE) # 16.
rotation # only needed if efa() blocks are defined # lavmodel <- lav_model_efa_rotate(lavmodel = lavmodel, # lavoptions = lavoptions) # 17. create lavaan object # don't run, as some pieces have not been created... # lavaan <- new("lavaan", # version = as.character(packageVersion("lavaan")), # call = mc, # match.call # timing = timing, # list # Options = lavoptions, # list # ParTable = lavpartable, # list # pta = lavpta, # list # Data = lavdata, # S4 class # SampleStats = lavsamplestats, # S4 class # Model = lavmodel, # S4 class # Cache = lavcache, # list # Fit = lavfit, # S4 class # boot = lavboot, # list # optim = lavoptim, # list # implied = lavimplied, # list # loglik = lavloglik, # list # vcov = lavvcov, # list # test = lavtest, # list # h1 = lavh1, # list # baseline = lavbaseline, # list # internal = list(), # empty list # external = list() # empty list # ) # 18. post-fitting check of parameters # lavInspect(lavaan, "post.check") # the sem/cfa/growth function just sets some # options to user-friendly settings: # default options for sem/cfa call # mc$int.ov.free = TRUE # mc$int.lv.free = FALSE # #mc$auto.fix.first = !std.lv # mc$auto.fix.first = TRUE # (re)set in lav_options_set # mc$auto.fix.single = TRUE # mc$auto.var = TRUE # mc$auto.cov.lv.x = TRUE # mc$auto.cov.y = TRUE # mc$auto.th = TRUE # mc$auto.delta = TRUE # mc$auto.efa = TRUE # PART 3: extractor functions fit <- sem(model = 'f =~ x1 + x2 + x3 + x4', data = HolzingerSwineford1939) parameterEstimates(fit) # = subset of parTable(fit), but with additional columns (z, pvalues, ...) parameterEstimates(fit, output = "text") # this is a big part of the summary() output # summary() # first creates summary output as a list: out <- summary(fit) names(out) class(out) # with a specific print function out # fit indices fitMeasures(fit) fitMeasures(fit, output = "matrix") fitMeasures(fit, output = "text") # lavInspect/lavTech lavInspect(fit, "est") lavTech(fit, "est") # PART 4: lavaan slots fit <- sem(model = 'f =~ x1 + x2 + x3 + x4', data = HolzingerSwineford1939) class(fit) slotNames(fit) # 1. lavaan version used to create this object fit@version # 2. user-specified call fit@call # 3. timings of several substeps unlist(fit@timing) # 4. options used for this object unlist(fit@Options) # 5. the parameter table fit@ParTable # list parTable(fit) # return as data.frame # 6. the parameter table attributes names(fit@pta) fit@pta$vnames$ov fit@pta$vidx$ov fit@pta$nfac fit@pta$nblocks # 7. Data slot (S4) fit@Data # has its own print function slotNames(fit@Data) as.data.frame(fit@Data@ov) str(fit@Data) # 8. SampleStats (S4) fit@SampleStats slotNames(fit@SampleStats) fit@SampleStats@cov[[1]] # list with element per group # 9. Model (S4) slotNames(fit@Model) fit@Model@x.free.idx # parameter index in parameter table fit@Model@m.free.idx # parameter index in model matrix # 10. Cache (list) # cached information, only used for estimator PML and MML (for now) # 11. Fit # deprecated, only kept to avoid breaking some (old) packages (eg rsem) # 12. boot (list) # only used if bootstrapping was used fitb <- sem(model = 'f =~ x1 + x2 + x3 + x4', data = HolzingerSwineford1939, se = "bootstrap", bootstrap = 100L, verbose = TRUE) head(fitb@boot$coef) # 13. optim -- info about (iterative) optimization process str(fit@optim) # 14. loglik -- loglikelihood information (ML estimation only) unlist(fit@loglik) # 15. implied -- implied sample statistics (per group) fit@implied # 16. vcov -- variance-covariance matrix of the (free) model parameters fit@vcov # 17.
test -- global test statistics fit2 <- sem(model = 'f =~ x1 + x2 + x3 + x4', data = HolzingerSwineford1939, test = "satorra-bentler") names(fit2@test) fit2@test$satorra.bentler # 18. h1 -- sample statistics + logl unrestricted/saturated model # often similar to info in @SampleStats, but not if missing data, # multilevel data, ... fit@h1$implied # this is what is used for lavInspect(fit, "implied") fit@h1$logl # 19. baseline -- information about the baseline model (needed for CFI/TLI) names(fit@baseline) as.data.frame(fit@baseline$partable) fit@baseline$test$standard # 20. internal -- list to store special flags/info, for internal use only # 21. external -- list to store special flags/info if you are an external # developer # PART 5: source code structure # 5 main functions: # 1) xxx_lavaan.R # 2) xxx_lavaanList.R # 3) xxx_efa.R (new in 0.6-13) # 4) xxx_sam.R # 5) xxx_fsr.R (only for repro reasons; access via lavaan:::fsr()) # mixture of new, old, and very old code # very old code: functions do not start with lav_ prefix # for example: lavaan:::computeSigmaHat # files that start with ctr_ contain contributed code # written by others # for example: ctr_pml_plrt.R (written by Myrsini Katsikatsou) # (with only minor edits by YR) # 00class.R contains S4 class definitions # 00generic.R defines 'generic' functions (that can be specialized) # fitMeasures(), lavInspect(), lavTech() # zzz_OLDNAMES.R contains aliases for (very old) function names # that are still used by some packages # eg computeExpectedInformation <- lav_model_information_expected # zzz.R traditionally contains the (in)famous startup message # most files start with the lav_ prefix # the second term often refers to the type of object for which the file # contains functions, for example # lav_matrix.R # lav_partable_subset.R # lav_model_estimate.R # lav_object_post_check.R # but sometimes, it refers to what is created, or what is done # lav_test.R # creates @test slot # lav_print.R # prints various objects # for ML, an important set of functions is: # lav_mvnorm.R # lav_mvnorm_h1.R # lav_mvnorm_missing.R # lav_mvnorm_missing_h1.R # lav_mvnorm_cluster.R # lav_mvnorm_cluster_missing.R # the standard ML discrepancy function is found at the top of # lav_objective.R # sometimes, lavaan needs to do trivial things (like regression), but # needs a bit more info than what we get from standard functions (like lm): # lav_uvreg.R # univariate regression # lav_uvord.R # univariate probit regression # lav_mvreg.R # multivariate regression # lav_mvreg_cluster.R # multivariate twolevel regression # during iterative estimation, we need to compute the value of the # objective function (i.e., the discrepancy function) many times # lav_model_estimate.R # runs the iterations # and defines the following objective function: # function to be minimized objective_function <- function(x, verbose = FALSE, infToMax = FALSE, debug = FALSE) { # 2. unpack if(lavmodel@eq.constraints) { x <- as.numeric(lavmodel@eq.constraints.K %*% x) + lavmodel@eq.constraints.k0 } # 1. unscale x <- x / parscale # update GLIST (change `state') and make a COPY!
GLIST <- lav_model_x2GLIST(lavmodel, x = x) fx <- lav_model_objective(lavmodel = lavmodel, GLIST = GLIST, lavsamplestats = lavsamplestats, lavdata = lavdata, lavcache = lavcache, verbose = verbose) # only for PML: divide by N (to speed up convergence) if(estimator == "PML") { fx <- fx / lavsamplestats@ntotal } if(debug || verbose) { cat(" objective function = ", sprintf("%18.16f", fx), "\n", sep="") } if(debug) { cat("Current free parameter values =\n"); print(x); cat("\n") } if(lavoptions$optim.partrace) { PENV$PARTRACE <- rbind(PENV$PARTRACE, c(fx, x)) } # for L-BFGS-B #if(infToMax && is.infinite(fx)) fx <- 1e20 if(!is.finite(fx)) { fx.group <- attr(fx, "fx.group") fx <- 1e20 attr(fx, "fx.group") <- fx.group # only for lav_model_fit() } fx } # lav_model_objective() can be found in lav_model_objective.R # 1) compute model implied summary statistics (for each group) # using eg computeSigmaHat() # 2) compute value discrepancy function # eg estimator.GLS() or estimator.ML() # see lav_objective.R # 3) if multiple groups, combine the values using group weights # 4) return value (fx) lavaan/README.md0000644000176200001440000000236114622072662013004 0ustar liggesusers# lavaan lavaan is a free, open source R package for latent variable analysis. You can use lavaan to estimate a large variety of multivariate statistical models, including path analysis, confirmatory factor analysis, structural equation modeling and growth curve models. The lavaan package is developed to provide useRs, researchers and teachers a free open-source, but commercial-quality package for latent variable modeling. The long-term goal of lavaan is to implement all the state-of-the-art capabilities that are currently available in commercial packages. However, lavaan is still under development, and much work still needs to be done. To get a first impression of how lavaan works in practice, consider the following example of a SEM model (the Political Democracy Example from Bollen's 1989 book): ```R library(lavaan) model <- ' # latent variable definitions ind60 =~ x1 + x2 + x3 dem60 =~ y1 + y2 + y3 + y4 dem65 =~ y5 + y6 + y7 + y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual covariances y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 ' fit <- sem(model, data = PoliticalDemocracy) summary(fit) ``` More information can be found on the website: https://lavaan.org lavaan/build/0000755000176200001440000000000014630513546012622 5ustar liggesuserslavaan/build/partial.rdb0000644000176200001440000000007414630513546014750 0ustar liggesusers‹‹àb```b`a’Ì ¦0°0 FN Íš—˜›Z d@$þû$¬²7lavaan/man/0000755000176200001440000000000014626372131012274 5ustar liggesuserslavaan/man/mplus2lavaan.modelSyntax.Rd0000644000176200001440000000162314622072672017503 0ustar liggesusers\name{mplus2lavaan.modelSyntax} \alias{mplus2lavaan.modelSyntax} \title{Convert Mplus model syntax to lavaan} \description{ Converts Mplus model syntax into lavaan model syntax.} \usage{ mplus2lavaan.modelSyntax(syntax) } \arguments{ \item{syntax}{A character vector containing Mplus model syntax to be converted to lavaan model syntax. Note that parsing Mplus syntax often requires correct usage of newline characters. If \code{syntax} is a vector of multiple strings, these will be joined with newlines prior to conversion. Alternatively, \code{\\n} characters can be included inline in \code{syntax}.} } \value{ A character string of converted \code{lavaan} model syntax. 
} \author{Michael Hallquist} \seealso{\code{\link{mplus2lavaan}}} \examples{ \dontrun{ syntax <- ' f1 BY x1*1 x2 x3; x1 WITH x2; x3 (1); x2 (1); ' lavSyntax <- mplus2lavaan.modelSyntax(syntax) cat(lavSyntax) } } lavaan/man/Demo.twolevel.Rd0000644000176200001440000000224714622072672015317 0ustar liggesusers\name{Demo.twolevel} \alias{Demo.twolevel} \docType{data} \title{ Demo dataset for illustrating a multilevel CFA. } \description{ A toy dataset containing measures on 6 items (y1-y6), 3 within-level covariates (x1-x3) and 2 between-level covariates (w1-w2). The data is clustered (200 clusters of size 5, 10, 15 and 20), and the cluster variable is \dQuote{cluster}. } \usage{data(Demo.twolevel)} \format{ A data frame of 2500 observations of 12 variables. \describe{ \item{\code{y1}}{item 1} \item{\code{y2}}{item 2} \item{\code{y3}}{item 3} \item{\code{y4}}{item 4} \item{\code{y5}}{item 5} \item{\code{y6}}{item 6} \item{\code{x1}}{within-level covariate 1} \item{\code{x2}}{within-level covariate 2} \item{\code{x3}}{within-level covariate 3} \item{\code{w1}}{between-level covariate 1} \item{\code{w2}}{between-level covariate 2} \item{\code{cluster}}{cluster variable} } } \examples{ head(Demo.twolevel) model <- ' level: 1 fw =~ y1 + y2 + y3 fw ~ x1 + x2 + x3 level: 2 fb =~ y1 + y2 + y3 fb ~ w1 + w2 ' fit <- sem(model, data = Demo.twolevel, cluster = "cluster") summary(fit) } lavaan/man/InformativeTesting.Rd0000644000176200001440000001365114622072672016415 0ustar liggesusers\name{InformativeTesting} \alias{InformativeTesting} \alias{informativetesting} \title{Testing Order/Inequality Constrained Hypotheses in SEM} \description{Testing order/inequality constrained hypotheses in SEM} \usage{ InformativeTesting(model = NULL, data, constraints = NULL, R = 1000L, type = "bollen.stine", return.LRT = TRUE, double.bootstrap = "standard", double.bootstrap.R = 249L, double.bootstrap.alpha = 0.05, parallel = c("no", "multicore", "snow"), ncpus = 1L, cl = NULL, verbose = FALSE, \dots) } \arguments{ \item{model}{Model syntax specifying the model. See \code{\link{model.syntax}} for more information.} \item{data}{The data frame containing the observed variables being used to fit the model.} \item{constraints}{The imposed inequality constraints on the model.} \item{R}{Integer; number of bootstrap draws. The default value is set to 1000.} \item{type}{If \code{"parametric"}, the parametric bootstrap is used. If \code{"bollen.stine"}, the semi-nonparametric Bollen-Stine bootstrap is used. The default is set to \code{"bollen.stine"}.} \item{return.LRT}{Logical; if \code{TRUE}, the function returns bootstrapped LRT-values.} \item{double.bootstrap}{If \code{"standard"} (default) the genuine double bootstrap is used to compute an additional set of plug-in p-values for each bootstrap sample. If \code{"no"}, no double bootstrap is used. If \code{"FDB"}, the fast double bootstrap is used to compute second level LRT-values for each bootstrap sample. Note that the \code{"FDB"} is experimental and should not be used by inexperienced users.} \item{double.bootstrap.R}{Integer; number of double bootstrap draws. The default value is set to 249.} \item{double.bootstrap.alpha}{The significance level to compute the adjusted alpha based on the plugin p-values. Only used if \code{double.bootstrap = "standard"}. The default value is set to 0.05.} \item{parallel}{The type of parallel operation to be used (if any).
If missing, the default is set to "no".} \item{ncpus}{Integer: number of processes to be used in parallel operation: typically one would choose this to be the number of available CPUs.} \item{cl}{An optional parallel or snow cluster for use if \code{parallel = "snow"}. If not supplied, a cluster on the local machine is created for the duration of the \code{InformativeTesting} call.} \item{verbose}{Logical; if \code{TRUE}, information is shown at each bootstrap draw.} \item{...}{Other named arguments from the lavaan package which are passed to the function. For example \code{"group"} in a multiple group model.} } \details{The following hypothesis tests are available: \itemize{ \item Type A: Test H0: all restrictions with equalities ("=") active against HA: at least one inequality restriction (">") strictly true. \item Type B: Test H0: all restrictions with inequalities (">") (including some equalities ("=")) active against HA: at least one restriction false (some equality restrictions may be maintained).} } \value{An object of class InformativeTesting for which a print and a plot method is available.} %\seealso{} \examples{ \dontrun{ ######################### ### real data example ### ######################### # Multiple group path model for facial burns example. # model syntax with starting values. burns.model <- 'Selfesteem ~ Age + c(m1, f1)*TBSA + HADS + start(-.10, -.20)*TBSA HADS ~ Age + c(m2, f2)*TBSA + RUM + start(.10, .20)*TBSA ' # constraints syntax burns.constraints <- 'f2 > 0 ; m1 < 0 m2 > 0 ; f1 < 0 f2 > m2 ; f1 < m1' # we only generate 2 bootstrap samples in this example; in practice # you may wish to use a much higher number. # the double bootstrap was switched off; in practice you probably # want to set it to "standard". example1 <- InformativeTesting(model = burns.model, data = FacialBurns, R = 2, constraints = burns.constraints, double.bootstrap = "no", group = "Sex") example1 ########################## ### artificial example ### ########################## # Simple ANOVA model with 3 groups (N = 20 per group) set.seed(1234) Y <- cbind(c(rnorm(20,0,1), rnorm(20,0.5,1), rnorm(20,1,1))) grp <- c(rep("1", 20), rep("2", 20), rep("3", 20)) Data <- data.frame(Y, grp) #create model matrix fit.lm <- lm(Y ~ grp, data = Data) mfit <- fit.lm$model mm <- model.matrix(mfit) Y <- model.response(mfit) X <- data.frame(mm[,2:3]) names(X) <- c("d1", "d2") Data.new <- data.frame(Y, X) # model model <- 'Y ~ 1 + a1*d1 + a2*d2' # fit without constraints fit <- sem(model, data = Data.new) # constraints syntax: mu1 < mu2 < mu3 constraints <- ' a1 > 0 a1 < a2 ' # we only generate 10 bootstrap samples in this example; in practice # you may wish to use a much higher number, say > 1000. The double # bootstrap is not necessary in case of a univariate ANOVA model. example2 <- InformativeTesting(model = model, data = Data.new, start = parTable(fit), R = 10L, double.bootstrap = "no", constraints = constraints) example2 } } \references{ Van de Schoot, R., Hoijtink, H., & Dekovic, M. (2010). Testing inequality constrained hypotheses in SEM models. \emph{Structural Equation Modeling}, \bold{17}, 443-463. Van de Schoot, R., & Strohmeier, D. (2011). Testing informative hypotheses in SEM increases power: An illustration contrasting classical hypothesis testing with a parametric bootstrap method. \emph{International Journal of Behavioral Development}, \bold{35}, 180-190. Silvapulle, M.J. and Sen, P.K. (2005). \emph{Constrained Statistical Inference}. Wiley, New York.
} \author{ Leonard Vanbrabant \email{lgf.vanbrabant@gmail.com} } lavaan/man/lavResiduals.Rd0000644000176200001440000001121214622072672015221 0ustar liggesusers\name{lavResiduals} \alias{lavResiduals} \alias{lavResidual} \title{Residuals} \description{ \sQuote{lavResiduals} provides model residuals and standardized residuals from a fitted lavaan object, as well as various summaries of these residuals. The \sQuote{residuals()} (and \sQuote{resid()}) methods are just shortcuts to this function with a limited set of arguments. } \usage{ lavResiduals(object, type = "cor.bentler", custom.rmr = NULL, se = FALSE, zstat = TRUE, summary = TRUE, h1.acov = "unstructured", add.type = TRUE, add.labels = TRUE, add.class = TRUE, drop.list.single.group = TRUE, maximum.number = length(res.vech), output = "list") } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{type}{Character. If \code{type = "raw"}, this function returns the raw (= unscaled) difference between the observed and the expected (model-implied) summary statistics, as well as the standardized version of these residuals. If \code{type = "cor"}, or \code{type = "cor.bollen"}, the observed and model implied covariance matrices are first transformed to a correlation matrix (using \code{cov2cor()}), before the residuals are computed. If \code{type = "cor.bentler"}, both the observed and model implied covariance matrices are rescaled by dividing the elements by the square roots of the corresponding variances of the observed covariance matrix.} \item{custom.rmr}{\code{list}. Not used yet.} \item{se}{Logical. If \code{TRUE}, show the estimated standard errors for the residuals.} \item{zstat}{Logical. If \code{TRUE}, show the standardized residuals, which are the raw residuals divided by the corresponding (estimated) standard errors.} \item{summary}{Logical. If \code{TRUE}, show various summaries of the (possibly scaled) residuals. When \code{type = "raw"}, we compute the RMR. When \code{type = "cor.bentler"}, we compute the SRMR. When \code{type = "cor.bollen"}, we compute the CRMR. An unbiased version of these summaries is also computed, as well as a standard error, a z-statistic and a p-value for the test of exact fit based on these summaries.} \item{h1.acov}{Character. If \code{"unstructured"}, the observed summary statistics are used as consistent estimates of the corresponding (unrestricted) population statistics. If \code{"structured"}, the model-implied summary statistics are used as consistent estimates of the corresponding (unrestricted) population statistics. This affects the way the asymptotic variance matrix of the summary statistics is computed.} \item{add.type}{Logical. If \code{TRUE}, show the type of residuals in the output.} \item{add.labels}{If \code{TRUE}, variable names are added to the vectors and/or matrices.} \item{add.class}{If \code{TRUE}, vectors are given the \sQuote{lavaan.vector} class; matrices are given the \sQuote{lavaan.matrix} class, and symmetric matrices are given the \sQuote{lavaan.matrix.symmetric} class. This only affects the way they are printed on the screen.} \item{drop.list.single.group}{If \code{FALSE}, the results are returned as a list, where each element corresponds to a group (even if there is only a single group). If \code{TRUE}, the list will be unlisted if there is only a single group.} \item{maximum.number}{Integer. Only used if \code{output = "table"}. Show only the first maximum.number rows of the data.frame.} \item{output}{Character.
By default, \code{output = "list"}, and the output is a list of elements. If \code{output = "table"}, only the residuals of the variance-covariance matrix are shown in a data.frame, sorted from high (in absolute value) to low.} } \value{ If \code{drop.list.single.group = TRUE}, a list of (residualized) summary statistics, including type, standardized residuals, and summaries. If \code{drop.list.single.group = FALSE}, the list of summary statistics is nested within a list for each group. } \references{ Bentler, P.M. and Dijkstra, T. (1985). Efficient estimation via linearization in structural models. In Krishnaiah, P.R. (Ed.), \emph{Multivariate analysis - VI}, (pp. 9--42). New York, NY: Elsevier. Ogasawara, H. (2001). Standard errors of fit indices using residuals in structural equation modeling. \emph{Psychometrika, 66}(3), 421--436. doi:10.1007/BF02294443 Maydeu-Olivares, A. (2017). Assessing the size of model misfit in structural equation models. \emph{Psychometrika, 82}(3), 533--558. doi:10.1007/s11336-016-9552-7 Standardized Residuals in M\emph{plus}. Document retrieved from URL http://www.statmodel.com/download/StandardizedResiduals.pdf } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) lavResiduals(fit) } lavaan/man/lavExport.Rd0000644000176200001440000000347214622072672014560 0ustar liggesusers\name{lavExport} \alias{lavExport} \title{lavaan Export} \description{ Export a fitted lavaan object to an external program.} \usage{ lavExport(object, target = "lavaan", prefix = "sem", dir.name = "lavExport", export = TRUE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{target}{The target program. Current options are \code{"lavaan"} and \code{"Mplus"}.} \item{prefix}{The prefix used to create the input files; the name of the input file has the pattern \sQuote{prefix dot target dot in}; the name of the data file has the pattern \sQuote{prefix dot target dot raw}.} \item{dir.name}{The directory name (including a full path) where the input files will be written.} \item{export}{If \code{TRUE}, the files are written to the output directory (\code{dir.name}). If \code{FALSE}, only the syntax is generated as a character string.} } \value{ If \code{export = TRUE}, a directory (called \code{lavExport} by default) will be created, typically containing a data file, and an input file so that the same analysis can be run using an external program. If \code{export = FALSE}, a character string containing the model syntax only for the target program. } \details{ This function was mainly created to quickly generate an Mplus syntax file to compare the results between Mplus and lavaan. The target \code{"lavaan"} can be useful to create a full model syntax as needed for the \code{lavaan()} function. More targets (perhaps for \code{LISREL} or \code{EQS}) will be added in future releases. 
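As a minimal sketch (the \code{prefix} and \code{dir.name} values below are arbitrary, hypothetical choices), exporting a fitted model to Mplus input and data files on disk could look as follows: \preformatted{fit <- cfa('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939)
lavExport(fit, target = "Mplus", prefix = "hs", dir.name = "mplus_files",
          export = TRUE)}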
} \seealso{\code{\link{lavaanify}}, \code{\link{mplus2lavaan}}} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) out <- lavExport(fit, target = "Mplus", export=FALSE) cat(out) } lavaan/man/lav_matrix.Rd0000644000176200001440000002646714622072672014753 0ustar liggesusers\name{lav_matrix} \alias{lav_matrix_vec} \alias{lav_matrix_vecr} \alias{lav_matrix_vech} \alias{lav_matrix_vechr} \alias{lav_matrix_vechu} \alias{lav_matrix_vechru} \alias{lav_matrix_vech_idx} \alias{lav_matrix_vech_row_idx} \alias{lav_matrix_vech_col_idx} \alias{lav_matrix_vechr_idx} \alias{lav_matrix_vechu_idx} \alias{lav_matrix_vechru_idx} \alias{lav_matrix_vech_reverse} \alias{lav_matrix_vechru_reverse} \alias{lav_matrix_upper2full} \alias{lav_matrix_vechr_reverse} \alias{lav_matrix_vechu_reverse} \alias{lav_matrix_lower2full} \alias{lav_matrix_diag_idx} \alias{lav_matrix_diagh_idx} \alias{lav_matrix_antidiag_idx} \alias{lav_matrix_duplication} \alias{lav_matrix_duplication_pre} \alias{lav_matrix_duplication_post} \alias{lav_matrix_duplication_pre_post} \alias{lav_matrix_duplication_ginv} \alias{lav_matrix_duplication_ginv_pre} \alias{lav_matrix_duplication_ginv_post} \alias{lav_matrix_duplication_ginv_pre_post} \alias{lav_matrix_commutation} \alias{lav_matrix_commutation_pre} \alias{lav_matrix_commutation_post} \alias{lav_matrix_commutation_pre_post} \alias{lav_matrix_commutation_mn_pre} \alias{lav_matrix_symmetric_sqrt} \alias{lav_matrix_orthogonal_complement} \alias{lav_matrix_bdiag} \alias{lav_matrix_trace} \alias{lav_matrix_cov} \title{Utility Functions: Matrices and Vectors} \description{Utility functions for Matrix and Vector operations.} \usage{ # matrix to vector lav_matrix_vec(A) lav_matrix_vecr(A) lav_matrix_vech(S, diagonal = TRUE) lav_matrix_vechr(S, diagonal = TRUE) # matrix/vector indices lav_matrix_vech_idx(n = 1L, diagonal = TRUE) lav_matrix_vech_row_idx(n = 1L, diagonal = TRUE) lav_matrix_vech_col_idx(n = 1L, diagonal = TRUE) lav_matrix_vechr_idx(n = 1L, diagonal = TRUE) lav_matrix_vechru_idx(n = 1L, diagonal = TRUE) lav_matrix_diag_idx(n = 1L) lav_matrix_diagh_idx(n = 1L) lav_matrix_antidiag_idx(n = 1L) # vector to matrix lav_matrix_vech_reverse(x, diagonal = TRUE) lav_matrix_vechru_reverse(x, diagonal = TRUE) lav_matrix_upper2full(x, diagonal = TRUE) lav_matrix_vechr_reverse(x, diagonal = TRUE) lav_matrix_vechu_reverse(x, diagonal = TRUE) lav_matrix_lower2full(x, diagonal = TRUE) # the duplication matrix lav_matrix_duplication(n = 1L) lav_matrix_duplication_pre(A = matrix(0,0,0)) lav_matrix_duplication_post(A = matrix(0,0,0)) lav_matrix_duplication_pre_post(A = matrix(0,0,0)) lav_matrix_duplication_ginv(n = 1L) lav_matrix_duplication_ginv_pre(A = matrix(0,0,0)) lav_matrix_duplication_ginv_post(A = matrix(0,0,0)) lav_matrix_duplication_ginv_pre_post(A = matrix(0,0,0)) # the commutation matrix lav_matrix_commutation(m = 1L, n = 1L) lav_matrix_commutation_pre(A = matrix(0,0,0)) lav_matrix_commutation_post(A = matrix(0,0,0)) lav_matrix_commutation_pre_post(A = matrix(0,0,0)) lav_matrix_commutation_mn_pre(A, m = 1L, n = 1L) # sample statistics lav_matrix_cov(Y, Mu = NULL) # other matrix operations lav_matrix_symmetric_sqrt(S = matrix(0,0,0)) lav_matrix_orthogonal_complement(A = matrix(0,0,0)) lav_matrix_bdiag(...) lav_matrix_trace(..., check = TRUE) } \arguments{ \item{A}{A general matrix.} \item{S}{A symmetric matrix.} \item{Y}{A matrix representing a (numeric) dataset.} \item{diagonal}{Logical. 
If TRUE, include the diagonal.} \item{n}{Integer. When it is the only argument, the dimension of a square matrix. If m is also provided, the number of columns of the matrix.} \item{m}{Integer. The number of rows of a matrix.} \item{x}{Numeric. A vector.} \item{Mu}{Numeric. If given, use Mu (instead of sample mean) to center, before taking the crossproduct.} \item{...}{One or more matrices, or a list of matrices.} \item{check}{Logical. If \code{check = TRUE}, we check if the (final) matrix is square.} } \details{ These are a collection of lower-level matrix/vector related functions that are used throughout the lavaan code. They are made public per request of package developers. Below is a brief description of what they do: The \code{lav_matrix_vec} function implements the vec operator (for 'vectorization') and transforms a matrix into a vector by stacking the columns of the matrix one underneath the other. The \code{lav_matrix_vecr} function is similar to the \code{lav_matrix_vec} function but transforms a matrix into a vector by stacking the rows of the matrix one underneath the other. The \code{lav_matrix_vech} function implements the vech operator (for 'half vectorization') and transforms a symmetric matrix into a vector by stacking the columns of the matrix one underneath the other, but eliminating all supradiagonal elements. If diagonal = FALSE, the diagonal elements are also eliminated. The \code{lav_matrix_vechr} function is similar to the \code{lav_matrix_vech} function but transforms a matrix into a vector by stacking the rows of the matrix one underneath the other, eliminating all supradiagonal elements. The \code{lav_matrix_vech_idx} function returns the vector indices of the lower triangular elements of a symmetric matrix of size n, column by column. The \code{lav_matrix_vech_row_idx} function returns the row indices of the lower triangular elements of a symmetric matrix of size n. The \code{lav_matrix_vech_col_idx} function returns the column indices of the lower triangular elements of a symmetric matrix of size n. The \code{lav_matrix_vechr_idx} function returns the vector indices of the lower triangular elements of a symmetric matrix of size n, row by row. The \code{lav_matrix_vechu_idx} function returns the vector indices of the upper triangular elements of a symmetric matrix of size n, column by column. The \code{lav_matrix_vechru_idx} function returns the vector indices of the upper triangular elements of a symmetric matrix of size n, row by row. The \code{lav_matrix_diag_idx} function returns the vector indices of the diagonal elements of a symmetric matrix of size n. The \code{lav_matrix_diagh_idx} function returns the vector indices of the lower part of a symmetric matrix of size n. The \code{lav_matrix_antidiag_idx} function returns the vector indices of the anti-diagonal elements of a symmetric matrix of size n. The \code{lav_matrix_vech_reverse} function (alias: \code{lav_matrix_vechru_reverse} and \code{lav_matrix_upper2full}) creates a symmetric matrix, given only upper triangular elements, row by row. If diagonal = FALSE, a diagonal with zero elements is added. The \code{lav_matrix_vechr_reverse} (alias: \code{lav_matrix_vechu_reverse} and \code{lav_matrix_lower2full}) creates a symmetric matrix, given only the lower triangular elements, row by row. If diagonal = FALSE, a diagonal with zero elements is added. The \code{lav_matrix_duplication} function generates the duplication matrix for a symmetric matrix of size n.
This matrix duplicates the elements in vech(S) to create vec(S) (where S is symmetric). This matrix is very sparse, and should probably never be explicitly created. Use one of the functions below. The \code{lav_matrix_duplication_pre} function computes the product of the transpose of the duplication matrix and a matrix A. The A matrix should have n*n rows, where n is an integer. The duplication matrix is not explicitly created. The \code{lav_matrix_duplication_post} function computes the product of a matrix A with the duplication matrix. The A matrix should have n*n columns, where n is an integer. The duplication matrix is not explicitly created. The \code{lav_matrix_duplication_pre_post} function first pre-multiplies a matrix A with the transpose of the duplication matrix, and then post multiplies the result again with the duplication matrix. A must be square matrix with n*n rows and columns, where n is an integer. The duplication matrix is not explicitly created. The \code{lav_matrix_duplication_ginv} function computes the generalized inverse of the duplication matrix. The matrix removes the duplicated elements in vec(S) to create vech(S). This matrix is very sparse, and should probably never be explicitly created. Use one of the functions below. The \code{lav_matrix_duplication_ginv_pre} function computes the product of the generalized inverse of the duplication matrix and a matrix A with n*n rows, where n is an integer. The generalized inverse of the duplication matrix is not explicitly created. The \code{lav_matrix_duplication_ginv_post} function computes the product of a matrix A (with n*n columns, where n is an integer) and the transpose of the generalized inverse of the duplication matrix. The generalized inverse of the duplication matrix is not explicitly created. The \code{lav_matrix_duplication_ginv_pre_post} function first pre-multiplies a matrix A with the transpose of the generalized inverse of the duplication matrix, and then post multiplies the result again with the transpose of the generalized inverse matrix. The matrix A must be square with n*n rows and columns, where n is an integer. The generalized inverse of the duplication matrix is not explicitly created. The \code{lav_matrix_commutation} function computes the commutation matrix which is a permutation matrix which transforms vec(A) (with m rows and n columns) into vec(t(A)). The \code{lav_matrix_commutation_pre} function computes the product of the commutation matrix with a matrix A, without explicitly creating the commutation matrix. The matrix A must have n*n rows, where n is an integer. The \code{lav_matrix_commutation_post} function computes the product of a matrix A with the commutation matrix, without explicitly creating the commutation matrix. The matrix A must have n*n rows, where n is an integer. The \code{lav_matrix_commutation_pre_post} function first pre-multiplies a matrix A with the commutation matrix, and then post multiplies the result again with the commutation matrix, without explicitly creating the commutation matrix. The matrix A must have n*n rows, where n is an integer. The \code{lav_matrix_commutation_mn_pre} function computes the product of the commutation matrix with a matrix A, without explicitly creating the commutation matrix. The matrix A must have m*n rows, where m and n are integers. The \code{lav_matrix_cov} function computes the sample covariance matrix of its input matrix, where the elements are divided by N (the number of rows). 
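As a small illustration of this scaling (a sketch using randomly generated data; any numeric matrix would do), \code{lav_matrix_cov} should agree with \code{stats::cov} after rescaling the latter by (N - 1)/N: \preformatted{Y <- matrix(rnorm(50), nrow = 10, ncol = 5)
N <- nrow(Y)
# cov() divides by N - 1; lav_matrix_cov() divides by N
all.equal(lav_matrix_cov(Y), cov(Y) * (N - 1) / N) # expected: TRUE}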
The \code{lav_matrix_symmetric_sqrt} function computes the square root of a positive definite symmetric matrix (using an eigen decomposition). If some of the eigenvalues are negative, they are silently fixed to zero. The \code{lav_matrix_orthogonal_complement} function computes an orthogonal complement of the matrix A, using a qr decomposition. The \code{lav_matrix_bdiag} function constructs a block diagonal matrix from its arguments. The \code{lav_matrix_trace} function computes the trace (the sum of the diagonal elements) of a single (square) matrix, or if multiple matrices are provided (either as a list, or as multiple arguments), we first compute their product (which must result in a square matrix), and then we compute the trace; if \code{check = TRUE}, we check if the (final) matrix is square. } \references{ Magnus, J. R. and H. Neudecker (1999). Matrix Differential Calculus with Applications in Statistics and Econometrics, Second Edition, John Wiley. } \examples{ # upper elements of a 3 by 3 symmetric matrix (row by row) x <- c(30, 16, 5, 10, 3, 1) # construct full symmetric matrix S <- lav_matrix_upper2full(x) # compute the normal theory `Gamma' matrix given a covariance # matrix (S), using the formula: Gamma = 2 * D^{+} (S %x% S) t(D^{+}) Gamma.NT <- 2 * lav_matrix_duplication_ginv_pre_post(S \%x\% S) Gamma.NT } lavaan/man/growth.Rd0000644000176200001440000001370514622072672014106 0ustar liggesusers\name{growth} \alias{growth} \title{Fit Growth Curve Models} \description{ Fit a Growth Curve model. Only useful if all the latent variables in the model are growth factors. For more complex models, it may be better to use the \code{\link{lavaan}} function.} \usage{ growth(model = NULL, data = NULL, ordered = NULL, sampling.weights = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, group = NULL, cluster = NULL, constraints = "", WLS.V = NULL, NACOV = NULL, ov.order = "model", ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (eg. the output of the \code{lavaanify()} function) is also accepted.} \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated as numeric (unless they are declared as ordered in the data.frame.) Since 0.6-4, ordered can also be logical. If TRUE, all observed endogenous variables are treated as ordered (ordinal). If FALSE, all observed endogenous variables are considered to be numeric (again, unless they are declared as ordered in the data.frame.)} \item{sampling.weights}{A variable name in the data frame containing sampling weight information. Currently only available for non-clustered data. Depending on the \code{sampling.weights.normalization} option, these weights may be rescaled (or not) so that their sum equals the number of observations (total or per group).} \item{sample.cov}{Numeric matrix. A sample variance-covariance matrix. The rownames and/or colnames must contain the observed variable names. 
For a multiple group analysis, a list with a variance-covariance matrix for each group.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.th}{Vector of sample-based thresholds. For a multiple group analysis, a list with a vector of thresholds for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} \item{group}{Character. A variable name in the data frame defining the groups in a multiple group analysis.} \item{cluster}{Character. A (single) variable name in the data frame defining the clusters in a two-level dataset.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} \item{WLS.V}{A user provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix for each group. The elements of the weight matrix should be in the following order (if all data is continuous): first the means (if a meanstructure is involved), then the lower triangular elements of the covariance matrix including the diagonal, ordered column by column. In the categorical case: first the thresholds (including the means for continuous variables), then the slopes (if any), the variances of continuous variables (if any), and finally the lower triangular elements of the correlation/covariance matrix excluding the diagonal, ordered column by column.} \item{NACOV}{A user provided matrix containing the elements of (N times) the asymptotic variance-covariance matrix of the sample statistics. For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} \item{ov.order}{Character. If \code{"model"} (the default), the order of the observed variable names (as reflected for example in the output of \code{lavNames()}) is determined by the model syntax. If \code{"data"}, the order is determined by the data (either the full data.frame or the sample (co)variance matrix). If the \code{WLS.V} and/or \code{NACOV} matrices are provided, this argument is currently set to \code{"data"}.} \item{...}{Many more additional options can be defined, using 'name = value'. See \code{\link{lavOptions}} for a complete list.} } \details{ The \code{growth} function is a wrapper for the more general \code{\link{lavaan}} function, using the following default arguments: \code{meanstructure = TRUE}, \code{int.ov.free = FALSE}, \code{int.lv.free = TRUE}, \code{auto.fix.first = TRUE} (unless \code{std.lv = TRUE}), \code{auto.fix.single = TRUE}, \code{auto.var = TRUE}, \code{auto.cov.lv.x = TRUE}, \code{auto.efa = TRUE}, \code{auto.th = TRUE}, \code{auto.delta = TRUE}, and \code{auto.cov.y = TRUE}. } \value{ An object of class \code{\linkS4class{lavaan}}, for which several methods are available, including a \code{summary} method. } \references{ Yves Rosseel (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. 
\doi{https://doi.org/10.18637/jss.v048.i02}} \seealso{ \code{\link{lavaan}} } \examples{ ## linear growth model with a time-varying covariate model.syntax <- ' # intercept and slope with fixed coefficients i =~ 1*t1 + 1*t2 + 1*t3 + 1*t4 s =~ 0*t1 + 1*t2 + 2*t3 + 3*t4 # regressions i ~ x1 + x2 s ~ x1 + x2 # time-varying covariates t1 ~ c1 t2 ~ c2 t3 ~ c3 t4 ~ c4 ' fit <- growth(model.syntax, data = Demo.growth) summary(fit) } lavaan/man/parameterEstimates.Rd0000644000176200001440000001445214622072672016433 0ustar liggesusers\name{parameterEstimates} \alias{parameterEstimates} \alias{parameterestimates} \title{Parameter Estimates} \description{ Parameter estimates of a latent variable model.} \usage{ parameterEstimates(object, se = TRUE, zstat = TRUE, pvalue = TRUE, ci = TRUE, standardized = FALSE, fmi = FALSE, level = 0.95, boot.ci.type = "perc", cov.std = TRUE, fmi.options = list(), rsquare = FALSE, remove.system.eq = TRUE, remove.eq = TRUE, remove.ineq = TRUE, remove.def = FALSE, remove.nonfree = FALSE, remove.step1 = TRUE, remove.unused = FALSE, add.attributes = FALSE, output = "data.frame", header = FALSE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{se}{Logical. If \code{TRUE}, include column containing the standard errors. If \code{FALSE}, this implies \code{zstat} and \code{pvalue} and \code{ci} are also \code{FALSE}.} \item{zstat}{Logical. If \code{TRUE}, an extra column is added containing the so-called z-statistic, which is simply the value of the estimate divided by its standard error.} \item{pvalue}{Logical. If \code{TRUE}, an extra column is added containing the pvalues corresponding to the z-statistic, evaluated under a standard normal distribution.} \item{ci}{If \code{TRUE}, confidence intervals are added to the output} \item{level}{The confidence level required.} \item{boot.ci.type}{If bootstrapping was used, the type of interval required. The value should be one of \code{"norm"}, \code{"basic"}, \code{"perc"}, or \code{"bca.simple"}. For the first three options, see the help page of the \code{boot.ci} function in the boot package. The \code{"bca.simple"} option produces intervals using the adjusted bootstrap percentile (BCa) method, but with no correction for acceleration (only for bias). Note that the p-value is still computed assuming that the z-statistic follows a standard normal distribution.} \item{standardized}{Logical or character. If \code{TRUE}, standardized estimates are added to the output. Note that \emph{SE}s and tests are still based on unstandardized estimates. Use \code{\link{standardizedSolution}} to obtain \emph{SE}s and test statistics for standardized estimates. If a character vector is passed with any of \code{c("std.lv","std.all","std.nox")}, only the selected standardization methods are added.} \item{cov.std}{Logical. If TRUE, the (residual) observed covariances are scaled by the square root of the `Theta' diagonal elements, and the (residual) latent covariances are scaled by the square root of the `Psi' diagonal elements. If FALSE, the (residual) observed covariances are scaled by the square root of the diagonal elements of the observed model-implied covariance matrix (Sigma), and the (residual) latent covariances are scaled by the square root of diagonal elements of the model-implied covariance matrix of the latent variables.} \item{fmi}{Logical. If \code{TRUE}, an extra column is added containing the fraction of missing information for each estimated parameter. 
Only available if \code{estimator="ML"}, \code{missing="(fi)ml"}, and \code{se="standard"}. See references for more information.} \item{fmi.options}{List. If non-empty, arguments can be provided to alter the default options when the model is fitted with the complete(d) data; otherwise, the same options are used as the original model.} \item{remove.eq}{Logical. If \code{TRUE}, filter the output by removing all rows containing user-specified equality constraints, if any.} \item{remove.system.eq}{Logical. If \code{TRUE}, filter the output by removing all rows containing system-generated equality constraints, if any.} \item{remove.ineq}{Logical. If \code{TRUE}, filter the output by removing all rows containing inequality constraints, if any.} \item{remove.def}{Logical. If \code{TRUE}, filter the output by removing all rows containing parameter definitions, if any.} \item{remove.nonfree}{Logical. If \code{TRUE}, filter the output by removing all rows containing fixed (non-free) parameters.} \item{remove.step1}{Logical. Only used by \code{sam()}. If \code{TRUE}, filter the output by removing all rows corresponding to the measurement parameters that are part of the first step.} \item{remove.unused}{Logical. If \code{TRUE}, filter the output by removing all rows containing automatically added parameters (user == 0) that are nonfree, and with their final (est) values fixed to their default values (typically 1 or 0); currently only used for intercepts and scaling-factors.} \item{rsquare}{Logical. If \code{TRUE}, add additional rows containing the rsquare values (in the \code{est} column) of all endogenous variables in the model. Both the \code{lhs} and \code{rhs} column contain the name of the endogenous variable, while the \code{op} column contains \code{r2}, to indicate that the values in the \code{est} column are rsquare values.} \item{add.attributes}{Deprecated argument. Please use output= instead.} \item{output}{Character. If \code{"data.frame"}, the parameter table is displayed as a standard (albeit lavaan-formatted) data.frame. If \code{"text"} (or alias \code{"pretty"}), the parameter table is prettified, and displayed with subsections (as used by the summary function).} \item{header}{Logical. Only used if \code{output = "text"}. If \code{TRUE}, print a header at the top of the parameter list. This header contains information about the information matrix, whether the saturated (h1) model is structured or unstructured, and which type of standard errors are shown in the output.} } \value{ A data.frame containing the estimated parameters, standard errors, and (by default) z-values, p-values, and the lower and upper values of the confidence intervals. If requested, extra columns are added with standardized versions of the parameter estimates. } \references{ Savalei, V. & Rhemtulla, M. (2012). On obtaining estimates of the fraction of missing information from FIML. Structural Equation Modeling: A Multidisciplinary Journal, 19(3), 477-494.
} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) parameterEstimates(fit) parameterEstimates(fit, output = "text") } lavaan/man/mplus2lavaan.Rd0000644000176200001440000000173414622072672015200 0ustar liggesusers\name{mplus2lavaan} \alias{mplus2lavaan} \alias{lavImport} \title{mplus to lavaan converter} \description{ Read in an Mplus input file, convert it to lavaan syntax, and fit the model.} \usage{ mplus2lavaan(inpfile, run = TRUE) } \arguments{ \item{inpfile}{The filename (including a full path) of the Mplus input file. The data (as referred to in the Mplus input file) should be in the same directory as the Mplus input file.} \item{run}{Whether to run the specified Mplus input syntax (\code{TRUE}) or only to parse and convert the syntax (\code{FALSE}).} } \value{ A \code{lavaan} object with the fitted results of the Mplus model. The parsed and converted Mplus syntax is preserved in the \code{@external} slot of the \code{lavaan} object in the \code{$mplus.inp} element. If \code{run} is \code{FALSE}, a \code{list} of converted syntax is returned. } \author{Michael Hallquist} \seealso{\code{\link{lavExport}}} \examples{ \dontrun{ out <- mplus2lavaan("ex5.1.inp") summary(out) } } lavaan/man/PoliticalDemocracy.Rd0000644000176200001440000000411114622072672016332 0ustar liggesusers\name{PoliticalDemocracy} \alias{PoliticalDemocracy} \docType{data} \title{ Industrialization And Political Democracy Dataset } \description{ The `famous' Industrialization and Political Democracy dataset. This dataset is used throughout Bollen's 1989 book (see pages 12, 17, 36 in chapter 2, pages 228 and following in chapter 7, pages 321 and following in chapter 8). The dataset contains various measures of political democracy and industrialization in developing countries. } \usage{data(PoliticalDemocracy)} \format{ A data frame of 75 observations of 11 variables. \describe{ \item{\code{y1}}{Expert ratings of the freedom of the press in 1960} \item{\code{y2}}{The freedom of political opposition in 1960} \item{\code{y3}}{The fairness of elections in 1960} \item{\code{y4}}{The effectiveness of the elected legislature in 1960} \item{\code{y5}}{Expert ratings of the freedom of the press in 1965} \item{\code{y6}}{The freedom of political opposition in 1965} \item{\code{y7}}{The fairness of elections in 1965} \item{\code{y8}}{The effectiveness of the elected legislature in 1965} \item{\code{x1}}{The gross national product (GNP) per capita in 1960} \item{\code{x2}}{The inanimate energy consumption per capita in 1960} \item{\code{x3}}{The percentage of the labor force in industry in 1960} } } \source{ The dataset was originally retrieved from \verb{http://web.missouri.edu/~kolenikovs/Stat9370/democindus.txt} (link no longer valid; see discussion on SEMNET 18 Jun 2009). The dataset is part of a larger (public) dataset (ICPSR 2532), see \verb{https://www.icpsr.umich.edu/web/ICPSR/studies/2532}. } \references{ Bollen, K. A. (1989). \emph{Structural Equations with Latent Variables.} Wiley Series in Probability and Mathematical Statistics. New York: Wiley. Bollen, K. A. (1979). Political democracy and the timing of development. \emph{American Sociological Review}, 44, 572-587. Bollen, K. A. (1980). Issues in the comparative measurement of political democracy. \emph{American Sociological Review}, 45, 370-390. 
} \examples{ head(PoliticalDemocracy) } lavaan/man/lavTestLRT.Rd0000644000176200001440000001706614622072672014574 0ustar liggesusers\name{lavTestLRT} \alias{lavTestLRT} \alias{lavtestLRT} \alias{LRT} \alias{lavLRTTest} \alias{lavLRT} \alias{anova} \title{LRT test} \description{ LRT test for comparing (nested) lavaan models.} \usage{ lavTestLRT(object, ..., method = "default", test = "default", A.method = "delta", scaled.shifted = TRUE, type = "Chisq", model.names = NULL) anova(object, ...) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{...}{Additional objects of class \code{\linkS4class{lavaan}}.} \item{method}{Character string. The possible options are \code{"satorra.bentler.2001"}, \code{"satorra.bentler.2010"}, \code{"satorra.2000"}, and \code{"standard"}. See details.} \item{test}{Character string specifying which scaled test statistics to use, in case multiple scaled \code{test=} options were requested when fitting the model(s). See details.} \item{A.method}{Character string. The possible options are \code{"exact"} and \code{"delta"}. This is only used when method = \code{"satorra.2000"}. It determines how the Jacobian of the constraint function (the matrix A) will be computed. Note that if \code{A.method = "exact"}, the models must be nested in the parameter sense, while if \code{A.method = "delta"}, they only need to be nested in the covariance matrix sense.} \item{scaled.shifted}{Logical. Only used when method = \code{"satorra.2000"}. If \code{TRUE}, we use a scaled and shifted test statistic; if \code{FALSE}, we use a mean and variance adjusted (Satterthwaite style) test statistic.} \item{type}{Character. If \code{"Chisq"}, the test statistic for each model is the (scaled or unscaled) model fit test statistic. If \code{"Cf"}, the test statistic for each model is computed by the \code{\link{lavTablesFitCf}} function. If \code{"browne.residual.adf"} (alias \code{"browne"}) or \code{"browne.residual.nt"}, the standard chi-squared difference is calculated from each model's residual-based statistic.} \item{model.names}{Character vector. If provided, use these model names in the first column of the anova table.} } \value{ An object of class anova. When given a single argument, it simply returns the test statistic of this model. When given a sequence of objects, this function tests the models against one another, after reordering the models according to their degrees of freedom. } \details{ The \code{anova} function for lavaan objects simply calls the \code{lavTestLRT} function, which has a few additional arguments. The only \code{test=} options that currently have actual consequences are \code{"satorra.bentler"}, \code{"yuan.bentler"}, or \code{"yuan.bentler.mplus"} because \code{"mean.var.adjusted"} and \code{"scaled.shifted"} are currently distinguished by the \code{scaled.shifted} argument. See \code{\link{lavOptions}} for details about the \code{test=} options implied by robust \code{estimator=} options. The \code{"default"} is to select the first available scaled statistic, if any. To check which test(s) were calculated when fitting your model(s), use \code{lavInspect(fit, "options")$test}. If \code{type = "Chisq"} and the test statistics are scaled, a special scaled difference test statistic is computed. If \code{method} is \code{"satorra.bentler.2001"}, a simple approximation is used, as described in Satorra & Bentler (2001). In some settings, this can lead to a negative test statistic.
To ensure a positive test statistic, we can use the method proposed by Satorra & Bentler (2010). Alternatively, when \code{method="satorra.2000"}, the original formulas of Satorra (2000) are used. The latter is used for model comparison, when \code{...} contains additional (nested) models. Even when test statistics are scaled in \code{object} or \code{...}, users may request the \code{method="standard"} test statistic, without a robust adjustment. } \note{ If there is a \code{\linkS4class{lavaan}} model stored in \code{object@external$h1.model}, it will be added to \code{\dots}. } \references{ Satorra, A. (2000). Scaled and adjusted restricted tests in multi-sample analysis of moment structures. In Heijmans, R.D.H., Pollock, D.S.G. & Satorra, A. (eds.), \emph{Innovations in multivariate statistical analysis}: \emph{A Festschrift for Heinz Neudecker} (pp. 233-247). London, UK: Kluwer Academic Publishers. Satorra, A., & Bentler, P. M. (2001). A scaled difference chi-square test statistic for moment structure analysis. \emph{Psychometrika, 66}(4), 507-514. \doi{10.1007/BF02296192} Satorra, A., & Bentler, P. M. (2010). Ensuring positiveness of the scaled difference chi-square test statistic. \emph{Psychometrika, 75}(2), 243-248. \doi{10.1007/s11336-009-9135-y} } \examples{ HS.model <- ' visual =~ x1 + b1*x2 + x3 textual =~ x4 + b2*x5 + x6 speed =~ x7 + b3*x8 + x9 ' fit1 <- cfa(HS.model, data = HolzingerSwineford1939) fit0 <- cfa(HS.model, data = HolzingerSwineford1939, orthogonal = TRUE) lavTestLRT(fit1, fit0) ## When multiple test statistics are selected when the model is fitted, ## use the type= and test= arguments to select a test for comparison. ## refit models, requesting 6 test statistics (in addition to "standard") t6.1 <- cfa(HS.model, data = HolzingerSwineford1939, test = c("browne.residual.adf","scaled.shifted","mean.var.adjusted", "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus")) t6.0 <- cfa(HS.model, data = HolzingerSwineford1939, orthogonal = TRUE, test = c("browne.residual.adf","scaled.shifted","mean.var.adjusted", "satorra.bentler", "yuan.bentler", "yuan.bentler.mplus")) ## By default (test="default", type="Chisq"), the first scaled statistic ## requested will be used.
Here, that is "scaled.shifted" lavTestLRT(t6.1, t6.0) ## But even if "satorra.bentler" were requested first, method="satorra.2000" ## provides the scaled-shifted chi-squared difference test: lavTestLRT(t6.1, t6.0, method = "satorra.2000") ## == lavTestLRT(update(t6.1, test = "scaled.shifted"), update(t6.0, test = "scaled.shifted")) ## The mean- and variance-adjusted (Satterthwaite) statistic implies ## scaled.shifted = FALSE lavTestLRT(t6.1, t6.0, method = "satorra.2000", scaled.shifted = FALSE) ## Because "satorra.bentler" is not the first scaled test in the list, ## we MUST request it explicitly: lavTestLRT(t6.1, t6.0, test = "satorra.bentler") # method="satorra.bentler.2001" ## == lavTestLRT(update(t6.1, test = "satorra.bentler"), ## update(t6.0, test = "satorra.bentler")) ## The "strictly-positive test" is necessary when the above test is < 0: lavTestLRT(t6.1, t6.0, test = "satorra.bentler", method = "satorra.bentler.2010") ## Likewise, other scaled statistics can be selected: lavTestLRT(t6.1, t6.0, test = "yuan.bentler") ## == lavTestLRT(update(t6.1, test = "yuan.bentler"), ## update(t6.0, test = "yuan.bentler")) lavTestLRT(t6.1, t6.0, test = "yuan.bentler.mplus") ## == lavTestLRT(update(t6.1, test = "yuan.bentler.mplus"), ## update(t6.0, test = "yuan.bentler.mplus")) ## To request the difference between Browne's (1984) residual-based statistics, ## rather than statistics based on the fitted model's discrepancy function, ## use the type= argument: lavTestLRT(t6.1, t6.0, type = "browne.residual.adf") ## Despite requesting multiple robust tests, it is still possible to obtain ## the standard chi-squared difference test (i.e., without a robust correction) lavTestLRT(t6.1, t6.0, method = "standard") ## == lavTestLRT(update(t6.1, test = "standard"), update(t6.0, test = "standard")) } lavaan/man/lavCor.Rd0000644000176200001440000001465714622072672014031 0ustar liggesusers\name{lavCor} \alias{lavCor} \title{Polychoric, polyserial and Pearson correlations} \description{ Fit an unrestricted model to compute polychoric, polyserial and/or Pearson correlations.} \usage{ lavCor(object, ordered = NULL, group = NULL, missing = "listwise", ov.names.x = NULL, sampling.weights = NULL, se = "none", test = "none", estimator = "two.step", baseline = FALSE, ..., cor.smooth = FALSE, cor.smooth.tol = 1e-04, output = "cor") } \arguments{ \item{object}{Either a \code{data.frame}, or an object of class \code{\linkS4class{lavaan}}. If the input is a \code{data.frame}, and some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} \item{ordered}{Character vector. Only used if \code{object} is a \code{data.frame}. Treat these variables as ordered (ordinal) variables. Importantly, all other variables will be treated as numeric (unless they are declared as ordered in the original data frame.)} \item{group}{Only used if \code{object} is a \code{data.frame}. Specify a grouping variable.} \item{missing}{If \code{"listwise"}, cases with missing values are removed listwise from the data frame. If \code{"direct"} or \code{"ml"} or \code{"fiml"} and the estimator is maximum likelihood, an EM algorithm is used to estimate the unrestricted covariance matrix (and mean vector). If \code{"pairwise"}, pairwise deletion is used. If \code{"default"}, the value is set depending on the estimator and the mimic option.} \item{sampling.weights}{Only used if \code{object} is a \code{data.frame}. 
Specify a variable containing sampling weights.} \item{ov.names.x}{Only used if \code{object} is a \code{data.frame}. Specify variables that need to be treated as exogenous. Only used if at least one variable is declared as ordered.} \item{se}{Only used if \code{output} (see below) contains standard errors. See \code{\link{lavOptions}} for possible options.} \item{test}{Only used if output is \code{"fit"} or \code{"lavaan"}. See \code{\link{lavOptions}} for possible options.} \item{estimator}{If \code{"none"} or \code{"two.step"} or \code{"two.stage"}, only starting values are computed for the correlations (and thresholds), without any further estimation. If all variables are continuous, the starting values are the sample covariances (converted to correlations if \code{output = "cor"}). If at least one variable is ordered, the thresholds are computed using univariate information only. The polychoric and/or polyserial correlations are computed in a second stage, keeping the values of the thresholds constant. If an estimator (other than \code{"two.step"} or \code{"two.stage"}) is specified (for example \code{estimator = "PML"}), these starting values are further updated by fitting the unrestricted model using the chosen estimator. See the \code{\link{lavaan}} function for alternative estimators.} \item{baseline}{Only used if output is \code{"fit"} or \code{"lavaan"}. If \code{TRUE}, a baseline model is also estimated. Note that the \code{test} argument should also be set to a value other than \code{"none"}.} \item{...}{Optional parameters that are passed to the \code{\link{lavaan}} function.} \item{cor.smooth}{Logical. Only used if \code{output = "cor"}. If \code{TRUE}, ensure the resulting correlation matrix is positive definite. The following simple method is used: an eigenvalue decomposition is computed; then, eigenvalues smaller than \code{cor.smooth.tol} are set to be equal to \code{cor.smooth.tol}, before the matrix is again reconstructed. Finally, the matrix (which may no longer have unit diagonal elements) is converted to a correlation matrix using \code{cov2cor}.} \item{cor.smooth.tol}{Numeric. Smallest eigenvalue used when reconstructing the correlation matrix after an eigenvalue decomposition.} \item{output}{If \code{"cor"}, the function returns the correlation matrix only. If \code{"cov"}, the function returns the covariance matrix (this only makes a difference if at least one variable is numeric). If \code{"th"} or \code{"thresholds"}, only the thresholds are returned. If \code{"sampstat"}, the output equals the result of \code{lavInspect(fit, "sampstat")} where fit is the unrestricted model. If \code{"est"} or \code{"pe"} or \code{"parameterEstimates"}, the output equals the result of \code{parameterEstimates(fit)}. Finally, if output is \code{"fit"} or \code{"lavaan"}, the function returns an object of class \code{\linkS4class{lavaan}}.} } \details{ This function is a wrapper around the \code{\link{lavaan}} function, but where the model is defined as the unrestricted model. The following free parameters are included: all covariances/correlations among the variables, variances for continuous variables, means for continuous variables, thresholds for ordered variables, and if exogenous variables are included (\code{ov.names.x} is not empty) while some variables are ordered, also the regression slopes enter the model. 
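To make the parameterization concrete, here is an illustrative sketch only (hypothetical variable names \code{x1}-\code{x3}; it mirrors the description above rather than the exact code generated internally): for three continuous variables, the unrestricted model corresponds to lavaan syntax of the form \preformatted{# all pairwise covariances/correlations are free
x1 ~~ x2
x1 ~~ x3
x2 ~~ x3
# free variances and means for the continuous variables
x1 ~~ x1
x2 ~~ x2
x3 ~~ x3
x1 ~ 1
x2 ~ 1
x3 ~ 1} For ordered variables, free thresholds take the place of the means and variances, and the correlations are polychoric (or polyserial).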
} \value{ By default, if \code{output = "cor"} or \code{output = "cov"}, a symmetric matrix (of class \code{"lavaan.matrix.symmetric"}, which only affects the way the matrix is printed). If \code{output = "th"}, a named vector of thresholds. If \code{output = "fit"} or \code{output = "lavaan"}, an object of class \code{\linkS4class{lavaan}}. } \references{ Olsson, U. (1979). Maximum likelihood estimation of the polychoric correlation coefficient. Psychometrika, 44(4), 443-460. Olsson, U., Drasgow, F., & Dorans, N. J. (1982). The polyserial correlation coefficient. Psychometrika, 47(3), 337-347. } \seealso{ \code{\link{lavaan}} } \examples{ # Holzinger and Swineford (1939) example HS9 <- HolzingerSwineford1939[,c("x1","x2","x3","x4","x5", "x6","x7","x8","x9")] # Pearson correlations lavCor(HS9) # ordinal version, with three categories HS9ord <- as.data.frame( lapply(HS9, cut, 3, labels = FALSE) ) # polychoric correlations, two-stage estimation lavCor(HS9ord, ordered=names(HS9ord)) # thresholds only lavCor(HS9ord, ordered=names(HS9ord), output = "th") # polychoric correlations, with standard errors lavCor(HS9ord, ordered=names(HS9ord), se = "standard", output = "est") # polychoric correlations, full output fit.un <- lavCor(HS9ord, ordered=names(HS9ord), se = "standard", output = "fit") summary(fit.un) } lavaan/man/lavTablesFit.Rd0000644000176200001440000001154214622072672015151 0ustar liggesusers\name{lavTablesFitCp} \alias{lavTablesFit} \alias{lavTablesFitCp} \alias{lavTablesFitCf} \alias{lavTablesFitCm} \title{Pairwise maximum likelihood fit statistics} \description{ Three measures of fit for the pairwise maximum likelihood estimation method that are based on likelihood ratios (LR) are defined: \eqn{C_F}, \eqn{C_M}, and \eqn{C_P}. Subscript \eqn{F} signifies a comparison of model-implied proportions of full response patterns with observed sample proportions, subscript \eqn{M} signifies a comparison of model-implied proportions of full response patterns with the proportions implied by the assumption of multivariate normality, and subscript \eqn{P} signifies a comparison of model-implied proportions of pairs of item responses with the observed proportions of pairs of item responses. } \usage{ lavTablesFitCf(object) lavTablesFitCp(object, alpha = 0.05) lavTablesFitCm(object) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{alpha}{The nominal level of significance of global fit.} } \references{ Barendse, M. T., Ligtvoet, R., Timmerman, M. E., & Oort, F. J. (2016). Structural Equation Modeling of Discrete Data: Model Fit after Pairwise Maximum Likelihood. \emph{Frontiers in Psychology, 7}, 1-8. Joreskog, K. G., & Moustaki, I. (2001). Factor analysis of ordinal variables: A comparison of three approaches. \emph{Multivariate Behavioral Research, 36}, 347-387. } \details{ \subsection{\eqn{C_F}}{ The \eqn{C_F} statistic compares the log-likelihood of the model-implied proportions (\eqn{\pi_r}) with the observed proportions (\eqn{p_r}) of the full multivariate response patterns: \deqn{ C_F = 2N\sum_{r}p_{r}\ln[p_{r}/\hat{\pi}_{r}], } which asymptotically has a chi-square distribution with \deqn{ df_F = m^k - n - 1, } where \eqn{k} denotes the number of items with discrete response scales, \eqn{m} denotes the number of response options, and \eqn{n} denotes the number of parameters to be estimated. Notice that \eqn{C_F} results may be biased because of large numbers of empty cells in the multivariate contingency table.
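As a purely numerical illustration (hypothetical values, not tied to the example below): with \eqn{k = 4} binary items (\eqn{m = 2}) and \eqn{n = 8} parameters to be estimated, \eqn{df_F = 2^4 - 8 - 1 = 7}.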
} \subsection{\eqn{C_M}}{ The \eqn{C_M} statistic is based on the \eqn{C_F} statistic, and compares the proportions implied by the model of interest (Model 1) with proportions implied by the assumption of an underlying multivariate normal distribution (Model 0): \deqn{ C_M = C_{F1} - C_{F0}, } where \eqn{C_{F0}} is \eqn{C_F} for Model 0 and \eqn{C_{F1}} is \eqn{C_F} for Model 1. Statistic \eqn{C_M} has a chi-square distribution with degrees of freedom \deqn{ df_M = k(k-1)/2 + k(m-1) - n_{1}, } where \eqn{k} denotes the number of items with discrete response scales, \eqn{m} denotes the number of response options, \eqn{k(k-1)/2} denotes the number of polychoric correlations, \eqn{k(m-1)} denotes the number of thresholds, and \eqn{n_1} is the number of parameters of the model of interest. Notice that \eqn{C_M} results may be biased because of large numbers of empty cells in the multivariate contingency table. However, the bias may cancel out as both Model 1 and Model 0 contain the same pattern of empty responses. } \subsection{\eqn{C_P}}{ With the \eqn{C_P} statistic we only consider pairs of responses, and compare observed sample proportions (\eqn{p}) with model-implied proportions of pairs of responses (\eqn{\pi}). For items \eqn{i} and \eqn{j} we obtain a pairwise likelihood ratio test statistic \eqn{C_{P_{ij}}} \deqn{ C_{P_{ij}}=2N\sum_{c_i=1}^m \sum_{c_j=1}^m p_{c_i,c_j}\ln[p_{c_i,c_j}/\hat{\pi}_{c_i,c_j}], } where \eqn{m} denotes the number of response options and \eqn{N} denotes sample size. The \eqn{C_P} statistic has an asymptotic chi-square distribution with degrees of freedom equal to the information \eqn{(m^2 -1)} minus the number of parameters (\eqn{2(m-1)} thresholds and 1 correlation), \deqn{ df_P = m^{2} - 2(m - 1) - 2. } As \eqn{k} denotes the number of items, there are \eqn{k(k-1)/2} possible pairs of items. The \eqn{C_P} statistic should therefore be applied with a Bonferroni adjusted level of significance \eqn{\alpha^*}, with \deqn{ \alpha^* = \alpha / (k(k-1)/2), } to keep the family-wise error rate at \eqn{\alpha}. The hypothesis of overall goodness-of-fit is tested at \eqn{\alpha} and rejected as soon as \eqn{C_P} is significant at \eqn{\alpha^*} for at least one pair of items. Notice that with dichotomous items, \eqn{m = 2}, and \eqn{df_P = 0}, so that the hypothesis cannot be tested. } } \seealso{ \code{\link{lavTables}, \link{lavaan}} } \examples{ # Data HS9 <- HolzingerSwineford1939[,c("x1","x2","x3","x4","x5", "x6","x7","x8","x9")] HSbinary <- as.data.frame( lapply(HS9, cut, 2, labels=FALSE) ) # Single group example with one latent factor HS.model <- ' trait =~ x1 + x2 + x3 + x4 ' fit <- cfa(HS.model, data=HSbinary[,1:4], ordered=names(HSbinary[,1:4]), estimator="PML") lavTablesFitCm(fit) lavTablesFitCp(fit) lavTablesFitCf(fit) } lavaan/man/Demo.growth.Rd0000644000176200001440000000205714622072672014767 0ustar liggesusers\name{Demo.growth} \alias{Demo.growth} \docType{data} \title{ Demo dataset for illustrating a linear growth model. } \description{ A toy dataset containing measures on 4 time points (t1, t2, t3 and t4), two predictors (x1 and x2) influencing the random intercept and slope, and a time-varying covariate (c1, c2, c3 and c4). } \usage{data(Demo.growth)} \format{ A data frame of 400 observations of 10 variables.
\describe{ \item{\code{t1}}{Measured value at time point 1} \item{\code{t2}}{Measured value at time point 2} \item{\code{t3}}{Measured value at time point 3} \item{\code{t4}}{Measured value at time point 4} \item{\code{x1}}{Predictor 1 influencing intercept and slope} \item{\code{x2}}{Predictor 2 influencing intercept and slope} \item{\code{c1}}{Time-varying covariate time point 1} \item{\code{c2}}{Time-varying covariate time point 2} \item{\code{c3}}{Time-varying covariate time point 3} \item{\code{c4}}{Time-varying covariate time point 4} } } \seealso{ \code{\link{growth}} } \examples{ head(Demo.growth) } lavaan/man/lav_partable.Rd0000644000176200001440000001453614622072672015233 0ustar liggesusers\name{lav_partable} \alias{lav_partable_independence} \alias{lav_partable_unrestricted} \alias{lav_partable_df} \alias{lav_partable_ndat} \alias{lav_partable_npar} \alias{lav_partable_labels} \alias{lav_partable_from_lm} \alias{lav_partable_complete} \alias{lav_partable_attributes} \alias{lav_partable_merge} \alias{lav_partable_add} \title{lavaan partable functions} \description{Utility functions related to the parameter table (partable)} \usage{ # extract information from a parameter table lav_partable_df(partable) lav_partable_ndat(partable) lav_partable_npar(partable) lav_partable_attributes(partable, pta = NULL) # generate parameter labels lav_partable_labels(partable, blocks = c("group", "level"), group.equal = "", group.partial = "", type = "user") # generate parameter table for specific models lav_partable_independence(lavobject = NULL, lavdata = NULL, lavpta = NULL, lavoptions = NULL, lavsamplestats = NULL, lavh1 = NULL, sample.cov = NULL, sample.mean = NULL, sample.slopes = NULL, sample.th = NULL, sample.th.idx = NULL, sample.cov.x = NULL, sample.mean.x = NULL) lav_partable_unrestricted(lavobject = NULL, lavdata = NULL, lavpta = NULL, lavoptions = NULL, lavsamplestats = NULL, lavh1 = NULL, sample.cov = NULL, sample.mean = NULL, sample.slopes = NULL, sample.th = NULL, sample.th.idx = NULL, sample.cov.x = NULL, sample.mean.x = NULL) lav_partable_from_lm(object, est = FALSE, label = FALSE, as.data.frame. = FALSE) # complete a parameter table only containing a few columns (lhs,op,rhs) lav_partable_complete(partable = NULL, start = TRUE) # merge two parameter tables lav_partable_merge(pt1 = NULL, pt2 = NULL, remove.duplicated = FALSE, fromLast = FALSE, warn = TRUE) # add a single parameter to an existing parameter table lav_partable_add(partable = NULL, add = list()) } \arguments{ \item{partable}{A parameter table. See \code{\link{lavParTable}} for more information.} \item{blocks}{Character vector. Which columns in the parameter table should be taken to distinguish between different blocks of parameters (and hence be given different labels)? If \code{"blocks"} includes \code{"group"}, a suffix \code{".g"} and the group number (or group label) is added for the parameters of all but the first group. If \code{"blocks"} includes \code{"level"}, a suffix \code{".l"} and the level number is added for the parameters of all but the first level. If \code{"blocks"} includes, say \code{"foo"}, a suffix \code{".foo"} and the corresponding value of \code{"foo"} is added to all parameters.} \item{group.equal}{The same options can be used here as in the fitting functions. 
Parameters that are constrained to be equal across groups will be given the same label.} \item{group.partial}{A vector of character strings containing the labels of the parameters which should be free in all groups.} \item{type}{Character string. Can be either `user' or `free' to select all entries or only the free parameters from the parameter table, respectively.} \item{lavobject}{An object of class `lavaan'. If this argument is provided, it should be the only argument. All the values for the other arguments are extracted from this object.} \item{lavdata}{An object of class `lavData'. The Data slot from a lavaan object.} \item{lavoptions}{A named list. The Options slot from a lavaan object.} \item{lavsamplestats}{An object of class `lavSampleStats'. The SampleStats slot from a lavaan object.} \item{lavh1}{A named list. The h1 slot from a lavaan object.} \item{lavpta}{The pta (parameter table attributes) slot from a lavaan object.} \item{sample.cov}{Optional list of numeric matrices. Each list element contains a sample variance-covariance matrix for this group. If provided, these values will be used as starting values.} \item{sample.mean}{Optional list of numeric vectors. Each list element contains a sample mean vector for this group. If provided, these values will be used as starting values.} \item{sample.slopes}{Optional list of numeric matrices. Each list element contains the sample slopes for this group (only used when \code{conditional.x = TRUE}). If provided, these values will be used as starting values.} \item{sample.th}{Optional list of numeric vectors. Each list element contains a vector of sample thresholds for this group. If provided (and \code{sample.th.idx} is also provided), these values will be used as starting values.} \item{sample.th.idx}{Optional list of integers. Each list element contains the threshold indices for this group.} \item{sample.cov.x}{Optional list of numeric matrices. Each list element contains a sample variance-covariance matrix for the exogenous variables for this group (only used when \code{conditional.x = TRUE}). If provided, these values will be used as starting values.} \item{sample.mean.x}{Optional list of numeric vectors. Each list element contains a sample mean vector for the exogenous variables for this group (only used when \code{conditional.x = TRUE}). If provided, these values will be used as starting values.} \item{est}{Logical. If \code{TRUE}, include the fitted estimates in the parameter table.} \item{label}{Logical. If \code{TRUE}, include parameter labels in the parameter table.} \item{as.data.frame.}{Logical. If \code{TRUE}, return the parameter table as a data.frame.} \item{object}{An object of class \code{lm}.} \item{start}{Logical. If \code{TRUE}, include a start column, based on the simple method for generating starting values.} \item{pta}{A list containing parameter attributes.} \item{pt1}{A parameter table.} \item{pt2}{A parameter table.} \item{remove.duplicated}{Logical. If \code{TRUE}, remove duplicated elements when merging two parameter tables.} \item{fromLast}{Logical. If \code{TRUE}, duplicated elements are considered from the bottom of the merged parameter table.} \item{warn}{Logical. If \code{TRUE}, a warning is produced when duplicated elements are found, when merging two parameter tables.} \item{add}{A named list.
A single row of a parameter table as a named list.} } \examples{ # generate a parameter table for an independence model HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) lav <- lav_partable_independence(fit) as.data.frame(lav, stringsAsFactors = FALSE) # how many free parameters? lav_partable_npar(lav) # how many sample statistics? lav_partable_ndat(lav) } lavaan/man/lavaanList.Rd0000644000176200001440000001100314622072672014661 0ustar liggesusers\name{lavaanList} \alias{lavaanList} \alias{semList} \alias{cfaList} \title{Fit List of Latent Variable Models} \description{ Fit the same latent variable model, for a (potentially large) number of datasets.} \usage{ lavaanList(model = NULL, dataList = NULL, dataFunction = NULL, dataFunction.args = list(), ndat = length(dataList), cmd = "lavaan", ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL, iseed = NULL) semList(model = NULL, dataList = NULL, dataFunction = NULL, dataFunction.args = list(), ndat = length(dataList), ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL, iseed = NULL) cfaList(model = NULL, dataList = NULL, dataFunction = NULL, dataFunction.args = list(), ndat = length(dataList), ..., store.slots = c("partable"), FUN = NULL, show.progress = FALSE, store.failed = FALSE, parallel = c("no", "multicore", "snow"), ncpus = max(1L, parallel::detectCores() - 1L), cl = NULL, iseed = NULL) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (e.g., the output of the \code{lavaanify()} function) is also accepted.} \item{dataList}{List. Each element contains a full data frame containing the observed variables used in the model.} \item{dataFunction}{Function. A function that generates a full data frame containing the observed variables used in the model. It can also be a matrix, if the columns are named.} \item{dataFunction.args}{List. Optional list of arguments that are passed to the \code{dataFunction} function.} \item{ndat}{Integer. The number of datasets that should be generated using the \code{dataFunction} function.} \item{cmd}{Character. Which command is used to run the sem models. The possible choices are \code{"sem"}, \code{"cfa"} or \code{"lavaan"}, determining how we deal with default options.} \item{\dots}{Other named arguments for the \code{lavaan} function.} \item{store.slots}{Character vector. Which slots (from a lavaan object) should be stored for each dataset? The possible choices are \code{"timing"}, \code{"partable"}, \code{"data"}, \code{"samplestats"}, \code{"vcov"}, \code{"test"}, \code{"optim"}, \code{"h1"}, \code{"loglik"}, or \code{"implied"}. Finally, \code{"all"} selects all slots.} \item{FUN}{Function. A function which, when applied to the \code{\linkS4class{lavaan}} object, returns the information of interest.} \item{store.failed}{Logical. If \code{TRUE}, write (to tempdir()) the dataset and (if available) the fitted object when the estimation for a particular dataset somehow failed. This will allow post hoc inspection of the problem.} \item{parallel}{The type of parallel operation to be used (if any).
If missing, the default is \code{"no"}.} \item{ncpus}{Integer. The number of processes to be used in parallel operation: typically one would set this to the number of available CPUs.} \item{cl}{An optional \pkg{parallel} or \pkg{snow} cluster for use if \code{parallel = "snow"}. If not supplied, a cluster on the local machine is created for the duration of the \code{lavaanList} call.} \item{iseed}{An integer to set the seed, or \code{NULL} if no reproducible seeds are needed. To make this work, make sure the first \code{RNGkind()} element is \code{"L'Ecuyer-CMRG"}. You can check this by typing \code{RNGkind()} in the console. You can set it by typing \code{RNGkind("L'Ecuyer-CMRG")}, before the lavaanList functions are called.} \item{show.progress}{If \code{TRUE}, show information for each dataset.} } \value{ An object of class \code{\linkS4class{lavaanList}}, for which several methods are available, including a \code{summary} method. } \seealso{ class \code{\linkS4class{lavaanList}} } \examples{ # The Holzinger and Swineford (1939) example HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' # a data generating function generateData <- function() simulateData(HS.model, sample.nobs = 100) set.seed(1234) fit <- semList(HS.model, dataFunction = generateData, ndat = 5, store.slots = "partable") # show parameter estimates, per dataset coef(fit) } lavaan/man/lav_samplestats.Rd0000644000176200001440000000232014622072672015765 0ustar liggesusers\name{lav_samplestats} \alias{lav_samplestats_from_data} \title{lavaan samplestats functions} \description{Utility functions related to the sample statistics} \usage{ # generate samplestats object from full data lav_samplestats_from_data(lavdata = NULL, lavoptions = NULL, WLS.V = NULL, NACOV = NULL) } \arguments{ \item{lavdata}{A lavdata object.} \item{lavoptions}{A named list. The Options slot from a lavaan object.} \item{WLS.V}{A user-provided weight matrix.} \item{NACOV}{A user-provided matrix containing the elements of (N times) the asymptotic variance-covariance matrix of the sample statistics. For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group.} } \examples{ # fit a CFA model HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) # extract data slot and options lavdata <- fit@Data lavoptions <- lavInspect(fit, "options") # generate sample statistics object sampleStats <- lav_samplestats_from_data(lavdata = lavdata, lavoptions = lavoptions) } lavaan/man/lavNames.Rd0000644000176200001440000000522014622072672014333 0ustar liggesusers\name{lavNames} \alias{lavNames} \alias{lavaanNames} \title{lavaan Names} \description{ Extract variable names from a fitted lavaan object.} \usage{ lavNames(object, type = "ov", ...) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{type}{Character. The type of variables whose names should be extracted. See details for a complete list.} \item{...}{Additional selection variables. For example \code{"group = 2L"} (in a multiple-group analysis) only considers the variables included in the model for the second group.} } \details{ The order of the variable names, as returned by \code{lavNames}, determines the order in which the variables are listed in the parameter table, and therefore also in the summary output.
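For example (a minimal sketch, where \code{fit} is any fitted lavaan object, as in the example below): \preformatted{lavNames(fit, "lv")      # names of the latent variables
lavNames(fit, "ov.ord")  # names of the ordinal observed variables}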
The following variable types are available: \itemize{ \item \code{"ov"}: observed variables \item \code{"ov.x"}: (pure) exogenous observed variables (no mediators) \item \code{"ov.nox"}: non-exogenous observed variables \item \code{"ov.model"}: modelled observed variables (joint vs conditional) \item \code{"ov.y"}: (pure) endogenous variables (dependent only) (no mediators) \item \code{"ov.num"}: numeric observed variables \item \code{"ov.ord"}: ordinal observed variables \item \code{"ov.ind"}: observed indicators of latent variables \item \code{"ov.orphan"}: lonely observed variables (only intercepts/variances appear in the model syntax) \item \code{"ov.interaction"}: interaction terms (defined by the colon operator) \item \code{"th"}: threshold names (ordinal variables only) \item \code{"th.mean"}: threshold names for ordinal + numeric variables (if any) \item \code{"lv"}: latent variables \item \code{"lv.regular"}: latent variables (defined by =~ only) \item \code{"lv.formative"}: latent variables (defined by <~ only) \item \code{"lv.x"}: (pure) exogenous variables \item \code{"lv.y"}: (pure) endogenous variables \item \code{"lv.nox"}: non-exogenous latent variables \item \code{"lv.nonnormal"}: latent variables with non-normal indicators \item \code{"lv.interaction"}: interaction terms at the latent level \item \code{"eqs.y"}: variables that appear as dependent variables in a regression formula (but not indicators of latent variables) \item \code{"eqs.x"}: variables that appear as independent variables in a regression formula } } \seealso{\code{\link{lavaanify}}, \code{\link{parTable}}} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) lavNames(fit, "ov") } lavaan/man/plot.InformativeTesting.Rd0000644000176200001440000001362514622072672017373 0ustar liggesusers\name{InformativeTesting methods} \alias{print.InformativeTesting} \alias{plot.InformativeTesting} \title{Methods for output InformativeTesting()} \description{The print function shows the results of the Type A and Type B hypothesis tests. The plot function plots the distributions of bootstrapped LRT values and plug-in p-values.} \usage{ \method{print}{InformativeTesting}(x, digits = max(3, getOption("digits") - 3), ...) \method{plot}{InformativeTesting}(x, ..., type = c("lr","ppv"), main = "main", xlab = "xlabel", ylab = "Frequency", freq = TRUE, breaks = 15, cex.main = 1, cex.lab = 1, cex.axis = 1, col = "grey", border = par("fg"), vline = TRUE, vline.col = c("red", "blue"), lty = c(1,2), lwd = 1, legend = TRUE, bty = "o", cex.legend = 1, loc.legend = "topright") } \arguments{ \item{x}{An object of class \code{"InformativeTesting"}.} \item{digits}{The number of significant digits to use when printing.} \item{...}{Currently not used.} \item{type}{If \code{"lr"}, a distribution of the first-level bootstrapped LR values is plotted. If \code{"ppv"}, a distribution of the bootstrapped plug-in p-values is plotted.} \item{main}{The main title(s) for the plot(s).} \item{xlab}{A label for the x axis; the default depends on the input type.} \item{ylab}{A label for the y axis.} \item{freq}{Logical; if \code{TRUE}, the histogram graphic is a representation of frequencies, the counts component of the result; if \code{FALSE}, probability densities, component density, are plotted (so that the histogram has a total area of one).
The default is set to \code{TRUE}.} \item{breaks}{See \code{\link[graphics]{hist}}.} \item{cex.main}{The magnification to be used for main titles relative to the current setting of cex.} \item{cex.lab}{The magnification to be used for x and y labels relative to the current setting of cex.} \item{cex.axis}{The magnification to be used for axis annotation relative to the current setting of cex.} \item{col}{A colour to be used to fill the bars. The default of \code{NULL} yields unfilled bars.} \item{border}{Color for rectangle border(s). The default means \code{par("fg")}.} \item{vline}{Logical; if \code{TRUE}, a vertical line is drawn at the observed LRT value. If \code{double.bootstrap = "FDB"}, a vertical line is drawn at the 1-p* quantile of the second-level LRT values, where p* is the first-level bootstrapped p-value.} \item{vline.col}{Color(s) for the vline.LRT.} \item{lty}{The line type. Line types can either be specified as an integer (0=blank, 1=solid (default), 2=dashed, 3=dotted, 4=dotdash, 5=longdash, 6=twodash) or as one of the character strings "blank", "solid", "dashed", "dotted", "dotdash", "longdash", or "twodash", where "blank" uses 'invisible lines' (i.e., does not draw them).} \item{lwd}{The line width, a positive number, defaulting to 1.} \item{legend}{Logical; if \code{TRUE}, a legend is added to the plot.} \item{bty}{A character string which determines the type of box which is drawn about plots. If bty is one of "o" (the default), "l", "7", "c", "u", or "]" the resulting box resembles the corresponding upper case letter. A value of "n" suppresses the box.} \item{cex.legend}{A numerical value giving the amount by which the legend text and symbols should be magnified relative to the default. This starts as 1 when a device is opened, and is reset when the layout is changed.} \item{loc.legend}{The location of the legend, specified by a single keyword from the list \code{"bottomright"}, \code{"bottom"}, \code{"bottomleft"}, \code{"left"}, \code{"topleft"}, \code{"top"}, \code{"topright"}, \code{"right"} and \code{"center"}.} } \examples{ \dontrun{ ######################### ### real data example ### ######################### # Multiple group path model for facial burns example. # model syntax with starting values. burns.model <- 'Selfesteem ~ Age + c(m1, f1)*TBSA + HADS + start(-.10, -.20)*TBSA HADS ~ Age + c(m2, f2)*TBSA + RUM + start(.10, .20)*TBSA ' # constraints syntax burns.constraints <- 'f2 > 0 ; m1 < 0 m2 > 0 ; f1 < 0 f2 > m2 ; f1 < m1' # we only generate 2 bootstrap samples in this example; in practice # you may wish to use a much higher number. # the double bootstrap was switched off; in practice you probably # want to set it to "standard".
example1 <- InformativeTesting(model = burns.model, data = FacialBurns, R = 2, constraints = burns.constraints, double.bootstrap = "no", group = "Sex") example1 plot(example1) ########################## ### artificial example ### ########################## # Simple ANOVA model with 3 groups (N = 20 per group) set.seed(1234) Y <- cbind(c(rnorm(20,0,1), rnorm(20,0.5,1), rnorm(20,1,1))) grp <- c(rep("1", 20), rep("2", 20), rep("3", 20)) Data <- data.frame(Y, grp) # create model matrix fit.lm <- lm(Y ~ grp, data = Data) mfit <- fit.lm$model mm <- model.matrix(mfit) Y <- model.response(mfit) X <- data.frame(mm[,2:3]) names(X) <- c("d1", "d2") Data.new <- data.frame(Y, X) # model model <- 'Y ~ 1 + a1*d1 + a2*d2' # fit without constraints fit <- sem(model, data = Data.new) # constraints syntax: mu1 < mu2 < mu3 constraints <- ' a1 > 0 a1 < a2 ' # we only generate 10 bootstrap samples in this example; in practice # you may wish to use a much higher number, say > 1000. The double # bootstrap is not necessary in case of a univariate ANOVA model. example2 <- InformativeTesting(model = model, data = Data.new, start = parTable(fit), R = 10L, double.bootstrap = "no", constraints = constraints) example2 # plot(example2) } } \author{ Leonard Vanbrabant \email{lgf.vanbrabant@gmail.com} } lavaan/man/varTable.Rd0000644000176200001440000000320014622072672014321 0ustar liggesusers\name{varTable} \alias{varTable} \alias{vartable} \alias{variableTable} \alias{variabletable} \title{Variable Table} \description{ Summary information about the variables included in either a data.frame, or a fitted lavaan object.} \usage{ varTable(object, ov.names = names(object), ov.names.x = NULL, ordered = NULL, factor = NULL, as.data.frame. = TRUE) } \arguments{ \item{object}{Either a data.frame, or an object of class \code{\linkS4class{lavaan}}.} \item{ov.names}{Only used if object is a data.frame. A character vector containing the variables that need to be summarized.} \item{ov.names.x}{Only used if object is a data.frame. A character vector containing additional variables that need to be summarized.} \item{ordered}{Character vector. Which variables should be treated as ordered factors?} \item{factor}{Character vector. Which variables should be treated as (unordered) factors?} \item{as.data.frame.}{If \code{TRUE}, return the list as a data.frame.} } \value{ A \code{list} or \code{data.frame} containing summary information about variables in a data.frame. If \code{object} is a fitted lavaan object, it displays the summary information about the observed variables that are included in the model. The summary information includes variable type (numeric, ordered, \ldots), the number of non-missing values, the mean and variance for numeric variables, the number of levels of ordered variables, and the labels for ordered variables.
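As an illustrative sketch (the column names \code{name}, \code{type}, \code{nobs}, \code{mean} and \code{var} follow the printed output and are shown here for illustration only), the returned data.frame can be subsetted like any other: \preformatted{vt <- varTable(HolzingerSwineford1939)
# keep only the numeric variables
vt[vt$type == "numeric", c("name", "nobs", "mean", "var")]}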
} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) varTable(fit) } lavaan/man/lav_func.Rd0000644000176200001440000000417714622072672014374 0ustar liggesusers\name{lav_func} \alias{lav_func_gradient_complex} \alias{lav_func_gradient_simple} \alias{lav_func_jacobian_complex} \alias{lav_func_jacobian_simple} \title{Utility Functions: Gradient and Jacobian} \description{Utility functions for computing the gradient of a scalar-valued function or the Jacobian of a vector-valued function by numerical approximation.} \usage{ lav_func_gradient_complex(func, x, h = .Machine$double.eps, ..., fallback.simple = TRUE) lav_func_jacobian_complex(func, x, h = .Machine$double.eps, ..., fallback.simple = TRUE) lav_func_gradient_simple(func, x, h = sqrt(.Machine$double.eps), ...) lav_func_jacobian_simple(func, x, h = sqrt(.Machine$double.eps), ...) } \arguments{ \item{func}{A real-valued function returning a numeric scalar or a numeric vector.} \item{x}{A numeric vector: the point(s) at which the gradient/Jacobian of the function should be computed.} \item{h}{Numeric value representing a small change in `x' when computing the gradient/Jacobian.} \item{...}{Additional arguments to be passed to the function `func'.} \item{fallback.simple}{Logical. If TRUE, and the function evaluation fails, we call the corresponding simple (non-complex) method instead.} } \details{ The complex versions use complex numbers to gain more precision, while retaining the simplicity (and speed) of the simple forward method (see references). These functions were added to lavaan (around 2012) when the complex functionality was not part of the numDeriv package. They were used internally, and made public in 0.5-17 per request of other package developers. } \references{ Squire, W. and Trapp, G. (1998). Using Complex Variables to Estimate Derivatives of Real Functions. SIAM Review, 40(1), 110-112. } \examples{ # very accurate complex method lav_func_gradient_complex(func = exp, x = 1) - exp(1) # less accurate forward method lav_func_gradient_simple(func = exp, x = 1) - exp(1) # very accurate complex method diag(lav_func_jacobian_complex(func = exp, x = c(1,2,3))) - exp(c(1,2,3)) # less accurate forward method diag(lav_func_jacobian_simple(func = exp, x = c(1,2,3))) - exp(c(1,2,3)) } lavaan/man/lavaan-class.Rd0000644000176200001440000002525314622072672015142 0ustar liggesusers\name{lavaan-class} \docType{class} \alias{lavaan-class} \alias{coef,lavaan-method} \alias{vcov,lavaan-method} \alias{anova,lavaan-method} \alias{predict,lavaan-method} \alias{resid,lavaan-method} \alias{residuals,lavaan-method} \alias{fitted,lavaan-method} \alias{fitted.values,lavaan-method} %% NOTE: no white space here! \alias{nobs} %% not imported in < 2.13 \alias{nobs,lavaan-method} \alias{logLik,lavaan-method} \alias{update,lavaan-method} \alias{show,lavaan-method} \alias{summary,lavaan-method} \title{Class For Representing A (Fitted) Latent Variable Model} \description{The \code{lavaan} class represents a (fitted) latent variable model. It contains a description of the model as specified by the user, a summary of the data, an internal matrix representation, and if the model was fitted, the fitting results.} \section{Objects from the Class}{ Objects can be created via the \code{\link{cfa}}, \code{\link{sem}}, \code{\link{growth}} or \code{\link{lavaan}} functions. 
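For instance, a minimal sketch (using the Holzinger and Swineford data shipped with lavaan; the slot access on the last line is purely illustrative): \preformatted{fit <- cfa('visual =~ x1 + x2 + x3', data = HolzingerSwineford1939)
class(fit)             # "lavaan"
fit@Options$estimator  # one of the stored options}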
} \section{Slots}{ \describe{ \item{\code{version}:}{The lavaan package version used to create this object.} \item{\code{call}:}{The function call as returned by \code{match.call()}.} \item{\code{timing}:}{The elapsed time (user+system) for various parts of the program as a list, including the total time.} \item{\code{Options}:}{Named list of options that were provided by the user, or filled-in automatically.} \item{\code{ParTable}:}{Named list describing the model parameters. Can be coerced to a data.frame. In the documentation, this is called the `parameter table'.} \item{\code{pta}:}{Named list containing parameter table attributes.} \item{\code{Data}:}{Object of internal class \code{"Data"}: information about the data.} \item{\code{SampleStats}:}{Object of internal class \code{"SampleStats"}: sample statistics.} \item{\code{Model}:}{Object of internal class \code{"Model"}: the internal (matrix) representation of the model.} \item{\code{Cache}:}{List containing objects that we try to compute only once, and reuse many times.} \item{\code{Fit}:}{Object of internal class \code{"Fit"}: the results of fitting the model. No longer used.} \item{\code{boot}:}{List. Results and information about the bootstrap.} \item{\code{optim}:}{List. Information about the optimization.} \item{\code{loglik}:}{List. Information about the loglikelihood of the model (if maximum likelihood was used).} \item{\code{implied}:}{List. Model implied statistics.} \item{\code{vcov}:}{List. Information about the variance matrix (vcov) of the model parameters.} \item{\code{test}:}{List. Different test statistics.} \item{\code{h1}:}{List. Information about the unrestricted h1 model (if available).} \item{\code{baseline}:}{List. Information about a baseline model (often the independence model) (if available).} \item{\code{internal}:}{List. For internal use only.} \item{\code{external}:}{List. Empty slot to be used by add-on packages.} } } \section{Methods}{ \describe{ \item{coef}{\code{signature(object = "lavaan", type = "free")}: Returns the estimates of the parameters in the model as a named numeric vector. If \code{type="free"}, only the free parameters are returned. If \code{type="user"}, all parameters listed in the parameter table are returned, including constrained and fixed parameters.} \item{fitted.values}{\code{signature(object = "lavaan")}: Returns the implied moments of the model as a list with two elements (per group): \code{cov} for the implied covariance matrix, and \code{mean} for the implied mean vector. If only the covariance matrix was analyzed, the implied mean vector will be zero.} \item{fitted}{\code{signature(object = "lavaan")}: an alias for \code{fitted.values}.} \item{residuals}{\code{signature(object = "lavaan", type="raw")}: If \code{type = "raw"}, this function returns the raw (= unscaled) difference between the observed and the expected (model-implied) summary statistics. If \code{type = "cor"}, or \code{type = "cor.bollen"}, the observed and model implied covariance matrices are first transformed to a correlation matrix (using \code{cov2cor()}), before the residuals are computed. If \code{type = "cor.bentler"}, both the observed and model implied covariance matrices are rescaled by dividing the elements by the square roots of the corresponding variances of the observed covariance matrix. If \code{type="normalized"}, the residuals are divided by the square root of the asymptotic variance of the corresponding summary statistic (the variance estimate depends on the choice for the \code{se} argument).
Unfortunately, the corresponding normalized residuals are not entirely correct, and this option is only available for historical interest. If \code{type="standardized"}, the residuals are divided by the square root of the asymptotic variance of these residuals. The resulting standardized residual elements can be interpreted as z-scores. If \code{type="standardized.mplus"}, the residuals are divided by the square root of the asymptotic variance of these residuals. However, a simplified formula is used (see the Mplus reference below) which often results in negative estimates for the variances, resulting in many \code{NA} values for the standardized residuals.} \item{resid}{\code{signature(object = "lavaan")}: an alias for \code{residuals}.} \item{vcov}{\code{signature(object = "lavaan")}: returns the covariance matrix of the estimated parameters.} \item{predict}{\code{signature(object = "lavaan")}: compute factor scores for all cases that are provided in the data frame. For complete data only.} \item{anova}{\code{signature(object = "lavaan")}: returns model comparison statistics. This method is just a wrapper around the function \code{\link{lavTestLRT}}. If only a single argument (a fitted model) is provided, this model is compared to the unrestricted model. If two or more arguments (fitted models) are provided, the models are compared in a sequential order. Test statistics are based on the likelihood ratio test. For more details and further options, see the \code{\link{lavTestLRT}} page.} \item{update}{\code{signature(object = "lavaan", model, add, ..., evaluate=TRUE)}: update a fitted lavaan object and evaluate it (unless \code{evaluate=FALSE}). Note that we use the environment that is stored within the lavaan object, which is not necessarily the parent frame. The \code{add} argument is analogous to the one described in the \code{\link{lavTestScore}} page, and can be used to add parameters to the specified model rather than passing an entirely new \code{model} argument.} \item{nobs}{\code{signature(object = "lavaan")}: returns the effective number of observations used when fitting the model. In a multiple group analysis, this is the sum of all observations per group.} \item{logLik}{\code{signature(object = "lavaan")}: returns the log-likelihood of the fitted model, if maximum likelihood estimation was used. The \code{\link[stats]{AIC}} and \code{\link[stats]{BIC}} methods automatically work via \code{logLik()}.} \item{show}{\code{signature(object = "lavaan")}: Print a short summary of the model fit.} \item{summary}{\code{signature(object = "lavaan", header = TRUE, fit.measures = FALSE, estimates = TRUE, ci = FALSE, fmi = FALSE, standardized = FALSE, std.nox = FALSE, remove.step1 = TRUE, remove.unused = TRUE, cov.std = TRUE, rsquare = FALSE, modindices = FALSE, nd = 3L)}: Print a nice summary of the model estimates. If \code{header = TRUE}, the header section (including fit measures) is printed. If \code{fit.measures = TRUE}, additional fit measures are added to the header section. The related \code{fm.args} list can be used to set options related to the fit measures. See \code{\link{fitMeasures}} for more details. If \code{estimates = TRUE}, print the parameter estimates section. If \code{ci = TRUE}, add confidence intervals to the parameter estimates section. If \code{fmi = TRUE}, add the fmi (fraction of missing information) column, if it is available.
If \code{standardized=TRUE} or a character vector, the standardized solution is also printed (see \code{\link{parameterEstimates}}). Note that \emph{SE}s and tests are still based on unstandardized estimates. Use \code{\link{standardizedSolution}} to obtain \emph{SE}s and test statistics for standardized estimates. The \code{std.nox} argument is deprecated; the \code{standardized} argument allows the \code{"std.nox"} solution to be specifically requested. If \code{remove.step1}, the parameters of the measurement part are not shown (only used when using \code{sam()}). If \code{remove.unused}, automatically added parameters that are fixed to their default (0 or 1) values are removed. If \code{rsquare=TRUE}, the R-Square values for the dependent variables in the model are printed. If \code{efa = TRUE}, EFA-related information is printed. The related \code{efa.args} list can be used to set options related to the EFA output. See \code{\link{summary.efaList}} for more details. If \code{modindices=TRUE}, modification indices are printed for all fixed parameters. The argument \code{nd} determines the number of digits after the decimal point to be printed (currently only in the parameter estimates section). Historically, nothing was returned, but since 0.6-12, a list of class \code{lavaan.summary} is returned, for which a print function is available.} } } \references{ Yves Rosseel (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. \doi{https://doi.org/10.18637/jss.v048.i02} Standardized Residuals in Mplus. Document retrieved from URL https://www.statmodel.com/download/StandardizedResiduals.pdf } \seealso{ \code{\link{cfa}}, \code{\link{sem}}, \code{\link{fitMeasures}}, \code{\link{standardizedSolution}}, \code{\link{parameterEstimates}}, \code{\link{lavInspect}}, \code{\link{modindices}} } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) summary(fit, standardized = TRUE, fit.measures = TRUE, rsquare = TRUE) fitted(fit) coef(fit) resid(fit, type = "normalized") } lavaan/man/lavTables.Rd0000644000176200001440000001242714622072672014511 0ustar liggesusers\name{lavTables} \alias{lavTables} \title{lavaan frequency tables} \description{ Frequency tables for categorical variables and related statistics.} \usage{ lavTables(object, dimension = 2L, type = "cells", categorical = NULL, group = NULL, statistic = "default", G2.min = 3, X2.min = 3, p.value = FALSE, output = "data.frame", patternAsString = TRUE) } \arguments{ \item{object}{Either a \code{data.frame}, or an object of class \code{\linkS4class{lavaan}}.} \item{dimension}{Integer. If 0L, display all response patterns. If 1L, display one-dimensional (one-way) tables; if 2L, display two-dimensional (two-way or pairwise) tables. For the latter, we can change the information per row: if \code{type = "cells"}, each row is a cell in a pairwise table; if \code{type = "table"}, each row is a table.} \item{type}{If \code{"cells"}, display information for each cell in the (one-way or two-way) table. If \code{"table"}, display information per table. If \code{"pattern"}, display response patterns (implying \code{"dimension = 0L"}).} \item{categorical}{Only used if \code{object} is a \code{data.frame}. Specify variables that need to be treated as categorical.} \item{group}{Only used if \code{object} is a \code{data.frame}.
Specify a grouping variable.} \item{statistic}{Either a character string, or a vector of character strings requesting one or more statistics for each cell, pattern or table. Always available are \code{X2} and \code{G2} for the Pearson and LRT based goodness-of-fit statistics. A distinction is made between the unrestricted and restricted model. The statistics based on the former have an extension \code{*.un}, as in \code{X2.un} and \code{G2.un}. If \code{object} is a \code{data.frame}, the unrestricted versions of the statistics are the only ones available. For one-way tables, additional statistics are the thresholds (\code{th.un} and \code{th}). For two-way tables and \code{type = "table"}, the following statistics are available: \code{X2}, \code{G2}, \code{cor} (polychoric correlation), \code{RMSEA} and the corresponding unrestricted versions (\code{X2.un} etc.). Additional statistics, based on the cell \code{G2} values, are \code{G2.average}, \code{G2.nlarge} and \code{G2.plarge}: \code{G2.average} is the average of the \code{G2} values in each cell of the two-way table; \code{G2.nlarge} is the number of cells with a \code{G2} value larger than \code{G2.min}, and \code{G2.plarge} is the proportion of cells with a \code{G2} value larger than \code{G2.min}. A similar set of statistics based on \code{X2} is also available. If \code{"default"}, the selection of statistics (if any) depends on the \code{dimension} and \code{type} arguments, and on whether \code{object} is a \code{data.frame} or a fitted lavaan object.} \item{G2.min}{Numeric. All cells with a G2 statistic larger than this number are considered `large', as reflected in the (optional) \code{"G2.plarge"} and \code{"G2.nlarge"} columns.} \item{X2.min}{Numeric. All cells with an X2 statistic larger than this number are considered `large', as reflected in the (optional) \code{"X2.plarge"} and \code{"X2.nlarge"} columns.} \item{p.value}{Logical. If \code{TRUE}, p-values are computed for requested statistics (e.g., G2 or X2) if possible.} \item{output}{If \code{"data.frame"}, the output is presented as a data.frame where each row is either a cell, a table, or a response pattern, depending on the \code{"type"} argument. If \code{"table"}, the output is presented as a table (or matrix) or a list of tables. Only a single statistic can be shown in this case, and if the \code{statistic} is empty, the observed frequencies are shown.} \item{patternAsString}{Logical. Only used for response patterns (\code{dimension = 0L}). If \code{TRUE}, response patterns are displayed as a compact string. If \code{FALSE}, as many columns as observed variables are displayed.} } \value{ If \code{output = "data.frame"}, the output is presented as a data.frame where each row is either a cell, a table, or a response pattern, depending on the \code{"type"} argument. If \code{output = "table"} (only for two-way tables), a list of tables (if \code{type = "cells"}) where each list element corresponds to a pairwise table, or if \code{type = "table"}, a single table (per group). In both cases, the table entries are determined by the (single) \code{statistic} argument. } \references{ Joreskog, K.G. & Moustaki, I. (2001). Factor analysis of ordinal variables: A comparison of three approaches. Multivariate Behavioral Research, 36, 347-387.
} \seealso{\code{\link{varTable}}.} \examples{ HS9 <- HolzingerSwineford1939[,c("x1","x2","x3","x4","x5", "x6","x7","x8","x9")]
HSbinary <- as.data.frame( lapply(HS9, cut, 2, labels=FALSE) )

# using the data only
lavTables(HSbinary, dim = 0L, categorical = names(HSbinary))
lavTables(HSbinary, dim = 1L, categorical = names(HSbinary), stat=c("th.un"))
lavTables(HSbinary, dim = 2L, categorical = names(HSbinary), type = "table")

# fit a model
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit <- cfa(HS.model, data=HSbinary, ordered=names(HSbinary))
lavTables(fit, 1L)
lavTables(fit, 2L, type="cells")
lavTables(fit, 2L, type="table", stat=c("cor.un", "G2", "cor"))
lavTables(fit, 2L, type="table", output="table", stat="X2") } lavaan/man/inspectSampleCov.Rd0000644000176200001440000000207514622072672016051 0ustar liggesusers\name{inspectSampleCov} \alias{inspectSampleCov} \title{Observed Variable Correlation Matrix from a Model and Data} \description{ The lavaan model syntax describes a latent variable model. Often, the user wants to see the covariance matrix generated by their model for diagnostic purposes. However, their data may have far more columns of information than what is contained in their model.} \usage{ inspectSampleCov(model, data, ...) } \arguments{ \item{model}{The model that will be fit by lavaan.} \item{data}{The data frame being used to fit the model.} \item{...}{Other arguments to \code{\link{sem}} for how to deal with multiple groups, missing values, etc.} } \author{Jarrett Byrnes} \details{ One must supply both a model, coded with proper \code{\link{model.syntax}}, and a data frame from which a covariance matrix will be calculated. This function essentially calls \code{\link{sem}}, but does not fit the model; it then uses \code{\link{lavInspect}} to get the sample covariance matrix and meanstructure. } \section{See also}{\code{\link{sem}}, \code{\link{lavInspect}} } lavaan/man/lavaan.Rd0000644000176200001440000001450414622072672014034 0ustar liggesusers\name{lavaan} \alias{lavaan} \title{Fit a Latent Variable Model} \description{ Fit a latent variable model.} \usage{ lavaan(model = NULL, data = NULL, ordered = NULL, sampling.weights = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, group = NULL, cluster = NULL, constraints = "", WLS.V = NULL, NACOV = NULL, ov.order = "model", slotOptions = NULL, slotParTable = NULL, slotSampleStats = NULL, slotData = NULL, slotModel = NULL, slotCache = NULL, sloth1 = NULL, ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (e.g., the output of the \code{lavaanify()} function) is also accepted.} \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated as numeric (unless they are declared as ordered in the data.frame.) Since 0.6-4, \code{ordered} can also be logical. If \code{TRUE}, all observed endogenous variables are treated as ordered (ordinal).
If \code{FALSE}, all observed endogenous variables are considered to be numeric (again, unless they are declared as ordered in the data.frame).} \item{sampling.weights}{A variable name in the data frame containing sampling weight information. Currently only available for non-clustered data. Depending on the \code{sampling.weights.normalization} option, these weights may be rescaled (or not) so that their sum equals the number of observations (total or per group).} \item{sample.cov}{Numeric matrix. A sample variance-covariance matrix. The rownames and/or colnames must contain the observed variable names. For a multiple group analysis, a list with a variance-covariance matrix for each group.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.th}{Vector of sample-based thresholds. For a multiple group analysis, a list with a vector of thresholds for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} \item{group}{Character. A variable name in the data frame defining the groups in a multiple group analysis.} \item{cluster}{Character. A (single) variable name in the data frame defining the clusters in a two-level dataset.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} \item{WLS.V}{A user-provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix for each group. The elements of the weight matrix should be in the following order (if all data is continuous): first the means (if a meanstructure is involved), then the lower triangular elements of the covariance matrix including the diagonal, ordered column by column. In the categorical case: first the thresholds (including the means for continuous variables), then the slopes (if any), the variances of continuous variables (if any), and finally the lower triangular elements of the correlation/covariance matrix excluding the diagonal, ordered column by column.} \item{NACOV}{A user-provided matrix containing the elements of (N times) the asymptotic variance-covariance matrix of the sample statistics. For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} \item{ov.order}{Character. If \code{"model"} (the default), the order of the observed variable names (as reflected for example in the output of \code{lavNames()}) is determined by the model syntax. If \code{"data"}, the order is determined by the data (either the full data.frame or the sample (co)variance matrix). If the \code{WLS.V} and/or \code{NACOV} matrices are provided, this argument is currently set to \code{"data"}.} \item{slotOptions}{Options slot from a fitted lavaan object. If provided, no new Options slot will be created by this call.} \item{slotParTable}{ParTable slot from a fitted lavaan object. If provided, no new ParTable slot will be created by this call.} \item{slotSampleStats}{SampleStats slot from a fitted lavaan object. If provided, no new SampleStats slot will be created by this call.} \item{slotData}{Data slot from a fitted lavaan object.
If provided, no new Data slot will be created by this call.} \item{slotModel}{Model slot from a fitted lavaan object. If provided, no new Model slot will be created by this call.} \item{slotCache}{Cache slot from a fitted lavaan object. If provided, no new Cache slot will be created by this call.} \item{sloth1}{h1 slot from a fitted lavaan object. If provided, no new h1 slot will be created by this call.} \item{...}{Many more additional options can be defined, using 'name = value'. See \code{\link{lavOptions}} for a complete list.} } \value{ An object of class \code{\linkS4class{lavaan}}, for which several methods are available, including a \code{summary} method. } \references{ Yves Rosseel (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. \doi{https://doi.org/10.18637/jss.v048.i02}} \seealso{ \code{\link{cfa}}, \code{\link{sem}}, \code{\link{growth}} } \examples{ # The Holzinger and Swineford (1939) example
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit <- lavaan(HS.model, data=HolzingerSwineford1939,
              auto.var=TRUE, auto.fix.first=TRUE, auto.cov.lv.x=TRUE)
summary(fit, fit.measures=TRUE) } lavaan/man/lavOptions.Rd0000644000176200001440000007725314626372131014735 0ustar liggesusers\name{lavOptions} \alias{lavOptions} \alias{lavoptions} \title{lavaan Options} \description{ Show the default options used by the \code{lavaan()} function. The options can be changed by passing 'name = value' arguments to the \code{lavaan()} function call, where they will be added to the '...' argument. } \usage{ lavOptions(x = NULL, default = NULL, mimic = "lavaan") } \arguments{ \item{x}{Character. A character string holding an option name, or a character vector holding multiple option names. All option names are converted to lower case.} \item{default}{If a single option is specified but not available, this value is returned.} \item{mimic}{Not used for now.} } \details{ This is the full list of options that are accepted by the \code{lavaan()} function, organized in several sections: Model features (always available): \describe{ \item{\code{meanstructure}:}{If \code{TRUE}, the means of the observed variables enter the model. If \code{"default"}, the value is set based on the user-specified model, and/or the values of other arguments.} \item{\code{int.ov.free}:}{If \code{FALSE}, the intercepts of the observed variables are fixed to zero.} \item{\code{int.lv.free}:}{If \code{FALSE}, the intercepts of the latent variables are fixed to zero.} \item{\code{conditional.x}:}{If \code{TRUE}, we set up the model conditional on the exogenous `x' covariates; the model-implied sample statistics only include the non-x variables. If \code{FALSE}, the exogenous `x' variables are modeled jointly with the other variables, and the model-implied statistics reflect both sets of variables. If \code{"default"}, the value is set depending on the estimator, and whether or not the model involves categorical endogenous variables.} \item{\code{fixed.x}:}{If \code{TRUE}, the exogenous `x' covariates are considered fixed variables and the means, variances and covariances of these variables are fixed to their sample values. If \code{FALSE}, they are considered random, and the means, variances and covariances are free parameters.
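As a small illustration (a minimal sketch, using the built-in \code{HolzingerSwineford1939} data; the choice of variables is purely illustrative): \preformatted{
# x2 and x3 act as exogenous `x' covariates here
fit.fixed <- sem('x1 ~ x2 + x3', data = HolzingerSwineford1939,
                 fixed.x = TRUE)
fit.free  <- sem('x1 ~ x2 + x3', data = HolzingerSwineford1939,
                 fixed.x = FALSE)
# with fixed.x = FALSE, the (co)variances of x2 and x3
# appear as free parameters in the output
parameterEstimates(fit.free)
}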
If \code{"default"}, the value is set depending on the mimic option.} \item{\code{orthogonal}:}{If \code{TRUE}, all covariances among latent variables are set to zero.} \item{\code{orthogonal.y}:}{If \code{TRUE}, all covariances among endogenous latent variables only are set to zero.} \item{\code{orthogonal.x}:}{If \code{TRUE}, all covariances among exogenous latent variables only are set to zero.} \item{\code{std.lv}:}{If \code{TRUE}, the metric of each latent variable is determined by fixing their (residual) variances to 1.0. If \code{FALSE}, the metric of each latent variable is determined by fixing the factor loading of the first indicator to 1.0. If there are multiple groups, \code{std.lv = TRUE} and \code{"loadings"} is included in the \code{group.equal} argument, then only the latent variances of the first group will be fixed to 1.0, while the latent variances of other groups are set free.} \item{\code{effect.coding}:}{Can be logical or character string. If logical and \code{TRUE}, this implies \code{effect.coding = c("loadings", "intercepts")}. If logical and \code{FALSE}, it is set equal to the empty string. If \code{"loadings"} is included, equality constraints are used so that the average of the factor loadings (per latent variable) equals 1. Note that this should not be used together with \code{std.lv = TRUE}. If \code{"intercepts"} is included, equality constraints are used so that the sum of the intercepts (belonging to the indicators of a single latent variable) equals zero. As a result, the latent mean will be freely estimated and usually equal the average of the means of the involved indicators.} \item{\code{ceq.simple}:}{Logical. If \code{TRUE}, and no other general (equality or inequality) constraints are used in the model, simple equality constraints are represented in the parameter table as duplicated free parameters (instead of extra rows with \code{op = "=="}).} \item{\code{parameterization}:}{Currently only used if data is categorical. If \code{"delta"}, the delta parameterization is used. If \code{"theta"}, the theta parameterization is used.} \item{\code{correlation}:}{Only used for (single-level) continuous data. If \code{TRUE}, analyze a correlation matrix (instead of a (co)variance matrix). This implies that the residual observed variances are no longer free parameters. Instead, they are set to values to ensure the model-implied variances are unity. This also affects the standard errors. The only available estimators are GLS and WLS, which produce correct standard errors and a correct test statistic under normal and non-normal conditions respectively. 
For now, \code{fixed.x = FALSE} and \code{conditional.x = FALSE} are always assumed.} } Model features (only available for the \code{lavaan()} function): \describe{ \item{\code{auto.fix.first}:}{If \code{TRUE}, the factor loading of the first indicator is set to 1.0 for every latent variable.} \item{\code{auto.fix.single}:}{If \code{TRUE}, the residual variance (if included) of an observed indicator is set to zero if it is the only indicator of a latent variable.} \item{\code{auto.var}:}{If \code{TRUE}, the (residual) variances of both observed and latent variables are set free.} \item{\code{auto.cov.lv.x}:}{If \code{TRUE}, the covariances of exogenous latent variables are included in the model and set free.} \item{\code{auto.cov.y}:}{If \code{TRUE}, the covariances of dependent variables (both observed and latent) are included in the model and set free.} \item{\code{auto.th}:}{If \code{TRUE}, thresholds for limited dependent variables are included in the model and set free.} \item{\code{auto.delta}:}{If \code{TRUE}, response scaling parameters for limited dependent variables are included in the model and set free.} \item{\code{auto.efa}:}{If \code{TRUE}, the necessary constraints are imposed to make the (unrotated) exploratory factor analysis blocks identifiable: for each block, factor variances are set to 1, factor covariances are constrained to be zero, and factor loadings are constrained to follow an echelon pattern.} } Data options: \describe{ \item{\code{std.ov}:}{If \code{TRUE}, observed variables are standardized before entering the analysis. By default, these are only the non-exogenous observed variables, unless \code{fixed.x = FALSE}. Use this option with caution; it can be used to test if (for example) nonconvergence was due to scaling issues. But this is still a covariance-based analysis, in the sense that no constraints are involved (to ensure the model-implied (co)variance matrix has unit variances), and the standard errors still assume that the input was unstandardized. See also the \code{correlation} option.} \item{\code{missing}:}{The default setting is \code{"listwise"}: all cases with missing values are removed listwise from the data before the analysis starts. This is only valid if the data are missing completely at random (MCAR). Therefore, it may not be the optimal choice, but it can be useful for a first run. If the estimator belongs to the ML family, another option is \code{"ml"} (alias: \code{"fiml"} or \code{"direct"}). This corresponds to the so-called full information maximum likelihood approach (fiml), where we compute the likelihood case by case, using all available data from that case. Note that if the model contains exogenous observed covariates, and \code{fixed.x = TRUE} (the default), all cases with any missing values on these covariates will be deleted first. The option \code{"ml.x"} (alias: \code{"fiml.x"} or \code{"direct.x"}) is similar to \code{"ml"}, but does not delete any cases with missing values for the exogenous covariates, even if \code{fixed.x = TRUE}. (Note: all lavaan versions < 0.6 used \code{"ml.x"} instead of \code{"ml"}). If you wish to use multiple imputation, you need to use an external package (e.g., mice) to generate imputed datasets, which can then be analyzed using the \code{\link{semList}} function. The semTools package contains several functions to do this automatically. Another option (with continuous data) is to use \code{"two.stage"} or \code{"robust.two.stage"}.
In this approach, we first estimate the sample statistics (mean vector, variance-covariance matrix) using an EM algorithm. Then, we use these estimated sample statistics as input for a regular analysis (as if the data were complete). The standard errors and test statistics are adjusted correctly to reflect the two-step procedure. The \code{"robust.two.stage"} option produces standard errors and a test statistic that are robust against non-normality. If (part of) the data is categorical, and the estimator is from the (W)LS family, the only option (besides listwise deletion) is \code{"pairwise"}. In this three-step approach, missingness is only an issue in the first two steps. In the first step, we compute thresholds (for categorical variables) and means or intercepts (for continuous variables) using univariate information only. In this step, we simply ignore the missing values just like in \code{mean(x, na.rm = TRUE)}. In the second step, we compute polychoric/polyserial/Pearson correlations using (only) two variables at a time. Here we use pairwise deletion: we only keep those observations for which both values are observed (not-missing). And this may change from pair to pair. By default, in the categorical case we use \code{conditional.x = TRUE}. Therefore, any cases with missing values on the exogenous covariates will be deleted listwise from the data first. Finally, if the estimator is \code{"PML"}, the available options are \code{"pairwise"}, \code{"available.cases"} and \code{"doubly.robust"}. See the PML tutorial on the lavaan website for more information about these approaches.} \item{\code{sampling.weights.normalization}:}{If \code{"none"}, the sampling weights (if provided) will not be transformed. If \code{"total"}, the sampling weights are normalized by dividing by the total sum of the weights, and multiplying again by the total sample size. If \code{"group"}, the sampling weights are normalized per group: by dividing by the sum of the weights (in each group), and multiplying again by the group size. The default is \code{"total"}.} \item{\code{samplestats}:}{Logical. If \code{FALSE}, no sample statistics will be computed (and no estimation can take place). This can be useful when only a dummy lavaan object is requested, without any computations. The default is \code{TRUE}.} } Data summary options: \describe{ \item{\code{sample.cov.rescale}:}{If \code{TRUE}, the sample covariance matrix provided by the user is internally rescaled by multiplying it with a factor (N-1)/N. If \code{"default"}, the value is set depending on the estimator and the likelihood option: it is set to \code{TRUE} if maximum likelihood estimation is used and \code{likelihood="normal"}, and \code{FALSE} otherwise.} \item{\code{ridge}:}{Logical. If \code{TRUE}, a small constant value will be added to the diagonal elements of the covariance (or correlation) matrix before analysis. The value can be set using the \code{ridge.constant} option.} \item{\code{ridge.constant}:}{Numeric. Small constant used for ridging. The default value is 1e-05.} } Multiple group options: \describe{ \item{\code{group.label}:}{A character vector. The user can specify which group (or factor) levels need to be selected from the grouping variable, and in which order. If missing, all grouping levels are selected, in the order as they appear in the data.} \item{\code{group.equal}:}{A vector of character strings. Only used in a multiple group analysis.
Can be one or more of the following: \code{"loadings"}, \code{"composite.loadings"}, \code{"intercepts"}, \code{"means"}, \code{"thresholds"}, \code{"regressions"}, \code{"residuals"}, \code{"residual.covariances"}, \code{"lv.variances"} or \code{"lv.covariances"}, specifying the pattern of equality constraints across multiple groups.} \item{\code{group.partial}:}{A vector of character strings containing the labels of the parameters which should be free in all groups (thereby overriding the group.equal argument for some specific parameters).} \item{\code{group.w.free}:}{Logical. If \code{TRUE}, the group frequencies are considered to be free parameters in the model. In this case, a Poisson model is fitted to estimate the group frequencies. If \code{FALSE} (the default), the group frequencies are fixed to their observed values.} } Estimation options: \describe{ \item{\code{estimator}:}{The estimator to be used. Can be one of the following: \code{"ML"} for maximum likelihood, \code{"GLS"} for (normal theory) generalized least squares, \code{"WLS"} for weighted least squares (sometimes called ADF estimation), \code{"ULS"} for unweighted least squares, \code{"DWLS"} for diagonally weighted least squares, and \code{"DLS"} for distributionally-weighted least squares. These are the main options that affect the estimation. For convenience, the \code{"ML"} option can be extended as \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, \code{"MLF"}, and \code{"MLR"}. The estimation will still be plain \code{"ML"}, but now with robust standard errors and a robust (scaled) test statistic. For \code{"MLM"}, \code{"MLMV"}, \code{"MLMVS"}, classic robust standard errors are used (\code{se="robust.sem"}); for \code{"MLF"}, standard errors are based on first-order derivatives (\code{information = "first.order"}); for \code{"MLR"}, `Huber-White' robust standard errors are used (\code{se="robust.huber.white"}). In addition, \code{"MLM"} will compute a Satorra-Bentler scaled (mean adjusted) test statistic (\code{test="satorra.bentler"}), \code{"MLMVS"} will compute a mean and variance adjusted test statistic (Satterthwaite style) (\code{test="mean.var.adjusted"}), \code{"MLMV"} will compute a mean and variance adjusted test statistic (scaled and shifted) (\code{test="scaled.shifted"}), and \code{"MLR"} will compute a test statistic which is asymptotically equivalent to the Yuan-Bentler T2-star test statistic (\code{test="yuan.bentler.mplus"}). Analogously, the estimators \code{"WLSM"} and \code{"WLSMV"} imply the \code{"DWLS"} estimator (not the \code{"WLS"} estimator) with robust standard errors and a mean or mean and variance adjusted test statistic. Estimators \code{"ULSM"} and \code{"ULSMV"} imply the \code{"ULS"} estimator with robust standard errors and a mean or mean and variance adjusted test statistic.} \item{\code{likelihood}:}{Only relevant for ML estimation. If \code{"wishart"}, the Wishart likelihood approach is used. In this approach, the covariance matrix has been divided by N-1, and both standard errors and test statistics are based on N-1. If \code{"normal"}, the normal likelihood approach is used. Here, the covariance matrix has been divided by N, and both standard errors and test statistics are based on N. If \code{"default"}, it depends on the mimic option: if \code{mimic="lavaan"} or \code{mimic="Mplus"}, normal likelihood is used; otherwise, Wishart likelihood is used.} \item{\code{link}:}{Not used yet.
This is just a placeholder until the MML estimator is back.} \item{\code{information}:}{If \code{"expected"}, the expected information matrix is used (to compute the standard errors). If \code{"observed"}, the observed information matrix is used. If \code{"first.order"}, the information matrix is based on the outer product of the casewise scores. See also the options \code{"h1.information"} and \code{"observed.information"} for further control. If \code{"default"}, the value is set depending on the estimator, the missing argument, and the mimic option. If the argument is a vector with two elements, the first element is used for the computation of the standard errors, while the second element is used for the (robust) test statistic.} \item{\code{h1.information}:}{If \code{"structured"} (the default), the unrestricted (h1) information part of the (expected, first.order or observed if h1 is used) information matrix is based on the structured, or model-implied statistics (model-implied covariance matrix, model-implied mean vector, etc.). If \code{"unstructured"}, the unrestricted (h1) information part is based on sample-based statistics (observed covariance matrix, observed mean vector, etc.). If the argument is a vector with two elements, the first element is used for the computation of the standard errors, while the second element is used for the (robust) test statistic.} \item{\code{observed.information}:}{If \code{"hessian"}, the observed information matrix is based on the Hessian of the objective function. If \code{"h1"}, an approximation is used that is based on the observed information matrix of the unrestricted (h1) model. If the argument is a vector with two elements, the first element is used for the computation of the standard errors, while the second element is used for the (robust) test statistic.} \item{\code{se}:}{If \code{"standard"}, conventional standard errors are computed based on inverting the (expected, observed or first.order) information matrix. If \code{"robust.sem"}, conventional robust standard errors are computed. If \code{"robust.huber.white"}, standard errors are computed based on the 'mlr' (aka pseudo ML, Huber-White) approach. If \code{"robust"}, either \code{"robust.sem"} or \code{"robust.huber.white"} is used depending on the estimator, the mimic option, and whether the data are complete or not. If \code{"boot"} or \code{"bootstrap"}, bootstrap standard errors are computed using standard bootstrapping (unless Bollen-Stine bootstrapping is requested for the test statistic; in this case bootstrap standard errors are computed using model-based bootstrapping). If \code{"none"}, no standard errors are computed.} \item{\code{test}:}{Character vector. See the documentation of the \code{\link{lavTest}} function for a full list. Multiple names of test statistics can be provided. If \code{"default"}, the value depends on the values of other arguments. See also the \code{\link{lavTest}} function to extract (alternative) test statistics from a fitted lavaan object.} \item{\code{scaled.test}:}{Character. Choose the test statistic that will be scaled (if a scaled test statistic is requested). The default is \code{"standard"}, but it could also be (for example) \code{"Browne.residual.nt"}.} \item{\code{gamma.n.minus.one}:}{Logical. If \code{TRUE}, we divide the Gamma matrix by N-1 (instead of the default N).} \item{\code{gamma.unbiased}:}{Logical. If \code{TRUE}, we compute an unbiased version of the Gamma matrix.
Only available for single-level complete data and when \code{conditional.x = FALSE} and \code{fixed.x = FALSE} (for now).} \item{\code{bootstrap}:}{Number of bootstrap draws, if bootstrapping is used.} \item{\code{do.fit}:}{If \code{FALSE}, the model is not fit, and the current starting values of the model parameters are preserved.} } Optimization options: \describe{ \item{\code{control}:}{A list containing control parameters passed to the external optimizer. By default, lavaan uses \code{"nlminb"}. See the manpage of \code{\link{nlminb}} for an overview of the control parameters. If another (external) optimizer is selected, see the manpage for that optimizer to see the possible control parameters.} \item{\code{optim.method}:}{Character. The optimizer that should be used. For unconstrained optimization or models with only linear equality constraints (i.e., the model syntax does not include any "==", ">" or "<" operators), the available options are \code{"nlminb"} (the default), \code{"BFGS"}, \code{"L-BFGS-B"}. These are all quasi-Newton methods. A basic implementation of Gauss-Newton is also available (\code{optim.method = "GN"}). The latter is the default when \code{estimator = "DLS"}. For constrained optimization, the only available option is \code{"nlminb.constr"}, which uses an augmented Lagrangian minimization algorithm.} \item{\code{optim.force.converged}:}{Logical. If \code{TRUE}, pretend the model has converged, no matter what.} \item{\code{optim.dx.tol}:}{Numeric. Tolerance used for checking if the elements of the (unscaled) gradient are all zero (in absolute value). The default value is 0.001.} \item{\code{optim.gn.tol.x}:}{Numeric. Only used when \code{optim.method = "GN"}. Optimization stops when the root mean square of the difference between the old and new parameter values is smaller than this tolerance value. Default is \code{1e-05} for DLS estimation and \code{1e-07} otherwise.} \item{\code{optim.gn.iter.max}:}{Integer. Only used when \code{optim.method = "GN"}. The maximum number of GN iterations. The default is 200.} \item{\code{bounds}:}{Only used if \code{optim.method = "nlminb"}. If logical: \code{FALSE} implies no bounds are imposed on the parameters. If \code{TRUE}, this implies \code{bounds = "wide"}. If character, possible options are \code{"none"} (the default), \code{"standard"}, \code{"wide"}, \code{"pos.var"}, \code{"pos.ov.var"}, and \code{"pos.lv.var"}. If \code{bounds = "pos.ov.var"}, the observed variances are forced to be nonnegative. If \code{bounds = "pos.lv.var"}, the latent variances are forced to be nonnegative. If \code{bounds = "pos.var"}, both observed and latent variances are forced to be nonnegative. If \code{bounds = "standard"}, lower and upper bounds are computed for observed and latent variances, and factor loadings. If \code{bounds = "wide"}, lower and upper bounds are computed for observed and latent variances, and factor loadings; but the range of the bounds is enlarged (allowing again for slightly negative variances).} \item{\code{optim.bounds}:}{List. This can be used instead of the \code{bounds} argument to allow more control. Possible elements of the list are \code{lower}, \code{upper}, \code{lower.factor} and \code{upper.factor}. All of these accept a vector. The \code{lower} and \code{upper} elements indicate for which type of parameters bounds should be computed. Possible choices are \code{"ov.var"}, \code{"lv.var"}, \code{"loadings"} and \code{"covariances"}.
The \code{lower.factor} and \code{upper.factor} elements should have the same length as the \code{lower} and \code{upper} elements respectively. They indicate the factor by which the range of the bounds should be enlarged (for example, 1.1 or 1.2; the default is 1.0). Another element is \code{min.reliability.marker}, which sets the lower bound for the reliability of the marker indicator (if any) of each factor (default is 0.1). Finally, the \code{min.var.lv.endo} element indicates the lower bound of the variance of any endogenous latent variable (default is 0.0).} } Parallelization options (currently only used for bootstrapping): \describe{ \item{\code{parallel}:}{The type of parallel operation to be used (if any). If missing, the default is \code{"no"}.} \item{\code{ncpus}:}{Integer: number of processes to be used in parallel operation: typically one would choose this to be the number of available CPUs. By default this is the number of cores (as detected by \code{parallel::detectCores()}) minus one.} \item{\code{cl}:}{An optional \pkg{parallel} or \pkg{snow} cluster for use if \code{parallel = "snow"}. If not supplied, a cluster on the local machine is created for the duration of the \code{bootstrapLavaan} or \code{bootstrapLRT} call.} \item{\code{iseed}:}{An integer to set the seed, or \code{NULL} if no reproducible results are needed. This works for both serial (non-parallel) and parallel settings. Internally, \code{RNGkind()} is set to \code{"L'Ecuyer-CMRG"} if \code{parallel = "multicore"}. If \code{parallel = "snow"} (under Windows), \code{parallel::clusterSetRNGStream()} is called which automatically switches to \code{"L'Ecuyer-CMRG"}. When \code{iseed} is not NULL, \code{.Random.seed} (if it exists) in the global environment is left untouched.} } Categorical estimation options: \describe{ \item{\code{zero.add}:}{A numeric vector containing two values. These values affect the calculation of polychoric correlations when some frequencies in the bivariate table are zero. The first value only applies for 2x2 tables; the second value applies to larger tables. This value is added to the zero frequency in the bivariate table. If \code{"default"}, the value is set depending on the \code{"mimic"} option. By default, lavaan uses \code{zero.add = c(0.5, 0.0)}.} \item{\code{zero.keep.margins}:}{Logical. This argument only affects the computation of polychoric correlations for 2x2 tables with an empty cell, and where a value is added to the empty cell. If \code{TRUE}, the other values of the frequency table are adjusted so that all margins are unaffected. If \code{"default"}, the value is set depending on the \code{"mimic"} option. The default is \code{TRUE}.} \item{\code{zero.cell.warn}:}{Logical. Only used if some observed endogenous variables are categorical. If \code{TRUE}, give a warning if one or more cells of a bivariate frequency table are empty.} } Starting values options: \describe{ \item{\code{start}:}{If it is a character string, the two options are currently \code{"simple"} and \code{"Mplus"}. In the first case, all parameter values are set to zero, except the factor loadings and (residual) variances, which are set to one. When \code{start} is \code{"Mplus"}, the factor loadings are estimated using the fabin3 estimator (tsls) per factor. The residual variances of observed variables are set to half the observed variance, and all other (residual) variances are set to 0.05. The remaining parameters (regression coefficients, covariances) are set to zero.
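As a brief illustration (a minimal sketch, reusing the \code{HS.model} syntax from the examples above): \preformatted{
# compare the two built-in starting value schemes
fit1 <- cfa(HS.model, data = HolzingerSwineford1939, start = "simple")
fit2 <- cfa(HS.model, data = HolzingerSwineford1939, start = "Mplus")
}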
If \code{start} is a fitted object of class \code{\linkS4class{lavaan}}, the estimated values of the corresponding parameters will be extracted. If it is a parameter table, for example the output of the \code{parameterEstimates()} function, the values of the \code{est} or \code{start} or \code{ustart} column (whichever is found first) will be extracted.} } Check options: \describe{ \item{\code{check.start}:}{Logical. If \code{TRUE}, the starting values are checked for possibly inconsistent values (for example values implying correlations larger than one). If needed, a warning is given.} \item{\code{check.gradient}:}{Logical. If \code{TRUE}, and the model converged, a warning is given if the optimizer decided that a (local) solution has been found, while not all elements of the (unscaled) gradient (as seen by the optimizer) are (near) zero, as they should be (the tolerance used is 0.001).} \item{\code{check.post}:}{Logical. If \code{TRUE}, and the model converged, a check is performed after (post) fitting, to verify if the solution is admissible. This implies that all variances are non-negative, and all the model-implied covariance matrices are positive (semi-)definite. For the latter test, we tolerate a tiny negative eigenvalue that is smaller than \code{.Machine$double.eps^(3/4)}, treating it as being zero.} \item{\code{check.vcov}:}{Logical. If \code{TRUE}, and the model converged, we check if the variance-covariance matrix of the free parameters is positive definite. We take into account possible equality and active inequality constraints. If needed, a warning is given.} \item{\code{check.lv.names}:}{Logical. If \code{TRUE}, and latent variables are defined in the model, lavaan will stop with an error message if a latent variable name also occurs in the data (implying it is also an observed variable).} } Verbosity options: \describe{ \item{\code{verbose}:}{If \code{TRUE}, show what lavaan is doing. During estimation, the function value is printed out during each iteration.} \item{\code{warn}:}{If \code{FALSE}, suppress all lavaan-specific warning messages.} \item{\code{debug}:}{If \code{TRUE}, debugging information is printed out.} } Miscellaneous: \describe{ \item{\code{model.type}:}{Set the model type: possible values are \code{"cfa"}, \code{"sem"} or \code{"growth"}. This may affect how starting values are computed, and may be used to alter the terminology used in the summary output, or the layout of path diagrams that are based on a fitted lavaan object.} \item{\code{mimic}:}{If \code{"Mplus"}, an attempt is made to mimic the Mplus program. If \code{"EQS"}, an attempt is made to mimic the EQS program. If \code{"default"}, the value is (currently) set to \code{"lavaan"}, which is very close to \code{"Mplus"}.} \item{\code{representation}:}{If \code{"LISREL"}, the classical LISREL matrix representation is used to represent the model (using the all-y variant). No other options are available (for now).} \item{\code{implied}:}{Logical. If \code{TRUE}, compute the model-implied statistics, and store them in the implied slot.} \item{\code{h1}:}{Logical. If \code{TRUE}, compute the unrestricted model and store the unrestricted summary statistics (and perhaps a loglikelihood) in the h1 slot.} \item{\code{baseline}:}{Logical. If \code{TRUE}, compute a baseline model (currently always the independence model, assuming all variables are uncorrelated) and store the results in the baseline slot.} \item{\code{baseline.conditional.x.free.slopes}:}{Logical.
If \code{TRUE}, and \code{conditional.x = TRUE}, the (default) baseline model will allow the slope structure to be unrestricted.} \item{\code{store.vcov}:}{Logical. If \code{TRUE}, and \code{se=} is not set to \code{"none"}, store the full variance-covariance matrix of the model parameters in the vcov slot of the fitted lavaan object.} \item{\code{parser}:}{Character. If \code{"new"} (the default), the new parser is used to parse the model syntax. If \code{"old"}, the original (pre 0.6-18) parser is used.} } } \seealso{ \code{\link{lavaan}} } \examples{ lavOptions()
lavOptions("std.lv")
lavOptions(c("std.lv", "orthogonal")) } lavaan/man/sem.Rd0000644000176200001440000001354114622072672013356 0ustar liggesusers\name{sem} \alias{sem} \title{Fit Structural Equation Models} \description{ Fit a Structural Equation Model (SEM).} \usage{ sem(model = NULL, data = NULL, ordered = NULL, sampling.weights = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, group = NULL, cluster = NULL, constraints = "", WLS.V = NULL, NACOV = NULL, ov.order = "model", ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (e.g., the output of the \code{lavaanify()} function) is also accepted.} \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated as numeric (unless they are declared as ordered in the data.frame.) Since 0.6-4, \code{ordered} can also be logical. If \code{TRUE}, all observed endogenous variables are treated as ordered (ordinal). If \code{FALSE}, all observed endogenous variables are considered to be numeric (again, unless they are declared as ordered in the data.frame.)} \item{sampling.weights}{A variable name in the data frame containing sampling weight information. Currently only available for non-clustered data. Depending on the \code{sampling.weights.normalization} option, these weights may be rescaled (or not) so that their sum equals the number of observations (total or per group).} \item{sample.cov}{Numeric matrix. A sample variance-covariance matrix. The rownames and/or colnames must contain the observed variable names. For a multiple group analysis, a list with a variance-covariance matrix for each group.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.th}{Vector of sample-based thresholds. For a multiple group analysis, a list with a vector of thresholds for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} \item{group}{Character. A variable name in the data frame defining the groups in a multiple group analysis.} \item{cluster}{Character. A (single) variable name in the data frame defining the clusters in a two-level dataset.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax.
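For example (a minimal, hypothetical sketch: \code{a} and \code{b} are parameter labels attached in the model syntax, and \code{mydata} is a placeholder for your own data frame): \preformatted{
model <- 'y ~ a*x1 + b*x2'
# impose an inequality constraint not present in the model syntax
fit <- sem(model, data = mydata, constraints = 'a > b')
}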
See \code{\link{model.syntax}} for more information.} \item{WLS.V}{A user-provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix for each group. The elements of the weight matrix should be in the following order (if all data is continuous): first the means (if a meanstructure is involved), then the lower triangular elements of the covariance matrix including the diagonal, ordered column by column. In the categorical case: first the thresholds (including the means for continuous variables), then the slopes (if any), the variances of continuous variables (if any), and finally the lower triangular elements of the correlation/covariance matrix excluding the diagonal, ordered column by column.} \item{NACOV}{A user-provided matrix containing the elements of (N times) the asymptotic variance-covariance matrix of the sample statistics. For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} \item{ov.order}{Character. If \code{"model"} (the default), the order of the observed variable names (as reflected for example in the output of \code{lavNames()}) is determined by the model syntax. If \code{"data"}, the order is determined by the data (either the full data.frame or the sample (co)variance matrix). If the \code{WLS.V} and/or \code{NACOV} matrices are provided, this argument is currently set to \code{"data"}.} \item{...}{Many more additional options can be defined, using 'name = value'. See \code{\link{lavOptions}} for a complete list.} } \details{ The \code{sem} function is a wrapper for the more general \code{\link{lavaan}} function, but it sets the following default options: \code{int.ov.free = TRUE}, \code{int.lv.free = FALSE}, \code{auto.fix.first = TRUE} (unless \code{std.lv = TRUE}), \code{auto.fix.single = TRUE}, \code{auto.var = TRUE}, \code{auto.cov.lv.x = TRUE}, \code{auto.efa = TRUE}, \code{auto.th = TRUE}, \code{auto.delta = TRUE}, and \code{auto.cov.y = TRUE}. } \value{ An object of class \code{\linkS4class{lavaan}}, for which several methods are available, including a \code{summary} method. } \references{ Yves Rosseel (2012). lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36.
\doi{https://doi.org/10.18637/jss.v048.i02}} \seealso{ \code{\link{lavaan}} } \examples{ ## The industrialization and Political Democracy Example
## Bollen (1989), page 332
model <- '
  # latent variable definitions
  ind60 =~ x1 + x2 + x3
  dem60 =~ y1 + a*y2 + b*y3 + c*y4
  dem65 =~ y5 + a*y6 + b*y7 + c*y8
  # regressions
  dem60 ~ ind60
  dem65 ~ ind60 + dem60
  # residual correlations
  y1 ~~ y5
  y2 ~~ y4 + y6
  y3 ~~ y7
  y4 ~~ y8
  y6 ~~ y8
'
fit <- sem(model, data = PoliticalDemocracy)
summary(fit, fit.measures = TRUE) } lavaan/man/lav_constraints.Rd0000644000176200001440000000515114622072672016001 0ustar liggesusers\name{lav_constraints} \alias{lav_constraints_parse} \alias{lav_partable_constraints_ceq} \alias{lav_partable_constraints_ciq} \alias{lav_partable_constraints_def} \title{Utility Functions: Constraints} \description{Utility functions for equality and inequality constraints.} \usage{ lav_constraints_parse(partable = NULL, constraints = NULL, theta = NULL, debug = FALSE) lav_partable_constraints_ceq(partable, con = NULL, debug = FALSE, txtOnly = FALSE) lav_partable_constraints_ciq(partable, con = NULL, debug = FALSE, txtOnly = FALSE) lav_partable_constraints_def(partable, con = NULL, debug = FALSE, txtOnly = FALSE) } \arguments{ \item{partable}{A lavaan parameter table.} \item{constraints}{A character string containing the constraints.} \item{theta}{A numeric vector. Optional vector with values for the model parameters in the parameter table.} \item{debug}{Logical. If \code{TRUE}, show debugging information.} \item{con}{An optional partable where the operator is one of `==', `>', `<' or `:='.} \item{txtOnly}{Logical. If \code{TRUE}, only the body of the function is returned as a character string. If \code{FALSE}, a function is returned.} } \details{ This is a collection of lower-level constraint-related functions that are used in the lavaan code. They are made public at the request of package developers. Below is a brief description of what they do: The \code{lav_constraints_parse} function parses the constraints specification (provided as a string, see example), and generates a list with useful information about the constraints. The \code{lav_partable_constraints_ceq} function creates a function which takes the (unconstrained) parameter vector as input, and returns the slack values for each equality constraint. If the equality constraints hold perfectly, this function returns zeroes. The \code{lav_partable_constraints_ciq} function creates a function which takes the (unconstrained) parameter vector as input, and returns the slack values for each inequality constraint. The \code{lav_partable_constraints_def} function creates a function which takes the (unconstrained) parameter vector as input, and returns the computed values of the defined parameters. } \examples{ myModel <- 'x1 ~ a*x2 + b*x3 + c*x4'
myParTable <- lavaanify(myModel, as.data.frame. = FALSE)
con <- ' a == 2*b
         b - c == 5 '
conInfo <- lav_constraints_parse(myParTable, constraints = con)

myModel2 <- 'x1 ~ a*x2 + b*x3 + c*x4
             a == 2*b
             b - c == 5 '
ceq <- lav_partable_constraints_ceq(partable = lavaanify(myModel2))
ceq( c(2,3,4) ) } lavaan/man/lavPredictY_cv.Rd0000644000176200001440000000616414624603671015514 0ustar liggesusers\name{lavPredictY_cv} \alias{lavPredictY_cv} \title{Determine an optimal lambda penalty value through cross-validation} \description{ This function can be used to determine an optimal lambda value for the \code{\link{lavPredictY}} function,
based on cross-validation.} \usage{ lavPredictY_cv(object, data = NULL, xnames = lavNames(object, "ov.x"), ynames = lavNames(object, "ov.y"), n.folds = 10L, lambda.seq = seq(0, 1, 0.1)) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{data}{A data.frame containing the same variables as the data.frame that was used when fitting the model in \code{object}.} \item{xnames}{The names of the observed variables that should be treated as the x-variables. Can also be a list to allow for a separate set of variable names per group (or block).} \item{ynames}{The names of the observed variables that should be treated as the y-variables. It is for these variables that the function will predict the (model-based) values for each observation. Can also be a list to allow for a separate set of variable names per group (or block).} \item{n.folds}{Integer. The number of folds to be used during cross-validation.} \item{lambda.seq}{A numeric sequence (for example, generated by \code{seq()}) containing the range of lambda penalty values to be tested during cross-validation.} } \details{ This function is used to determine an optimal lambda value for \code{\link{lavPredictY}} predictions, in order to improve prediction accuracy. } \seealso{ \code{\link{lavPredictY}} to predict the values of (observed) y-variables given the values of (observed) x-variables in a structural equation model. } \references{ de Rooij, M., Karch, J.D., Fokkema, M., Bakk, Z., Pratiwi, B.C., and Kelderman, H. (2022). SEM-Based Out-of-Sample Predictions. Structural Equation Modeling: A Multidisciplinary Journal. \doi{https://doi.org/10.1080/10705511.2022.2061494} Molina, M. D., Molina, L., & Zappaterra, M. W. (2024). Aspects of Higher Consciousness: A Psychometric Validation and Analysis of a New Model of Mystical Experience. \doi{https://doi.org/10.31219/osf.io/cgb6e} } \examples{ colnames(PoliticalDemocracy) <- c("z1", "z2", "z3", "z4", "y1", "y2", "y3", "y4", "x1", "x2", "x3")
model <- '
  # latent variable definitions
  ind60 =~ x1 + x2 + x3
  dem60 =~ z1 + z2 + z3 + z4
  dem65 =~ y1 + y2 + y3 + y4
  # regressions
  dem60 ~ ind60
  dem65 ~ ind60 + dem60
  # residual correlations
  z1 ~~ y1
  z2 ~~ z4 + y2
  z3 ~~ y3
  z4 ~~ y4
  y2 ~~ y4
'
fit <- sem(model, data = PoliticalDemocracy, meanstructure = TRUE)

percent <- 0.5
nobs <- lavInspect(fit, "ntotal")
idx <- sort(sample(x = nobs, size = floor(percent*nobs)))

xnames = c("z1", "z2", "z3", "z4", "x1", "x2", "x3")
ynames = c("y1", "y2", "y3", "y4")

reg.results <- lavPredictY_cv(fit, PoliticalDemocracy[-idx, ],
                              xnames = xnames, ynames = ynames,
                              n.folds = 10L,
                              lambda.seq = seq(from = .6, to = 2.5, by = .1))
lam <- reg.results$lambda.min
lavPredictY(fit, newdata = PoliticalDemocracy[idx,],
            ynames = ynames, xnames = xnames, lambda = lam) } lavaan/man/lav_model.Rd0000644000176200001440000000400414622072672014526 0ustar liggesusers\name{lav_model} \alias{lav_model_get_parameters} \alias{lav_model_set_parameters} \alias{lav_model_implied} \alias{lav_model_vcov_se} \title{lavaan model functions} \description{Utility functions related to the internal model representation (lavmodel)} \usage{ # set/get free parameters
lav_model_set_parameters(lavmodel, x = NULL)
lav_model_get_parameters(lavmodel, GLIST = NULL, type = "free", extra = TRUE)

# compute model-implied statistics
lav_model_implied(lavmodel, GLIST = NULL, delta = TRUE)

# compute standard errors
lav_model_vcov_se(lavmodel, lavpartable, VCOV = NULL, BOOT = NULL) } \arguments{ \item{lavmodel}{An internal representation of a lavaan model.} \item{x}{Numeric.
A vector containing the values of all the free model parameters.} \item{GLIST}{List. A list of model matrices, similar to the output of \code{lavInspect(object, "est")}.} \item{type}{Character string. If \code{"free"}, only return the free model parameters. If \code{"user"}, return all the parameters (free and fixed) as they appear in the user-specified parameter table.} \item{extra}{Logical. If \code{TRUE}, also include values for rows in the parameter table where the operator is one of \code{":="}, \code{"=="}, \code{"<"} or \code{">"}.} \item{delta}{Logical. If \code{TRUE}, and a Delta matrix is present in GLIST, use the (diagonal) values of the Delta matrix to rescale the covariance matrix. This is usually needed in the categorical setting to convert covariances to correlations.} \item{lavpartable}{A parameter table.} \item{VCOV}{Numeric matrix containing an estimate of the variance-covariance matrix of the free model parameters.} \item{BOOT}{Numeric matrix containing the bootstrap based parameter estimates (in the columns) for each bootstrap sample (in the rows).} } \examples{ HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit <- cfa(HS.model, data=HolzingerSwineford1939)
lavmodel <- fit@Model
est <- lav_model_get_parameters(lavmodel)
est } lavaan/man/summary.efaList.Rd0000644000176200001440000001073714622072672015661 0ustar liggesusers\name{summary.efaList} \alias{summary.efaList} \alias{efaList} \alias{print.efaList.summary} \title{Summarizing EFA Fits} \description{ S3 summary and print methods for class \code{efaList}.} \usage{ \method{summary}{efaList}(object, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1, alpha.level = 0.01, lambda = TRUE, theta = TRUE, psi = TRUE, fit.table = TRUE, fs.determinacy = FALSE, eigenvalues = TRUE, sumsq.table = TRUE, lambda.structure = FALSE, se = FALSE, zstat = FALSE, pvalue = FALSE, \dots) \method{print}{efaList.summary}(x, nd = 3L, cutoff = 0.3, dot.cutoff = 0.1, alpha.level = 0.01, \dots) } \arguments{ \item{object}{An object of class \code{efaList}, usually the result of a call to \code{\link{efa}} with (the default) \code{output = "efa"}.} \item{x}{An object of class \code{summary.efaList}, usually the result of a call to \code{summary.efaList}.} \item{nd}{Integer. The number of digits that are printed after the decimal point in the output.} \item{cutoff}{Numeric. Factor loadings smaller than this value (in absolute value) are not printed (even if they are significantly different from zero). The idea is that only medium to large factor loadings are printed, to better see the overall structure.} \item{dot.cutoff}{Numeric. Factor loadings larger (in absolute value) than this value, but smaller (in absolute value) than the cutoff value are shown as a dot. They represent small loadings that may still need your attention.} \item{alpha.level}{Numeric. If the p-value of a factor loading is smaller than this value, a significance star is printed to the right of the factor loading. To switch this off, use \code{alpha.level = 0}.} \item{lambda}{Logical. If \code{TRUE}, include the (standardized) factor loadings in the summary.} \item{theta}{Logical. If \code{TRUE}, include the unique variances and the communalities in the table of factor loadings.} \item{psi}{Logical. If \code{TRUE}, include the factor correlations in the summary. Ignored if only a single factor is used.} \item{fit.table}{Logical. If \code{TRUE}, show fit information for each model.} \item{fs.determinacy}{Logical.
If \code{TRUE}, show the factor score determinacy values per factor (assuming regression factor scores are used) and their squared values.} \item{eigenvalues}{Logical. If \code{TRUE}, include the eigenvalues of the sample variance-covariance matrix in the summary.} \item{sumsq.table}{Logical. If \code{TRUE}, include a table with the sums of squares of factor loadings (and related measures) in the summary. The sums of squares are computed as the diagonal elements of Lambda times Psi (where Psi is the matrix of factor correlations). If orthogonal rotation was used, Psi is diagonal and the sums of squares are identical to the sums of the squared column elements of the Lambda matrix (i.e., the factor loadings). This is no longer the case when oblique rotation has been used. But in both cases (orthogonal or oblique), the (total) sum of the sums of squares equals the sum of the communalities. In the second row of the table (Proportion of total), the sums of squares are divided by the total. In the third row of the table (Proportion var), the sums of squares are divided by the number of items.} \item{lambda.structure}{Logical. If \code{TRUE}, show the structure matrix (i.e., the factor loadings multiplied by the factor correlations).} \item{se}{Logical. If \code{TRUE}, include the standard errors of the standardized lambda, theta and psi elements in the summary.} \item{zstat}{Logical. If \code{TRUE}, include the Z-statistics of the standardized lambda, theta and psi elements in the summary.} \item{pvalue}{Logical. If \code{TRUE}, include the P-values of the standardized lambda, theta and psi elements in the summary.} \item{...}{Further arguments passed to or from other methods.} } \value{ The function \code{summary.efaList} computes and returns a list of summary statistics for the list of EFA models in \code{object}. } \examples{ ## The famous Holzinger and Swineford (1939) example
fit <- efa(data = HolzingerSwineford1939,
           ov.names = paste("x", 1:9, sep = ""),
           nfactors = 1:3,
           rotation = "geomin",
           rotation.args = list(geomin.epsilon = 0.01, rstarts = 1))
summary(fit, nd = 3L, cutoff = 0.2, dot.cutoff = 0.05,
        lambda.structure = TRUE, pvalue = TRUE) } lavaan/man/bootstrap.Rd0000644000176200001440000001430214622072672014603 0ustar liggesusers\name{bootstrapLavaan} \alias{bootstrapLavaan} \alias{bootstrapLRT} \title{Bootstrapping a Lavaan Model} \description{Bootstrap the LRT, or any other statistic (or vector of statistics) you can extract from a fitted lavaan object.} \usage{ bootstrapLavaan(object, R = 1000L, type = "ordinary", verbose = FALSE, FUN = "coef", keep.idx = FALSE, parallel = c("no", "multicore", "snow"), ncpus = 1L, cl = NULL, iseed = NULL, h0.rmsea = NULL, ...)

bootstrapLRT(h0 = NULL, h1 = NULL, R = 1000L, type="bollen.stine", verbose = FALSE, return.LRT = FALSE, double.bootstrap = "no", double.bootstrap.R = 500L, double.bootstrap.alpha = 0.05, parallel = c("no", "multicore", "snow"), ncpus = 1L, cl = NULL, iseed = NULL) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{h0}{An object of class \code{\linkS4class{lavaan}}. The restricted model.} \item{h1}{An object of class \code{\linkS4class{lavaan}}. The unrestricted model.} \item{R}{Integer. The number of bootstrap draws.} \item{type}{If \code{"ordinary"} or \code{"nonparametric"}, the usual (naive) bootstrap method is used. If \code{"bollen.stine"}, the data is first transformed such that the null hypothesis holds exactly in the resampling space.
If \code{"yuan"}, the data is first transformed by combining data and theory (model), such that the resampling space is closer to the population space. Note that both \code{"bollen.stine"} and \code{"yuan"} require the data to be continuous. They will not work with ordinal data. If \code{"parametric"}, the parametric bootstrap approach is used; currently, this is only valid for continuous data following a multivariate normal distribution. See references for more details.} \item{FUN}{A function which when applied to the \code{\linkS4class{lavaan}} object returns a vector containing the statistic(s) of interest. The default is \code{FUN="coef"}, returning the estimated values of the free parameters in the model.} \item{\dots}{Other named arguments for \code{FUN} which are passed unchanged each time it is called.} \item{verbose}{If \code{TRUE}, show information for each bootstrap draw.} \item{keep.idx}{If \code{TRUE}, store the indices of each bootstrap run (i.e., the observations that were used for this bootstrap run) as an attribute.} \item{return.LRT}{If \code{TRUE}, return the LRT values as an attribute to the pvalue.} \item{parallel}{The type of parallel operation to be used (if any). If missing, the default is \code{"no"}. } \item{ncpus}{Integer: number of processes to be used in parallel operation. By default this is the number of cores (as detected by \code{parallel::detectCores()}) minus one.} \item{cl}{An optional \pkg{parallel} or \pkg{snow} cluster for use if \code{parallel = "snow"}. If not supplied, a cluster on the local machine is created for the duration of the \code{bootstrapLavaan} or \code{bootstrapLRT} call.} \item{iseed}{An integer to set the seed. Or NULL if no reproducible results are needed. This works for both serial (non-parallel) and parallel settings. Internally, \code{RNGkind()} is set to \code{"L'Ecuyer-CMRG"} if \code{parallel = "multicore"}. If \code{parallel = "snow"} (under windows), \code{parallel::clusterSetRNGStream()} is called which automatically switches to \code{"L'Ecuyer-CMRG"}. When \code{iseed} is not NULL, \code{.Random.seed} (if it exists) in the global environment is left untouched.} \item{h0.rmsea}{Only used if \code{type="yuan"}. Allows one to do the Yuan bootstrap under the hypothesis that the population RMSEA equals a specified value.} \item{double.bootstrap}{If \code{"standard"} the genuine double bootstrap is used to compute an additional set of plug-in p-values for each boostrap sample. If \code{"FDB"}, the fast double bootstrap is used to compute second level LRT-values for each bootstrap sample. If \code{"no"}, no double bootstrap is used. The default is set to \code{"FDB"}.} \item{double.bootstrap.R}{Integer. The number of bootstrap draws to be use for the double bootstrap.} \item{double.bootstrap.alpha}{The significance level to compute the adjusted alpha based on the plugin p-values.} } \author{Yves Rosseel and Leonard Vanbrabant. Ed Merkle contributed Yuan's bootstrap. Improvements to Yuan's bootstrap were contributed by Hao Wu and Chuchu Cheng. The handling of iseed was contributed by Shu Fai Cheung.} \value{For \code{bootstrapLavaan()}, the bootstrap distribution of the value(s) returned by \code{FUN}, when the object can be simplified to a vector. For \code{bootstrapLRT()}, a bootstrap \emph{p} value, calculated as the proportion of bootstrap samples with a LRT statistic at least as large as the LRT statistic for the original data.} \details{The FUN function can return either a scalar or a numeric vector. 
This function can be an existing function (for example \code{coef}) or a custom-defined function. For example: \preformatted{ myFUN <- function(x) { # require(lavaan) modelImpliedCov <- fitted(x)$cov vech(modelImpliedCov) } } If \code{parallel="snow"}, it is imperative that \code{require(lavaan)} is included in the custom function.} \references{ Bollen, K. and Stine, R. (1992). Bootstrapping Goodness of Fit Measures in Structural Equation Models. Sociological Methods and Research, 21, 205--229. Yuan, K.-H., Hayashi, K., & Yanagihara, H. (2007). A class of population covariance matrices in the bootstrap approach to covariance structure analysis. Multivariate Behavioral Research, 42, 261--281. } \examples{ # fit the Holzinger and Swineford (1939) example HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939, se="none") # get the test statistic for the original sample T.orig <- fitMeasures(fit, "chisq") # bootstrap to get bootstrap test statistics # we only generate 10 bootstrap samples in this example; in practice # you may wish to use a much higher number T.boot <- bootstrapLavaan(fit, R=10, type="bollen.stine", FUN=fitMeasures, fit.measures="chisq") # compute a bootstrap based p-value pvalue.boot <- length(which(T.boot > T.orig))/length(T.boot) } lavaan/man/standardizedSolution.Rd0000644000176200001440000000757114622072672017005 0ustar liggesusers\name{standardizedSolution} \alias{standardizedSolution} \alias{standardizedsolution} \title{Standardized Solution} \description{ Standardized solution of a latent variable model.} \usage{ standardizedSolution(object, type = "std.all", se = TRUE, zstat = TRUE, pvalue = TRUE, ci = TRUE, level = 0.95, cov.std = TRUE, remove.eq = TRUE, remove.ineq = TRUE, remove.def = FALSE, partable = NULL, GLIST = NULL, est = NULL, output = "data.frame") } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{type}{If \code{"std.lv"}, the standardized estimates are based on the variances of the (continuous) latent variables only. If \code{"std.all"}, the standardized estimates are based on the variances of both (continuous) observed and latent variables. If \code{"std.nox"}, the standardized estimates are based on the variances of both (continuous) observed and latent variables, but not the variances of exogenous covariates.} \item{se}{Logical. If TRUE, standard errors for the standardized parameters will be computed, together with a z-statistic and a p-value.} \item{zstat}{Logical. If \code{TRUE}, an extra column is added containing the so-called z-statistic, which is simply the value of the estimate divided by its standard error.} \item{pvalue}{Logical. If \code{TRUE}, an extra column is added containing the pvalues corresponding to the z-statistic, evaluated under a standard normal distribution.} \item{ci}{If \code{TRUE}, simple symmetric confidence intervals are added to the output.} \item{level}{The confidence level required.} \item{cov.std}{Logical. If TRUE, the (residual) observed covariances are scaled by the square root of the `Theta' diagonal elements, and the (residual) latent covariances are scaled by the square root of the `Psi' diagonal elements.
If FALSE, the (residual) observed covariances are scaled by the square root of the diagonal elements of the observed model-implied covariance matrix (Sigma), and the (residual) latent covariances are scaled by the square root of diagonal elements of the model-implied covariance matrix of the latent variables.} \item{remove.eq}{Logical. If TRUE, filter the output by removing all rows containing equality constraints, if any.} \item{remove.ineq}{Logical. If TRUE, filter the output by removing all rows containing inequality constraints, if any.} \item{remove.def}{Logical. If TRUE, filter the output by removing all rows containing parameter definitions, if any.} \item{GLIST}{List of model matrices. If provided, they will be used instead of the GLIST inside the object@Model slot. Only works if the \code{est} argument is also provided. See Note.} \item{est}{Numeric. Parameter values (as in the `est' column of a parameter table). If provided, they will be used instead of the parameters that can be extracted from the object. Only works if the \code{GLIST} argument is also provided. See Note.} \item{partable}{A custom \code{list} or \code{data.frame} in which to store the standardized parameter values. If provided, it will be used instead of the parameter table inside the object@ParTable slot.} \item{output}{Character. If \code{"data.frame"}, the parameter table is displayed as a standard (albeit lavaan-formatted) data.frame. If \code{"text"} (or alias \code{"pretty"}), the parameter table is prettified, and displayed with subsections (as used by the summary function).} } \note{ The \code{est}, \code{GLIST}, and \code{partable} arguments are not meant for everyday users, but for authors of external R packages that depend on \code{lavaan}. Only to be used with great caution. } \value{ A data.frame containing standardized model parameters. } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) standardizedSolution(fit) } lavaan/man/fitMeasures.Rd0000644000176200001440000001203614622072672015057 0ustar liggesusers\name{fitMeasures} \alias{fitMeasures} \alias{fitmeasures} \alias{fitMeasures,lavaan-method} \alias{fitmeasures,lavaan-method} \alias{fitindices} \title{Fit Measures for a Latent Variable Model} \description{ This function computes a variety of fit measures to assess the global fit of a latent variable model.} \usage{ fitMeasures(object, fit.measures = "all", baseline.model = NULL, h1.model = NULL, fm.args = list(standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.close.h0 = 0.05, rmsea.notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE), output = "vector", ...) fitmeasures(object, fit.measures = "all", baseline.model = NULL, h1.model = NULL, fm.args = list(standard.test = "default", scaled.test = "default", rmsea.ci.level = 0.90, rmsea.close.h0 = 0.05, rmsea.notclose.h0 = 0.08, robust = TRUE, cat.check.pd = TRUE), output = "vector", ...) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{fit.measures}{If \code{"all"}, all fit measures available will be returned. If only a single or a few fit measures are specified by name, only those are computed and returned.} \item{baseline.model}{If not NULL, an object of class \code{\linkS4class{lavaan}}, representing a user-specified baseline model. If a baseline model is provided, all fit indices relying on a baseline model (e.g.,
CFI or TLI) will use the test statistics from this user-specified baseline model, instead of the default baseline model.} \item{h1.model}{If not NULL, an object of class \code{\linkS4class{lavaan}}, representing a user-specified alternative to the default unrestricted model. If \code{h1.model} is provided, all fit indices calculated from chi-squared will use the chi-squared \emph{difference} test statistics from \code{\link{lavTestLRT}}, which compare the user-provided \code{h1.model} to \code{object}.} \item{fm.args}{List. Additional options for certain fit measures. The \code{standard.test} element determines the main test statistic (chi-square value) that will be used to compute all the fit measures that depend on this test statistic. Usually this is \code{"standard"}. The \code{scaled.test} element determines which scaling method is to be used for the scaled fit measures (in case multiple scaling methods were requested). The \code{rmsea.ci.level} element determines the level of the confidence interval for the rmsea value. The \code{rmsea.close.h0} element is the rmsea value that is used under the null hypothesis that \code{rmsea <= rmsea.close.h0}. The \code{rmsea.notclose.h0} element is the rmsea value that is used under the null hypothesis that \code{rmsea >= rmsea.notclose.h0}. The \code{robust} element can be set to \code{FALSE} to avoid computing the so-called robust rmsea/cfi measures (for example if the computations take too long). The \code{cat.check.pd} element is only used when data is categorical. If \code{TRUE}, robust values for RMSEA and CFI are only computed if the input correlation matrix is positive-definite (for all groups).} \item{output}{Character. If \code{"vector"} (the default), display the output as a named (lavaan-formatted) vector. If \code{"matrix"}, display the output as a 1-column matrix. If \code{"text"}, display the output using subsections and verbose descriptions. The latter is used in the summary output, and does not print the chi-square test by default. In addition, \code{fit.measures} should contain the main ingredient (for example \code{"rmsea"}) if related fit measures are requested (for example \code{"rmsea.ci.lower"}). Otherwise, nothing will be printed in that section. See the examples for how to add the chi-square test in the text output.} \item{...}{Further arguments passed to or from other methods. Not currently used for \code{lavaan} objects.} } \value{ A named numeric vector of fit measures.
} %\details{ %The following fit measures are available: % %} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) fitMeasures(fit) fitMeasures(fit, "cfi") fitMeasures(fit, c("chisq", "df", "pvalue", "cfi", "rmsea")) fitMeasures(fit, c("chisq", "df", "pvalue", "cfi", "rmsea"), output = "matrix") fitMeasures(fit, c("chisq", "df", "pvalue", "cfi", "rmsea"), output = "text") ## fit a more restricted model fit0 <- cfa(HS.model, data = HolzingerSwineford1939, orthogonal = TRUE) ## Calculate RMSEA_D (Savalei et al., 2023) ## See https://psycnet.apa.org/doi/10.1037/met0000537 fitMeasures(fit0, "rmsea", h1.model = fit) } lavaan/man/parTable.Rd0000644000176200001440000000134614622072672014324 0ustar liggesusers\name{parTable} \alias{parameterTable} \alias{parametertable} \alias{parTable} \alias{partable} \title{Parameter Table} \description{ Show the parameter table of a fitted model.} \usage{ parameterTable(object) parTable(object) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} } \value{ A \code{data.frame} containing the model parameters. This is simply the output of the \code{\link{lavaanify}} function coerced to a \code{data.frame} (with \code{stringsAsFactors = FALSE}). } \seealso{\code{\link{lavaanify}}.} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) parTable(fit) } lavaan/man/modificationIndices.Rd0000644000176200001440000001045214622072672016534 0ustar liggesusers\name{modificationIndices} \alias{modificationIndices} \alias{modificationindices} \alias{modindices} \title{Modification Indices} \description{ Given a fitted lavaan object, compute the modification indices (= univariate score tests) for a selected set of fixed-to-zero parameters. } \usage{ modificationIndices(object, standardized = TRUE, cov.std = TRUE, information = "expected", power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, sort. = FALSE, minimum.value = 0, maximum.number = nrow(LIST), free.remove = TRUE, na.remove = TRUE, op = NULL) modindices(object, standardized = TRUE, cov.std = TRUE, information = "expected", power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, sort. = FALSE, minimum.value = 0, maximum.number = nrow(LIST), free.remove = TRUE, na.remove = TRUE, op = NULL) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{standardized}{If \code{TRUE}, two extra columns (sepc.lv and sepc.all) will contain standardized values for the EPCs. In the first column (sepc.lv), standardization is based on the variances of the (continuous) latent variables. In the second column (sepc.all), standardization is based on the variances of both (continuous) observed and latent variables. (Residual) covariances are standardized using (residual) variances.} \item{cov.std}{Logical. See \code{\link{standardizedSolution}}.} \item{information}{\code{character} indicating the type of information matrix to use (check \code{\link{lavInspect}} for available options).
\code{"expected"} information is the default, which provides better control of Type I errors.} \item{power}{If \code{TRUE}, the (post-hoc) power is computed for each modification index, using the values of \code{delta} and \code{alpha}.} \item{delta}{The value of the effect size, as used in the post-hoc power computation, currently using the unstandardized metric of the epc column.} \item{alpha}{The significance level used for deciding if the modification index is statistically significant or not.} \item{high.power}{If the computed power is higher than this cutoff value, the power is considered `high'. If not, the power is considered `low'. This affects the values in the 'decision' column in the output.} \item{sort.}{Logical. If TRUE, sort the output using the values of the modification index values. Higher values appear first.} \item{minimum.value}{Numeric. Filter output and only show rows with a modification index value equal or higher than this minimum value.} \item{maximum.number}{Integer. Filter output and only show the first maximum number rows. Most useful when combined with the \code{sort.} option.} \item{free.remove}{Logical. If TRUE, filter output by removing all rows corresponding to free (unconstrained) parameters in the original model.} \item{na.remove}{Logical. If TRUE, filter output by removing all rows with NA values for the modification indices.} \item{op}{Character string. Filter the output by selecting only those rows with operator \code{op}.} } \value{ A data.frame containing modification indices and EPC's. } \details{ Modification indices are just 1-df (or univariate) score tests. The modification index (or score test) for a single parameter reflects (approximately) the improvement in model fit (in terms of the chi-square test statistic), if we would refit the model but allow this parameter to be free. This function is a convenience function in the sense that it produces a (hopefully sensible) table of currently fixed-to-zero (or fixed to another constant) parameters. For each of these parameters, a modification index is computed, together with an expected parameter change (epc) value. It is important to realize that this function will only consider fixed-to-zero parameters. If you have equality constraints in the model, and you wish to examine what happens if you release all (or some) of these equality constraints, use the \code{\link{lavTestScore}} function. } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) modindices(fit, minimum.value = 10, sort = TRUE) } lavaan/man/getCov.Rd0000644000176200001440000000551214622072672014020 0ustar liggesusers\name{getCov} \alias{getCov} \alias{cor2cov} \alias{char2num} \title{Utility Functions For Covariance Matrices} \description{Convenience functions to deal with covariance and correlation matrices.} \usage{ getCov(x, lower = TRUE, diagonal = TRUE, sds = NULL, names = paste("V", 1:nvar, sep="")) char2num(s) cor2cov(R, sds, names = NULL) } \arguments{ \item{x}{The elements of the covariance matrix. Either inside a character string or as a numeric vector. In the former case, the function \code{char2num} is used to convert the numbers (inside the character string) to numeric values.} \item{lower}{Logical. If \code{TRUE}, the numeric values in \code{x} are the lower-triangular elements of the (symmetric) covariance matrix only. If \code{FALSE}, \code{x} contains the upper triangular elements only. 
# The classic Wheaton et al. (1977) model # panel data on the stability of alienation lower <- ' 11.834, 6.947, 9.364, 6.819, 5.091, 12.532, 4.783, 5.028, 7.495, 9.986, -3.839, -3.889, -3.841, -3.625, 9.610, -21.899, -18.831, -21.748, -18.775, 35.522, 450.288 ' # convert to a full symmetric covariance matrix with names wheaton.cov <- getCov(lower, names=c("anomia67","powerless67", "anomia71", "powerless71","education","sei")) # the model wheaton.model <- ' # measurement model ses =~ education + sei alien67 =~ anomia67 + powerless67 alien71 =~ anomia71 + powerless71 # equations alien71 ~ alien67 + ses alien67 ~ ses # correlated residuals anomia67 ~~ anomia71 powerless67 ~~ powerless71 ' # fitting the model fit <- sem(wheaton.model, sample.cov=wheaton.cov, sample.nobs=932) # showing the results summary(fit, standardized=TRUE) } lavaan/man/lavaanList-class.Rd0000644000176200001440000000703614622072672015775 0ustar liggesusers\name{lavaanList-class} \docType{class} \alias{lavaanList-class} \alias{coef,lavaanList-method} \alias{summary,lavaanList-method} \title{Class For Representing A List of (Fitted) Latent Variable Models} \description{The \code{lavaanList} class represents a collection of (fitted) latent variable models, for a (potentially large) number of datasets. It contains information about the model (which is always the same), and for every dataset a set of (user-specified) slots from a regular lavaan object.} \section{Objects from the Class}{ Objects can be created via the \code{\link{cfaList}}, \code{\link{semList}}, or \code{\link{lavaanList}} functions. } \section{Slots}{ \describe{ \item{\code{version}:}{The lavaan package version used to create this object.} \item{\code{call}:}{The function call as returned by \code{match.call()}.} \item{\code{Options}:}{Named list of options that were provided by the user, or filled-in automatically.} \item{\code{ParTable}:}{Named list describing the model parameters. Can be coerced to a data.frame. In the documentation, this is called the `parameter table'.} \item{\code{pta}:}{Named list containing parameter table attributes.} \item{\code{Data}:}{Object of internal class \code{"Data"}: information about the data.} \item{\code{Model}:}{Object of internal class \code{"Model"}: the internal (matrix) representation of the model.} \item{\code{meta}:}{List containing additional flags. For internal use only.} \item{\code{timingList}:}{List.
Timing slot per dataset.} \item{\code{ParTableList}:}{List. ParTable slot per dataset.} \item{\code{DataList}:}{List. Data slot per dataset.} \item{\code{SampleStatsList}:}{List. SampleStats slot per dataset.} \item{\code{CacheList}:}{List. Cache slot per dataset.} \item{\code{vcovList}:}{List. vcov slot per dataset.} \item{\code{testList}:}{List. test slot per dataset.} \item{\code{optimList}:}{List. optim slot per dataset.} \item{\code{impliedList}:}{List. implied slot per dataset.} \item{\code{h1List}:}{List. h1 slot per dataset.} \item{\code{loglikList}:}{List. loglik slot per dataset.} \item{\code{baselineList}:}{List. baseline slot per dataset.} \item{\code{funList}:}{List. fun slot per dataset.} \item{\code{internalList}:}{List. internal slot per dataset.} \item{\code{external}:}{List. Empty slot to be used by add-on packages.} } } \section{Methods}{ \describe{ \item{coef}{\code{signature(object = "lavaanList", type = "free")}: Returns the estimates of the parameters in the model as the columns in a matrix; each column corresponds to a different dataset. If \code{type="free"}, only the free parameters are returned. If \code{type="user"}, all parameters listed in the parameter table are returned, including constrained and fixed parameters.} \item{summary}{\code{signature(object = "lavaanList", header = TRUE, estimates = TRUE, nd = 3L)}: Print a summary of the collection of fitted models. If \code{header = TRUE}, the header section is printed. If \code{estimates = TRUE}, print the parameter estimates section. The argument \code{nd} determines the number of digits after the decimal point to be printed (currently only in the parameter estimates section). Nothing is returned (use \code{parameterEstimates} or another extractor function to extract information from this object).} } } \seealso{ \code{\link{cfaList}}, \code{\link{semList}}, \code{\link{lavaanList}} } lavaan/man/lav_export_estimation.Rd0000644000176200001440000000666014622072672017215 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lav_export_estimation.R \name{lav_export_estimation} \alias{lav_export_estimation} \title{lav_export_estimation} \usage{ lav_export_estimation(lavaan_model) } \arguments{ \item{lavaan_model}{a fitted lavaan model} } \value{ List with: \itemize{ \item get_coef - When working with equality constraints, lavaan internally uses some transformations. get_coef is a function that recreates the coef function for the parameters. \item starting_values - the starting values to be used in the optimization \item objective_function - objective function, expecting the current parameter values and the lavaan model \item gradient_function - gradient function, expecting the current parameter values and the lavaan model \item lower - lower bounds for parameters \item upper - upper bounds for parameters } } \description{ lavaan provides a range of optimization methods with the optim.method argument (nlminb, BFGS, L-BFGS-B, GN, and nlminb.constr). `lav_export_estimation` allows exporting objects and functions necessary to pass a lavaan model into any optimizer that takes a combination of (1) starting values, (2) fit-function, (3) gradient-function, and (4) upper and lower bounds. This allows testing new optimization frameworks.
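As a minimal sketch (assuming \code{fit} is the lavaan model created with \code{do.fit = FALSE}, as in the example below), the exported elements likewise map directly onto \code{\link[stats]{nlminb}}: \preformatted{
est <- lav_export_estimation(lavaan_model = fit)
res <- nlminb(start     = est$starting_values,
              objective = est$objective_function,
              gradient  = est$gradient_function,
              lavaan_model = fit, # passed on to objective and gradient
              lower = est$lower,
              upper = est$upper)
# map the optimized parameters back to lavaan's parameterization:
est$get_coef(parameter_values = res$par, lavaan_model = fit)
}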
} \examples{ library(lavaan) model <- ' # latent variable definitions ind60 =~ x1 + x2 + x3 dem60 =~ y1 + y2 + y3 + y4 dem65 =~ y5 + a*y6 + y7 + y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 ' fit <- sem(model, data = PoliticalDemocracy, do.fit = FALSE) est <- lav_export_estimation(lavaan_model = fit) # The starting values are: est$starting_values # Note that these do not have labels (and may also differ from coef(fit) # in case of equality constraints): coef(fit) # To get the same parameters, use: est$get_coef(parameter_values = est$starting_values, lavaan_model = fit) # The objective function can be used to compute the fit at the current estimates: est$objective_function(parameter_values = est$starting_values, lavaan_model = fit) # The gradient function can be used to compute the gradients at the current estimates: est$gradient_function(parameter_values = est$starting_values, lavaan_model = fit) # Together, these elements provide the means to estimate the parameters with a large # range of optimizers. For simplicity, here is an example using optim: est_fit <- optim(par = est$starting_values, fn = est$objective_function, gr = est$gradient_function, lavaan_model = fit, method = "BFGS") est$get_coef(parameter_values = est_fit$par, lavaan_model = fit) # This is identical to coef(sem(model, data = PoliticalDemocracy)) # Example using ridge regularization for parameter a fn_ridge <- function(parameter_values, lavaan_model, est, lambda){ return(est$objective_function(parameter_values = parameter_values, lavaan_model = lavaan_model) + lambda * parameter_values[6]^2) } ridge_fit <- optim(par = est$get_coef(est$starting_values, lavaan_model = fit), fn = fn_ridge, lavaan_model = fit, est = est, lambda = 10) est$get_coef(parameter_values = ridge_fit$par, lavaan_model = fit) } lavaan/man/cfa.Rd0000644000176200001440000001325114622072672013321 0ustar liggesusers\name{cfa} \alias{cfa} \title{Fit Confirmatory Factor Analysis Models} \description{ Fit a Confirmatory Factor Analysis (CFA) model.} \usage{ cfa(model = NULL, data = NULL, ordered = NULL, sampling.weights = NULL, sample.cov = NULL, sample.mean = NULL, sample.th = NULL, sample.nobs = NULL, group = NULL, cluster = NULL, constraints = "", WLS.V = NULL, NACOV = NULL, ov.order = "model", ...) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (e.g., the output of the \code{lavaanify()} function) is also accepted.} \item{data}{An optional data frame containing the observed variables used in the model. If some variables are declared as ordered factors, lavaan will treat them as ordinal variables.} \item{ordered}{Character vector. Only used if the data is in a data.frame. Treat these variables as ordered (ordinal) variables, if they are endogenous in the model. Importantly, all other variables will be treated as numeric (unless they are declared as ordered in the data.frame). Since 0.6-4, ordered can also be logical. If TRUE, all observed endogenous variables are treated as ordered (ordinal). If FALSE, all observed endogenous variables are considered to be numeric (again, unless they are declared as ordered in the data.frame).} \item{sampling.weights}{A variable name in the data frame containing sampling weight information. Currently only available for non-clustered data.
Depending on the \code{sampling.weights.normalization} option, these weights may be rescaled (or not) so that their sum equals the number of observations (total or per group).} \item{sample.cov}{Numeric matrix. A sample variance-covariance matrix. The rownames and/or colnames must contain the observed variable names. For a multiple group analysis, a list with a variance-covariance matrix for each group.} \item{sample.mean}{A sample mean vector. For a multiple group analysis, a list with a mean vector for each group.} \item{sample.th}{Vector of sample-based thresholds. For a multiple group analysis, a list with a vector of thresholds for each group.} \item{sample.nobs}{Number of observations if the full data frame is missing and only sample moments are given. For a multiple group analysis, a list or a vector with the number of observations for each group.} \item{group}{Character. A variable name in the data frame defining the groups in a multiple group analysis.} \item{cluster}{Character. A (single) variable name in the data frame defining the clusters in a two-level dataset.} \item{constraints}{Additional (in)equality constraints not yet included in the model syntax. See \code{\link{model.syntax}} for more information.} \item{WLS.V}{A user-provided weight matrix to be used by estimator \code{"WLS"}; if the estimator is \code{"DWLS"}, only the diagonal of this matrix will be used. For a multiple group analysis, a list with a weight matrix for each group. The elements of the weight matrix should be in the following order (if all data is continuous): first the means (if a meanstructure is involved), then the lower triangular elements of the covariance matrix including the diagonal, ordered column by column. In the categorical case: first the thresholds (including the means for continuous variables), then the slopes (if any), the variances of continuous variables (if any), and finally the lower triangular elements of the correlation/covariance matrix excluding the diagonal, ordered column by column.} \item{NACOV}{A user-provided matrix containing the elements of (N times) the asymptotic variance-covariance matrix of the sample statistics. For a multiple group analysis, a list with an asymptotic variance-covariance matrix for each group. See the \code{WLS.V} argument for information about the order of the elements.} \item{ov.order}{Character. If \code{"model"} (the default), the order of the observed variable names (as reflected for example in the output of \code{lavNames()}) is determined by the model syntax. If \code{"data"}, the order is determined by the data (either the full data.frame or the sample (co)variance matrix). If the \code{WLS.V} and/or \code{NACOV} matrices are provided, this argument is currently set to \code{"data"}.} \item{...}{Many more additional options can be defined, using 'name = value'. See \code{\link{lavOptions}} for a complete list.} } \details{ The \code{cfa} function is a wrapper for the more general \code{\link{lavaan}} function, using the following default arguments: \code{int.ov.free = TRUE}, \code{int.lv.free = FALSE}, \code{auto.fix.first = TRUE} (unless \code{std.lv = TRUE}), \code{auto.fix.single = TRUE}, \code{auto.var = TRUE}, \code{auto.cov.lv.x = TRUE}, \code{auto.efa = TRUE}, \code{auto.th = TRUE}, \code{auto.delta = TRUE}, and \code{auto.cov.y = TRUE}. } \value{ An object of class \code{\linkS4class{lavaan}}, for which several methods are available, including a \code{summary} method. } \references{ Yves Rosseel (2012).
lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. \doi{10.18637/jss.v048.i02}} \seealso{ \code{\link{lavaan}} } \examples{ ## The famous Holzinger and Swineford (1939) example HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) summary(fit, fit.measures = TRUE) } lavaan/man/efa.Rd0000644000176200001440000001214614622072672013325 0ustar liggesusers\name{efa} \alias{efa} \alias{rotation} \title{Exploratory Factor Analysis} \description{ Fit one or more Exploratory Factor Analysis (EFA) model(s).} \usage{ efa(data = NULL, nfactors = 1L, sample.cov = NULL, sample.nobs = NULL, rotation = "geomin", rotation.args = list(), ov.names = names(data), bounds = "pos.var", ..., output = "efa") } \arguments{ \item{data}{A data frame containing the observed variables we need for the EFA. If only a subset of the observed variables is needed, use the \code{ov.names} argument.} \item{nfactors}{Integer or Integer vector. The desired number of factors to extract. Can be a single number, or a vector of numbers (e.g., \code{nfactors = 1:4}). For each different number, a model is fitted.} \item{sample.cov}{Numeric matrix. A sample variance-covariance matrix. The rownames and/or colnames must contain the observed variable names. Unlike \code{sem} and \code{cfa}, the matrix may be a correlation matrix.} \item{sample.nobs}{Number of observations if the full data frame is missing and only the sample variance-covariance matrix is given.} \item{rotation}{Character. The rotation method to be used. Possible options are varimax, quartimax, orthomax, oblimin, quartimin, geomin, promax, entropy, mccammon, infomax, tandem1, tandem2, oblimax, bentler, simplimax, target, pst (=partially specified target), cf, crawford-ferguson, cf-quartimax, cf-varimax, cf-equamax, cf-parsimax, cf-facparsim, biquartimin, bigeomin. The latter two are for bifactor rotation only. The rotation algorithms (except promax) are similar to those from the GPArotation package, but have been reimplemented for better control. The promax method is taken from the stats package.} \item{rotation.args}{List. Options related to the rotation algorithm. The default options (and their alternatives) are \code{orthogonal = FALSE}, \code{row.weights = "default"} (or \code{"kaiser"}, \code{"cureton.mulaik"} or \code{"none"}), \code{std.ov = TRUE}, \code{algorithm = "gpa"} (or \code{"pairwise"}), \code{rstarts = 30}, \code{gpa.tol = 1e-05}, \code{tol = 1e-08}, \code{max.iter = 10000L}, \code{warn = FALSE}, \code{verbose = FALSE}, \code{reflect = TRUE}, \code{order.lv.by = "index"} (or \code{"sumofsquares"} or \code{"none"}). Other options are specific for a particular rotation criterion: \code{geomin.epsilon = 0.001}, \code{orthomax.gamma = 1}, \code{promax.kappa = 4}, \code{cf.gamma = 0}, and \code{oblimin.gamma = 0}.} \item{ov.names}{Character vector. The variable names that are needed for the EFA. Should be a subset of the variable names in the data.frame. By default, all the variables in the data are used.} \item{bounds}{By default, \code{bounds = "pos.var"} forces all variances of both observed and latent variables to be strictly nonnegative. See the entry in \code{\link{lavOptions}} for more options.} \item{...}{Additional options to be passed to lavaan, using 'name = value'. See \code{\link{lavOptions}} for a complete list.} \item{output}{Character. If \code{"efa"} (the default), the output mimics the typical output of an EFA. If \code{"lavaan"}, a lavaan object is returned. The latter is only possible if nfactors contains a single (integer) number.} } \details{ The \code{efa} function is essentially a wrapper around the \code{lavaan} function. It generates the model syntax (for a given number of factors) and then calls \code{lavaan()} treating the factors as a single block that should be rotated.
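For example, with \code{nfactors = 2} and nine indicators, the generated model syntax is roughly of the following form (a sketch; the exact generated syntax may differ in details): \preformatted{
efa("efa")*f1 +
efa("efa")*f2 =~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9
} where the \code{efa("efa")} modifiers place both factors in a single block that is rotated jointly.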
If \code{"efa"} (the default), the output mimics the typical output of an EFA. If \code{"lavaan"}, a lavaan object returned. The latter is only possible if nfactors contains a single (integer) number.} } \details{ The \code{efa} function is essentially a wrapper around the \code{lavaan} function. It generates the model syntax (for a given number of factors) and then calls \code{lavaan()} treating the factors as a single block that should be rotated. The function only supports a single group. Categorical data is handled as usual by first computing an appropriate (e.g., tetrachoric or polychoric) correlation matrix, which is then used as input for the EFA. There is also (limited) support for twolevel data. The same number of factors is then extracted at the within and the between level. The promax rotation method (taken from the stats package) is only provided for convenience. Because promax is a two-step algorithm (first varimax, then oblique rotation to get simple structure), it does not use the gpa or pairwise rotation algorithms, and as a result, no standard errors are provided. } \value{ If \code{output = "lavaan"}, an object of class \code{\linkS4class{lavaan}}. If \code{output = "efa"}, a list of class \code{efaList} for which a \code{print()}, \code{summary()} and \code{fitMeasures()} method are available. Because we added the (standardized) loadings as an extra element, the \code{loadings} function (which is not a generic function) from the stats package will also work on \code{efaList} objects. } \seealso{ \code{\link{summary.efaList}} for a summary method if the output is of class \code{efaList}. } \examples{ ## The famous Holzinger and Swineford (1939) example fit <- efa(data = HolzingerSwineford1939, ov.names = paste("x", 1:9, sep = ""), nfactors = 1:3, rotation = "geomin", rotation.args = list(geomin.epsilon = 0.01, rstarts = 1)) summary(fit, nd = 3L, cutoff = 0.2, dot.cutoff = 0.05) fitMeasures(fit, fit.measures = "all") } lavaan/man/estfun.Rd0000644000176200001440000000445114622072672014076 0ustar liggesusers\name{estfun} \alias{estfun.lavaan} \alias{lavScores} \title{Extract Empirical Estimating Functions} \description{ A function for extracting the empirical estimating functions of a fitted lavaan model. This is the derivative of the objective function with respect to the parameter vector, evaluated at the observed (case-wise) data. In other words, this function returns the case-wise scores, evaluated at the fitted model parameters. } \usage{ estfun.lavaan(object, scaling = FALSE, ignore.constraints = FALSE, remove.duplicated = TRUE, remove.empty.cases = TRUE) lavScores(object, scaling = FALSE, ignore.constraints = FALSE, remove.duplicated = TRUE, remove.empty.cases = TRUE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{scaling}{Only used for the ML estimator. If \code{TRUE}, the scores are scaled to reflect the specific objective function used by lavaan. If \code{FALSE} (the default), the objective function is the loglikelihood function assuming multivariate normality.} \item{ignore.constraints}{Logical. If \code{TRUE}, the scores do not reflect the (equality or inequality) constraints. If \code{FALSE}, the scores are computed by taking the unconstrained scores, and adding the term \code{t(R) lambda}, where \code{lambda} are the (case-wise) Lagrange Multipliers, and \code{R} is the Jacobian of the constraint function. 
Only in the latter case will the sum of the columns be (almost) equal to zero.} \item{remove.duplicated}{If \code{TRUE}, and all the equality constraints have a simple form (e.g., a == b), the unconstrained scores are post-multiplied with a transformation matrix in order to remove the duplicated parameters.} \item{remove.empty.cases}{If \code{TRUE}, empty cases with only missing values will be removed from the output.} } \value{A n x k matrix corresponding to n observations and k parameters.} \author{Ed Merkle for the ML case; the \code{remove.duplicated}, \code{ignore.constraints} and \code{remove.empty.cases} arguments were added by Yves Rosseel; Franz Classe for the WLS case.} \examples{ ## The famous Holzinger and Swineford (1939) example HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) head(lavScores(fit)) } lavaan/man/lavTestScore.Rd0000644000176200001440000001164214622072672015210 0ustar liggesusers\name{lavTestScore} \alias{lavTestScore} \alias{lavtestscore} \alias{score} \alias{Score} \alias{lavScoreTest} \title{Score test} \description{ Score test (or Lagrange Multiplier test) for releasing one or more fixed or constrained parameters in a model.} \usage{ lavTestScore(object, add = NULL, release = NULL, univariate = TRUE, cumulative = FALSE, epc = FALSE, standardized = epc, cov.std = epc, verbose = FALSE, warn = TRUE, information = "expected") } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{add}{Either a character string (typically between single quotes) or a parameter table containing additional (currently fixed-to-zero) parameters for which the score test must be computed.} \item{release}{Vector of Integers. The indices of the constraints that should be released. The indices correspond to the order of the equality constraints as they appear in the parameter table.} \item{univariate}{Logical. If \code{TRUE}, compute the univariate score statistics, one for each constraint.} \item{cumulative}{Logical. If \code{TRUE}, order the univariate score statistics from large to small, and compute a series of multivariate score statistics, each time adding an additional constraint.} \item{epc}{Logical. If \code{TRUE}, and we are releasing existing constraints, compute the expected parameter changes for the existing (free) parameters, for each released constraint.} \item{standardized}{If \code{TRUE}, two extra columns (sepc.lv and sepc.all) in the \code{$epc} table will contain standardized values for the EPCs. In the first column (sepc.lv), standardization is based on the variances of the (continuous) latent variables. In the second column (sepc.all), standardization is based on the variances of both (continuous) observed and latent variables. (Residual) covariances are standardized using (residual) variances.} \item{cov.std}{Logical. See \code{\link{standardizedSolution}}.} \item{verbose}{Logical. Not used for now.} \item{warn}{Logical. If \code{TRUE}, print out warnings if they occur.} \item{information}{\code{character} indicating the type of information matrix to use (check \code{\link{lavInspect}} for available options). \code{"expected"} information is the default, which provides better control of Type I errors.} } \details{ This function can be used to compute both multivariate and univariate score tests.
There are two modes: 1) releasing fixed-to-zero parameters (using the \code{add} argument), and 2) releasing existing equality constraints (using the \code{release} argument). The two modes cannot be used simultaneously. When adding new parameters, they should not already be part of the model (i.e., not listed in the parameter table). If you want to test for a parameter that was explicitly fixed to a constant (say to zero), it is better to label the parameter, and use an explicit equality constraint. } \value{ A list containing at least one \code{data.frame}: \itemize{ \item{\code{$test}: The total score test, with columns for the score test statistic (\code{X2}), the degrees of freedom (\code{df}), and a \emph{p} value under the \eqn{\chi^2} distribution (\code{p.value}).} \item{\code{$uni}: Optional (if \code{univariate=TRUE}). Each 1-\emph{df} score test, equivalent to modification indices. If \code{epc=TRUE} when \code{add}ing parameters (not when releasing constraints), an unstandardized EPC is provided for each added parameter, as would be returned by \code{\link{modificationIndices}}.} \item{\code{$cumulative}: Optional (if \code{cumulative=TRUE}). Cumulative score tests.} \item{\code{$epc}: Optional (if \code{epc=TRUE}). Parameter estimates, expected parameter changes, and expected parameter values if all the tested constraints were freed.} } } \references{ Bentler, P. M., & Chou, C. P. (1993). Some new covariance structure model improvement statistics. Sage Focus Editions, 154, 235-255. } \examples{ HS.model <- ' visual =~ x1 + b1*x2 + x3 textual =~ x4 + b2*x5 + x6 speed =~ x7 + b3*x8 + x9 b1 == b2 b2 == b3 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) # test 1: release both two equality constraints lavTestScore(fit, cumulative = TRUE) # test 2: the score test for adding two (currently fixed # to zero) cross-loadings newpar = ' visual =~ x9 textual =~ x3 ' lavTestScore(fit, add = newpar) # equivalently, "add" can be a parameter table specifying parameters to free, # but must include some additional information: PT.add <- data.frame(lhs = c("visual","textual"), op = c("=~","=~"), rhs = c("x9","x3"), user = 10L, # needed to identify new parameters free = 1, # arbitrary numbers > 0 start = 0) # null-hypothesized value PT.add lavTestScore(fit, add = PT.add) # same result as above } lavaan/man/HolzingerSwineford1939.Rd0000644000176200001440000000361314622072672016741 0ustar liggesusers\name{HolzingerSwineford1939} \alias{HolzingerSwineford1939} \docType{data} \title{ Holzinger and Swineford Dataset (9 Variables) } \description{ The classic Holzinger and Swineford (1939) dataset consists of mental ability test scores of seventh- and eighth-grade children from two different schools (Pasteur and Grant-White). In the original dataset (available in the \code{MBESS} package), there are scores for 26 tests. However, a smaller subset with 9 variables is more widely used in the literature (for example in Joreskog's 1969 paper, which also uses the 145 subjects from the Grant-White school only). } \usage{data(HolzingerSwineford1939)} \format{ A data frame with 301 observations of 15 variables.
\describe{ \item{\code{id}}{Identifier} \item{\code{sex}}{Gender} \item{\code{ageyr}}{Age, year part} \item{\code{agemo}}{Age, month part} \item{\code{school}}{School (Pasteur or Grant-White)} \item{\code{grade}}{Grade} \item{\code{x1}}{Visual perception} \item{\code{x2}}{Cubes} \item{\code{x3}}{Lozenges} \item{\code{x4}}{Paragraph comprehension} \item{\code{x5}}{Sentence completion} \item{\code{x6}}{Word meaning} \item{\code{x7}}{Speeded addition} \item{\code{x8}}{Speeded counting of dots} \item{\code{x9}}{Speeded discrimination straight and curved capitals} } } \source{ This dataset was originally retrieved from \verb{http://web.missouri.edu/~kolenikovs/stata/hs-cfa.dta} (link no longer active) and converted to an R dataset. } \references{ Holzinger, K., and Swineford, F. (1939). A study in factor analysis: The stability of a bifactor solution. Supplementary Educational Monograph, no. 48. Chicago: University of Chicago Press. Joreskog, K. G. (1969). A general approach to confirmatory maximum likelihood factor analysis. \emph{Psychometrika}, 34, 183-202. } \seealso{ \code{\link{cfa}} } \examples{ head(HolzingerSwineford1939) } lavaan/man/lavPredictY.Rd0000644000176200001440000001030714624603671015016 0ustar liggesusers\name{lavPredictY} \alias{lavPredictY} \title{Predict the values of y-variables given the values of x-variables} \description{ This function can be used to predict the values of (observed) y-variables given the values of (observed) x-variables in a structural equation model.} \usage{ lavPredictY(object, newdata = NULL, ynames = lavNames(object, "ov.y"), xnames = lavNames(object, "ov.x"), method = "conditional.mean", label = TRUE, assemble = TRUE, force.zero.mean = FALSE, lambda = 0) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{newdata}{An optional data.frame, containing the same variables as the data.frame that was used when fitting the model in \code{object}. This data.frame should also include the y-variables (although their values will be ignored). Note that if no meanstructure was used in the original fit, we will use the saturated sample means of the original fit as substitutes for the model-implied means. Alternatively, refit the model using \code{meanstructure = TRUE}.} \item{ynames}{The names of the observed variables that should be treated as the y-variables. It is for these variables that the function will predict the (model-based) values for each observation. Can also be a list to allow for a separate set of variable names per group (or block).} \item{xnames}{The names of the observed variables that should be treated as the x-variables. Can also be a list to allow for a separate set of variable names per group (or block).} \item{method}{A character string. The only available option for now is \code{"conditional.mean"}. See Details.} \item{label}{Logical. If TRUE, the columns of the output are labeled.} \item{assemble}{Logical. If TRUE, the predictions of the separate multiple groups in the output are reassembled again to form a single data.frame with a group column, having the same dimensions as the original (or newdata) dataset.} \item{force.zero.mean}{Logical. Only relevant if there is no mean structure. If \code{TRUE}, the (model-implied) mean vector is set to the zero vector. If \code{FALSE}, the (model-implied) mean vector is set to the (unrestricted) sample mean vector.} \item{lambda}{Numeric. A lambda regularization penalty term.} } \details{ This function can be used for (SEM-based) out-of-sample predictions of outcome (y) variables, given the values of predictor (x) variables. This is in contrast to the \code{lavPredict()} function which (historically) only `predicts' the (factor) scores for latent variables, ignoring the structural part of the model. When \code{method = "conditional.mean"}, predictions (for y given x) are based on the (joint y and x) model-implied variance-covariance (Sigma) matrix and mean vector (Mu), and the standard expression for the conditional mean of a multivariate normal distribution.
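In partitioned form (a standard result for the conditional mean of a multivariate normal distribution, with \eqn{\mu} and \eqn{\Sigma} split into y- and x-blocks), the predicted values are \deqn{\hat{y} = \mu_y + \Sigma_{yx} \Sigma_{xx}^{-1} (x - \mu_x).}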
Note that if the model is saturated (and hence df = 0), the SEM-based predictions are identical to ordinary least squares predictions. Lambda is a regularization penalty term to improve prediction accuracy that can be determined using the \code{lavPredictY_cv} function. } \seealso{ \code{\link{lavPredict}} to compute scores for latent variables. \code{\link{lavPredictY_cv}} to determine an optimal lambda to increase prediction accuracy. } \references{ de Rooij, M., Karch, J.D., Fokkema, M., Bakk, Z., Pratiwi, B.C., and Kelderman, H. (2022). SEM-Based Out-of-Sample Predictions, Structural Equation Modeling: A Multidisciplinary Journal. \doi{10.1080/10705511.2022.2061494} Molina, M. D., Molina, L., & Zappaterra, M. W. (2024). Aspects of Higher Consciousness: A Psychometric Validation and Analysis of a New Model of Mystical Experience. \doi{10.31219/osf.io/cgb6e} } \examples{ model <- ' # latent variable definitions ind60 =~ x1 + x2 + x3 dem60 =~ y1 + a*y2 + b*y3 + c*y4 dem65 =~ y5 + a*y6 + b*y7 + c*y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 ' fit <- sem(model, data = PoliticalDemocracy) lavPredictY(fit, ynames = c("y5", "y6", "y7", "y8"), xnames = c("x1", "x2", "x3", "y1", "y2", "y3", "y4")) } lavaan/man/lavListInspect.Rd0000644000176200001440000001253414622072672015537 0ustar liggesusers\name{lavListInspect} \alias{lavListInspect} \alias{lavListTech} \title{Inspect or extract information from a lavaanList object} \description{ The \code{lavListInspect()} and \code{lavListTech()} functions can be used to inspect/extract information that is stored inside (or can be computed from) a lavaanList object. } \usage{ lavListInspect(object, what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) lavListTech(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaanList}}.} \item{what}{Character. What needs to be inspected/extracted? See Details for a full list. Note: the \code{what} argument is not case-sensitive (everything is converted to lower case).} \item{add.labels}{If \code{TRUE}, variable names are added to the vectors and/or matrices.} \item{add.class}{If \code{TRUE}, vectors are given the `lavaan.vector' class; matrices are given the `lavaan.matrix' class, and symmetric matrices are given the `lavaan.matrix.symmetric' class. This only affects the way they are printed on the screen.} \item{list.by.group}{Logical. Only used when the output consists of model matrices. If \code{TRUE}, the model matrices are nested within groups.
If \code{FALSE}, a flattened list is returned containing all model matrices, with repeated names for multiple groups.} \item{drop.list.single.group}{If \code{FALSE}, the results are returned as a list, where each element corresponds to a group (even if there is only a single group). If \code{TRUE}, the list will be unlisted if there is only a single group.} } \details{ The \code{lavListInspect()} and \code{lavListTech()} functions only differ in the way they return the results. The \code{lavListInspect()} function will prettify the output by default, while \code{lavListTech()} will not attempt to prettify the output by default. Below is a list of possible values for the \code{what} argument, organized in several sections: Model matrices: \describe{ \item{\code{"free"}:}{A list of model matrices. The non-zero integers represent the free parameters. The numbers themselves correspond to the position of the free parameter in the parameter vector. This determines the order of the model parameters in the output of, for example, \code{coef()} and \code{vcov()}.} \item{\code{"partable"}:}{A list of model matrices. The non-zero integers represent both the fixed parameters (for example, factor loadings fixed at 1.0), and the free parameters if we ignore any equality constraints. They correspond with all entries (fixed or free) in the parameter table. See \code{\link{parTable}}.} \item{\code{"start"}:}{A list of model matrices. The values represent the starting values for all model parameters. Alias: \code{"starting.values"}.} } Information about the data (including missing patterns): \describe{ \item{\code{"group"}:}{A character string. The group variable in the data.frame (if any).} \item{\code{"ngroups"}:}{Integer. The number of groups.} \item{\code{"group.label"}:}{A character vector. The group labels.} \item{\code{"level.label"}:}{A character vector. The level labels.} \item{\code{"cluster"}:}{A character vector. The cluster variable(s) in the data.frame (if any).} \item{\code{"nlevels"}:}{Integer. The number of levels.} \item{\code{"ordered"}:}{A character vector. The ordered variables.} \item{\code{"nobs"}:}{Integer vector. The number of observations in each group that were used in the analysis (in each dataset).} \item{\code{"norig"}:}{Integer vector. The original number of observations in each group (in each dataset).} \item{\code{"ntotal"}:}{Integer. The total number of observations that were used in the analysis. If there is just a single group, this is the same as the \code{"nobs"} option; if there are multiple groups, this is the sum of the \code{"nobs"} numbers for each group (in each dataset).} } Model features: \describe{ \item{\code{"meanstructure"}:}{Logical. \code{TRUE} if a meanstructure was included in the model.} \item{\code{"categorical"}:}{Logical. \code{TRUE} if categorical endogenous variables were part of the model.} \item{\code{"fixed.x"}:}{Logical. \code{TRUE} if the exogenous x-covariates are treated as fixed.} \item{\code{"parameterization"}:}{Character. Either \code{"delta"} or \code{"theta"}.} } \describe{ \item{\code{"list"}:}{The parameter table. The same output as given by \code{parTable()}.} \item{\code{"options"}:}{List. The option list.} \item{\code{"call"}:}{List.
The call as returned by match.call, coerced to a list.} } } \seealso{ \code{\link{lavaanList}} } \examples{ # fit model HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' # a data generating function generateData <- function() simulateData(HS.model, sample.nobs = 100) set.seed(1234) fit <- semList(HS.model, dataFunction = generateData, ndat = 5, store.slots = "partable") # extract information lavListInspect(fit, "free") lavListTech(fit, "free") } lavaan/man/model.syntax.Rd0000644000176200001440000005774414622072672015226 0ustar liggesusers\name{model.syntax} \alias{model.syntax} \alias{lavaanify} \alias{lavParTable} \alias{lavpartable} \alias{lavPartable} \alias{parseModelString} \alias{lavParseModelString} \title{The Lavaan Model Syntax} \description{ The lavaan model syntax describes a latent variable model. The function \code{lavaanify} turns it into a table that represents the full model as specified by the user. We refer to this table as the parameter table.} \usage{ lavaanify(model = NULL, meanstructure = FALSE, int.ov.free = FALSE, int.lv.free = FALSE, marker.int.zero = FALSE, orthogonal = FALSE, orthogonal.y = FALSE, orthogonal.x = FALSE, orthogonal.efa = FALSE, std.lv = FALSE, correlation = FALSE, effect.coding = "", conditional.x = FALSE, fixed.x = FALSE, parameterization = "delta", constraints = NULL, ceq.simple = FALSE, auto = FALSE, model.type = "sem", auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = FALSE, auto.cov.lv.x = FALSE, auto.cov.y = FALSE, auto.th = FALSE, auto.delta = FALSE, auto.efa = FALSE, varTable = NULL, ngroups = 1L, nthresholds = NULL, group.equal = NULL, group.partial = NULL, group.w.free = FALSE, debug = FALSE, warn = TRUE, as.data.frame. = TRUE) lavParTable(model = NULL, meanstructure = FALSE, int.ov.free = FALSE, int.lv.free = FALSE, marker.int.zero = FALSE, orthogonal = FALSE, orthogonal.y = FALSE, orthogonal.x = FALSE, orthogonal.efa = FALSE, std.lv = FALSE, correlation = FALSE, effect.coding = "", conditional.x = FALSE, fixed.x = FALSE, parameterization = "delta", constraints = NULL, ceq.simple = FALSE, auto = FALSE, model.type = "sem", auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = FALSE, auto.cov.lv.x = FALSE, auto.cov.y = FALSE, auto.th = FALSE, auto.delta = FALSE, auto.efa = FALSE, varTable = NULL, ngroups = 1L, nthresholds = NULL, group.equal = NULL, group.partial = NULL, group.w.free = FALSE, debug = FALSE, warn = TRUE, as.data.frame. = TRUE) lavParseModelString(model.syntax = '', as.data.frame. = FALSE, parser = "new", warn = TRUE, debug = FALSE) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax; see details for more information. Alternatively, a parameter table (e.g., the output of \code{lavParseModelString}) is also accepted.} \item{model.syntax}{The model syntax specifying the model. Must be a literal string.} \item{meanstructure}{If \code{TRUE}, intercepts/means will be added to the model for both observed and latent variables.} \item{int.ov.free}{If \code{FALSE}, the intercepts of the observed variables are fixed to zero.} \item{int.lv.free}{If \code{FALSE}, the intercepts of the latent variables are fixed to zero.} \item{marker.int.zero}{Logical. Only relevant if the metric of each latent variable is set by fixing the first factor loading to unity.
If \code{TRUE}, it implies \code{meanstructure = TRUE} and \code{std.lv = FALSE}, and it fixes the intercepts of the marker indicators to zero, while freeing the means/intercepts of the latent variables. Only works correctly for single group, single level models.} \item{orthogonal}{If \code{TRUE}, all covariances among latent variables are set to zero.} \item{orthogonal.y}{If \code{TRUE}, all covariances among endogenous latent variables only are set to zero.} \item{orthogonal.x}{If \code{TRUE}, all covariances among exogenous latent variables only are set to zero.} \item{orthogonal.efa}{If \code{TRUE}, all covariances among latent variables involved in rotation only are set to zero.} \item{std.lv}{If \code{TRUE}, the metric of each latent variable is determined by fixing their variances to 1.0. If \code{FALSE}, the metric of each latent variable is determined by fixing the factor loading of the first indicator to 1.0. If there are multiple groups, \code{std.lv = TRUE} and \code{"loadings"} is included in the \code{group.equal} argument, then only the latent variances of the first group will be fixed to 1.0, while the latent variances of other groups are set free.} \item{correlation}{If \code{TRUE}, a correlation structure is fitted. For continuous data, this implies that the (residual) variances are no longer parameters of the model.} \item{effect.coding}{Can be logical or character string. If logical and \code{TRUE}, this implies \code{effect.coding = c("loadings", "intercepts")}. If logical and \code{FALSE}, it is set equal to the empty string. If \code{"loadings"} is included, equality constraints are used so that the average of the factor loadings (per latent variable) equals 1. Note that this should not be used together with \code{std.lv = TRUE}. If \code{"intercepts"} is included, equality constraints are used so that the sum of the intercepts (belonging to the indicators of a single latent variable) equals zero. As a result, the latent mean will be freely estimated and usually equal the average of the means of the involved indicators.} \item{conditional.x}{If \code{TRUE}, we set up the model conditional on the exogenous `x' covariates; the model-implied sample statistics only include the non-x variables. If \code{FALSE}, the exogenous `x' variables are modeled jointly with the other variables, and the model-implied statistics reflect both sets of variables.} \item{fixed.x}{If \code{TRUE}, the exogenous `x' covariates are considered fixed variables and the means, variances and covariances of these variables are fixed to their sample values. If \code{FALSE}, they are considered random, and the means, variances and covariances are free parameters.} \item{parameterization}{Currently only used if data is categorical. If \code{"delta"}, the delta parameterization is used. If \code{"theta"}, the theta parameterization is used.} \item{constraints}{Additional (in)equality constraints.
See details for more information.} \item{ceq.simple}{If \code{TRUE}, and no other general constraints are used in the model, simple equality constraints are represented in the parameter table as duplicated free parameters (instead of extra rows with \code{op = "=="}).} \item{auto}{If \code{TRUE}, the default values are used for the \code{auto.*} arguments, depending on the value of \code{model.type}.} \item{model.type}{Either \code{"sem"} or \code{"growth"}; only used if \code{auto=TRUE}.} \item{auto.fix.first}{If \code{TRUE}, the factor loading of the first indicator is set to 1.0 for every latent variable.} \item{auto.fix.single}{If \code{TRUE}, the residual variance (if included) of an observed indicator is set to zero if it is the only indicator of a latent variable.} \item{auto.var}{If \code{TRUE}, the (residual) variances of both observed and latent variables are set free.} \item{auto.cov.lv.x}{If \code{TRUE}, the covariances of exogenous latent variables are included in the model and set free.} \item{auto.cov.y}{If \code{TRUE}, the covariances of dependent variables (both observed and latent) are included in the model and set free.} \item{auto.th}{If \code{TRUE}, thresholds for limited dependent variables are included in the model and set free.} \item{auto.delta}{If \code{TRUE}, response scaling parameters for limited dependent variables are included in the model and set free.} \item{auto.efa}{If \code{TRUE}, the necessary constraints are imposed to make the (unrotated) exploratory factor analysis blocks identifiable: for each block, factor variances are set to 1, factor covariances are constrained to be zero, and factor loadings are constrained to follow an echelon pattern.} \item{varTable}{The variable table containing information about the observed variables in the model.} \item{ngroups}{The number of (independent) groups.} \item{nthresholds}{Either a single integer or a named vector of integers. If \code{nthresholds} is a single integer, all endogenous variables are assumed to be ordered with \code{nthresholds} indicating the number of thresholds needed in the model. If \code{nthresholds} is a named vector, it indicates the number of thresholds for these ordered variables only. This argument should not be used in combination with \code{varTable}.} \item{group.equal}{A vector of character strings. Only used in a multiple group analysis. Can be one or more of the following: \code{"loadings"}, \code{"intercepts"}, \code{"means"}, \code{"regressions"}, \code{"residuals"} or \code{"covariances"}, specifying the pattern of equality constraints across multiple groups. When (in the model syntax) a vector of labels is used as a modifier for a certain parameter, this will override the \code{group.equal} setting if it applies to this parameter. See also the \bold{Multiple groups} section below for using modifiers in multiple groups.} \item{group.partial}{A vector of character strings containing the labels of the parameters which should be free in all groups (thereby overriding the \code{group.equal} argument for some specific parameters).} \item{group.w.free}{Logical. If \code{TRUE}, the group frequencies are considered to be free parameters in the model. In this case, a Poisson model is fitted to estimate the group frequencies. If \code{FALSE} (the default), the group frequencies are fixed to their observed values.} \item{as.data.frame.}{If \code{TRUE}, return the list of model parameters as a \code{data.frame}.} \item{parser}{Character. If \code{"old"}, use the original/classic parser.
If \code{"new"}, use the new/ldw parser. The default is \code{"new"}.} \item{warn}{If \code{TRUE}, some (possibly harmless) warnings are printed out.} \item{debug}{If \code{TRUE}, debugging information is printed out.} } \details{ The model syntax consists of one or more formula-like expressions, each one describing a specific part of the model. The model syntax can be read from a file (using \code{\link{readLines}}), or can be specified as a literal string enclosed by single quotes as in the example below. \preformatted{myModel <- ' # 1. latent variable definitions f1 =~ y1 + y2 + y3 f2 =~ y4 + y5 + y6 f3 =~ y7 + y8 + y9 + y10 f4 =~ y11 + y12 + y13 ! this is also a comment # 2. regressions f1 ~ f3 + f4 f2 ~ f4 y1 + y2 ~ x1 + x2 + x3 # 3. (co)variances y1 ~~ y1 y2 ~~ y4 + y5 f1 ~~ f2 # 4. intercepts f1 ~ 1; y5 ~ 1 # 5. thresholds y11 | t1 + t2 + t3 y12 | t1 y13 | t1 + t2 # 6. scaling factors y11 ~*~ y11 y12 ~*~ y12 y13 ~*~ y13 # 7. formative factors f5 <~ z1 + z2 + z3 + z4 ' } Blank lines and comments can be used in between the formulas, and formulas can be split over multiple lines. Both the sharp (#) and the exclamation (!) characters can be used to start a comment. Multiple formulas can be placed on a single line if they are separated by a semicolon (;). There can be seven types of formula-like expressions in the model syntax: \enumerate{ \item Latent variable definitions: The \code{"=~"} operator can be used to define (continuous) latent variables. The name of the latent variable is on the left of the \code{"=~"} operator, while the terms on the right, separated by \code{"+"} operators, are the indicators of the latent variable. The operator \code{"=~"} can be read as ``is manifested by''. \item Regressions: The \code{"~"} operator specifies a regression. The dependent variable is on the left of a \code{"~"} operator and the independent variables, separated by \code{"+"} operators, are on the right. These regression formulas are similar to the way ordinary linear regression formulas are used in R, but they may include latent variables. Interaction terms are currently not supported. \item Variance-covariances: The \code{"~~"} (`double tilde') operator specifies (residual) variances of an observed or latent variable, or a set of covariances between one variable, and several other variables (either observed or latent). Several variables, separated by \code{"+"} operators can appear on the right. This way, several pairwise (co)variances involving the same left-hand variable can be expressed in a single expression. The distinction between variances and residual variances is made automatically. \item Intercepts: A special case of a regression formula can be used to specify an intercept (or a mean) of either an observed or a latent variable. The variable name is on the left of a \code{"~"} operator. On the right is only the number \code{"1"} representing the intercept. Including an intercept formula in the model automatically implies \code{meanstructure = TRUE}. The distinction between intercepts and means is made automatically. \item Thresholds: The \code{"|"} operator can be used to define the thresholds of categorical endogenous variables (on the left hand side of the operator). By convention, the thresholds (on the right hand sided, separated by the \code{"+"} operator, are named \code{"t1"}, \code{"t2"}, etcetera. \item Scaling factors: The \code{"~*~"} operator defines a scale factor. The variable name on the left hand side must be the same as the variable name on the right hand side. 
Scale factors are used in the Delta parameterization, in a multiple group analysis when factor indicators are categorical. \item Formative factors: The \code{"<~"} operator can be used to define a formative factor (on the right hand side of the operator), in a similar way to how a reflective factor is defined (using the \code{"=~"} operator). This is just syntactic sugar to define a phantom latent variable (equivalent to using \code{"f =~ 0"}). In addition, the (residual) variance of the formative factor is fixed to zero. } There are four additional operators, also with left- and right-hand sides, that can be included in model syntax. Three of them are used to specify (in)equality constraints on estimated parameters (\code{==}, \code{>}, and \code{<}), and those are demonstrated in a later section about \bold{(In)equality constraints}. The final additional operator (\code{:=}) can be used to define ``new'' parameters that are functions of one or more other estimated parameters. The \code{:=} operator is demonstrated in a section about \bold{User-defined parameters}. Usually, only a single variable name appears on the left side of an operator. However, if multiple variable names are specified, separated by the \code{"+"} operator, the formula is repeated for each element on the left side (as for example in the third regression formula in the example above). The only exception is scaling factors, where only a single element is allowed on the left hand side. In the right-hand side of these formula-like expressions, each element can be modified (using the \code{"*"} operator) by either a numeric constant, an expression resulting in a numeric constant, an expression resulting in a character vector, or one of three special functions: \code{start()}, \code{label()} and \code{equal()}. This provides the user with a mechanism to fix parameters, to provide alternative starting values, to label the parameters, and to define equality constraints among model parameters. All \code{"*"} expressions are referred to as \emph{modifiers}. They are explained in more detail in the following sections. } \section{Fixing parameters}{ It is often desirable to fix a model parameter that is otherwise (by default) free. Any parameter in a model can be fixed by using a modifier resulting in a numerical constant. Here are some examples: \itemize{ \item Fixing the regression coefficient of the predictor \code{x2}: \preformatted{y ~ x1 + 2.4*x2 + x3} \item Specifying an orthogonal (zero) covariance between two latent variables: \preformatted{f1 ~~ 0*f2} \item Specifying an intercept and a linear slope in a growth model: \preformatted{i =~ 1*y11 + 1*y12 + 1*y13 + 1*y14 s =~ 0*y11 + 1*y12 + 2*y13 + 3*y14} } Instead of a numeric constant, one can use a mathematical function that returns a numeric constant, for example \code{sqrt(10)}. Multiplying by \code{NA} will force the corresponding parameter to be free. Additionally, the \code{==} operator can be used to set a \emph{labeled} parameter equal to a specific numeric value. This will be demonstrated in the section below about \bold{(In)equality constraints}. } \section{Starting values}{ User-provided starting values can be given by using the special function \code{start()}, containing a numeric constant. For example: \preformatted{y ~ x1 + start(1.0)*x2 + x3} Note that if a starting value is provided, the parameter is not automatically considered to be free.
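As a hypothetical illustration combining both mechanisms, multiplying by \code{NA} frees the first factor loading (which is otherwise fixed to 1.0 by default), while \code{start()} merely provides a starting value for the (free) loading of the second indicator: \preformatted{# NA*x1 frees the marker loading;
# start(0.8)*x2 only sets a starting value
f1 =~ NA*x1 + start(0.8)*x2 + x3}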
} \section{Parameter labels and equality constraints}{ Each free parameter in a model is automatically given a name (or label). The name given to a model parameter consists of three parts. The first part is the name of the variable in the left-hand side of the formula where the parameter was implied. The middle part is based on the special `operator' used in the formula. This can be either one of \code{"=~"}, \code{"~"} or \code{"~~"}. The third part is the name of the variable in the right-hand side of the formula where the parameter was implied, or \code{"1"} if it is an intercept. The three parts are pasted together in a single string. For example, the name of the fixed regression coefficient in the regression formula \code{y ~ x1 + 2.4*x2 + x3} is the string \code{"y~x2"}. The name of the parameter corresponding to the covariance between two latent variables in the formula \code{f1 ~~ f2} is the string \code{"f1~~f2"}. Although this automatic labeling of parameters is convenient, the user may specify their own labels for specific parameters simply by pre-multiplying the corresponding term (on the right hand side of the operator only) by a character string (starting with a letter). For example, in the formula \code{f1 =~ x1 + x2 + mylabel*x3}, the parameter corresponding to the factor loading of \code{x3} will be named \code{"mylabel"}. An alternative way to specify the label is as follows: \code{f1 =~ x1 + x2 + label("mylabel")*x3}, where the label is the argument of the special function \code{label()}; this can be useful if the label contains a space, or an operator (like \code{"~"}). There are two ways to constrain a parameter to be equal to another target parameter. If you have specified your own labels, you can use the fact that \emph{equal labels imply equal parameter values}. If you rely on automatic parameter labels, you can use the special function \code{equal()}. The argument of \code{equal()} is the (automatic or user-specified) name of the target parameter. For example, in the confirmatory factor analysis example below, the intercepts of the three indicators of each latent variable are constrained to be equal to each other. For the first three, we have used the default names. For the last three, we have provided a custom label for the \code{y2a} intercept. \preformatted{model <- ' # two latent variables with fixed loadings f1 =~ 1*y1a + 1*y1b + 1*y1c f2 =~ 1*y2a + 1*y2b + 1*y2c # intercepts constrained to be equal # using the default names y1a ~ 1 y1b ~ equal("y1a~1") * 1 y1c ~ equal("y1a~1") * 1 # intercepts constrained to be equal # using a custom label y2a ~ int2*1 y2b ~ int2*1 y2c ~ int2*1 ' } } \section{Multiple groups}{ In a multiple group analysis, modifiers that contain a single element should be replaced by a vector, having the same length as the number of groups. If you provide a single element, it will be recycled for all the groups. This may be dangerous, in particular when the modifier is a label. In that case, the (same) label is copied across all groups, and this would imply an equality constraint across groups. Therefore, when using modifiers in a multiple group setting, it is always safer (and cleaner) to specify the same number of elements as the number of groups.
Consider this example with two groups: \preformatted{ HS.model <- ' visual =~ x1 + 0.5*x2 + c(0.6, 0.8)*x3 textual =~ x4 + start(c(1.2, 0.6))*x5 + x6 speed =~ x7 + x8 + c(x9.group1, x9.group2)*x9 ' } In this example, the factor loading of the `x2' indicator is fixed to the value 0.5 for both groups. However, the factor loadings of the `x3' indicator are fixed to 0.6 and 0.8 for group 1 and group 2, respectively. The same logic is used for all modifiers. Note that character vectors can contain unquoted strings. } \section{Multiple modifiers}{ In the model syntax, you can specify a variable more than once on the right hand side of an operator; therefore, several `modifiers' can be applied simultaneously; for example, if you want to fix the value of a parameter and also label that parameter, you can use something like: \preformatted{ f1 =~ x1 + x2 + 4*x3 + x3.loading*x3} } \section{(In)equality constraints}{ The \code{==} operator can be used either to fix a parameter to a specific value, or to set an estimated parameter equal to another parameter. Adapting the example in the \bold{Parameter labels and equality constraints} section, we could have used different labels for the second factor's intercepts: \preformatted{ y2a ~ int1*1 y2b ~ int2*1 y2c ~ int3*1 } Then, we could fix the first intercept to zero by including in the syntax an operation that indicates the parameter's label equals that value: \preformatted{ int1 == 0 } We could still estimate the other two intercepts under an equality constraint by setting their different labels equal to each other: \preformatted{ int2 == int3 } Optimization can be less efficient when constraining parameters this way (see the documentation linked under \bold{See also} for more information). However, the flexibility might be advantageous. For example, the constraints could be specified in a separate character-string object, which can be passed to the \code{lavaan(..., constraints=)} argument, enabling users to compare results with(out) the constraints. Inequality constraints work much the same way, using the \code{<} or \code{>} operator to indicate which estimated parameter is hypothesized to be greater/less than either a specific value or another estimated parameter. For example, a variance can be constrained to be nonnegative: \preformatted{ y1a ~~ var1a*y1a ## hypothesized constraint: var1a > 0 } Or the factor loading of a particular indicator might be expected to exceed other indicators' loadings: \preformatted{ f1 =~ L1*y1a + L2*y1b + L3*y1c ## hypothesized constraints: L1 > L2 L3 < L1 } } \section{User-defined parameters}{ Functions of parameters can be useful to test particular hypotheses. Following from the \bold{Multiple groups} example, we might be interested in which group's factor loading is larger (i.e., an estimate of differential item functioning (DIF) when the latent scales are linked by anchor items with equal loadings). \preformatted{ speed =~ c(L7, L7)*x7 + c(L8, L8)*x8 + c(L9.group1, L9.group2)*x9 ## user-defined parameter: DIF_L9 := L9.group1 - L9.group2 } Note that this hypothesis is easily tested without a user-defined parameter by using the \code{lavTestWald()} function. However, a user-defined parameter additionally provides an estimate of the parameter being tested. User-defined parameters are particularly useful for specifying indirect effects in models of mediation.
For example: \preformatted{ model <- ' # direct effect Y ~ c*X # mediator M ~ a*X Y ~ b*M # user defined parameters: # indirect effect (a*b) ab := a*b # total effect (defined using another user-defined parameter) total := ab + c ' } } \references{ Rosseel, Y. (2012). \code{lavaan}: An R package for structural equation modeling. \emph{Journal of Statistical Software, 48}(2), 1--36. \doi{10.18637/jss.v048.i02} } lavaan/man/simulateData.Rd0000644000176200001440000001620114622072672015203 0ustar liggesusers\name{simulateData} \alias{simulateData} \title{Simulate Data From a Lavaan Model Syntax} \description{Simulate data starting from a lavaan model syntax.} \usage{ simulateData(model = NULL, model.type = "sem", meanstructure = FALSE, int.ov.free = TRUE, int.lv.free = FALSE, marker.int.zero = FALSE, conditional.x = FALSE, fixed.x = FALSE, orthogonal = FALSE, std.lv = TRUE, auto.fix.first = FALSE, auto.fix.single = FALSE, auto.var = TRUE, auto.cov.lv.x = TRUE, auto.cov.y = TRUE, ..., sample.nobs = 500L, ov.var = NULL, group.label = paste("G", 1:ngroups, sep = ""), skewness = NULL, kurtosis = NULL, seed = NULL, empirical = FALSE, return.type = "data.frame", return.fit = FALSE, debug = FALSE, standardized = FALSE) } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (e.g., the output of the \code{lavaanify()} function) is also accepted.} \item{model.type}{Set the model type: possible values are \code{"cfa"}, \code{"sem"} or \code{"growth"}. This may affect how starting values are computed, and may be used to alter the terminology used in the summary output, or the layout of path diagrams that are based on a fitted lavaan object.} \item{meanstructure}{If \code{TRUE}, the means of the observed variables enter the model. If \code{"default"}, the value is set based on the user-specified model, and/or the values of other arguments.} \item{int.ov.free}{If \code{FALSE}, the intercepts of the observed variables are fixed to zero.} \item{int.lv.free}{If \code{FALSE}, the intercepts of the latent variables are fixed to zero.} \item{marker.int.zero}{Logical. Only relevant if the metric of each latent variable is set by fixing the first factor loading to unity. If \code{TRUE}, it implies \code{meanstructure = TRUE} and \code{std.lv = FALSE}, and it fixes the intercepts of the marker indicators to zero, while freeing the means/intercepts of the latent variables. Only works correctly for single group, single level models.} \item{conditional.x}{If \code{TRUE}, we set up the model conditional on the exogenous `x' covariates; the model-implied sample statistics only include the non-x variables. If \code{FALSE}, the exogenous `x' variables are modeled jointly with the other variables, and the model-implied statistics reflect both sets of variables. If \code{"default"}, the value is set depending on the estimator, and whether or not the model involves categorical endogenous variables.} \item{fixed.x}{If \code{TRUE}, the exogenous `x' covariates are considered fixed variables and the means, variances and covariances of these variables are fixed to their sample values. If \code{FALSE}, they are considered random, and the means, variances and covariances are free parameters.
If \code{"default"}, the value is set depending on the mimic option.} \item{orthogonal}{If \code{TRUE}, the exogenous latent variables are assumed to be uncorrelated.} \item{std.lv}{If \code{TRUE}, the metric of each latent variable is determined by fixing their variances to 1.0. If \code{FALSE}, the metric of each latent variable is determined by fixing the factor loading of the first indicator to 1.0.} \item{auto.fix.first}{If \code{TRUE}, the factor loading of the first indicator is set to 1.0 for every latent variable.} \item{auto.fix.single}{If \code{TRUE}, the residual variance (if included) of an observed indicator is set to zero if it is the only indicator of a latent variable.} \item{auto.var}{If \code{TRUE}, the (residual) variances of both observed and latent variables are set free.} \item{auto.cov.lv.x}{If \code{TRUE}, the covariances of exogenous latent variables are included in the model and set free.} \item{auto.cov.y}{If \code{TRUE}, the covariances of dependent variables (both observed and latent) are included in the model and set free.} \item{\dots}{additional arguments passed to the \code{\link{lavaan}} function.} \item{sample.nobs}{Number of observations. If a vector, multiple datasets are created. If \code{return.type = "matrix"} or \code{return.type = "cov"}, a list of \code{length(sample.nobs)} is returned, with either the data or covariance matrices, each one based on the number of observations as specified in \code{sample.nobs}. If \code{return.type = "data.frame"}, all datasets are merged and a \code{group} variable is added to mimic a multiple group dataset.} \item{ov.var}{The user-specified variances of the observed variables.} \item{group.label}{The group labels that should be used if multiple groups are created.} \item{skewness}{Numeric vector. The skewness values for the observed variables. Defaults to zero.} \item{kurtosis}{Numeric vector. The kurtosis values for the observed variables. Defaults to zero.} \item{seed}{Set random seed.} \item{empirical}{Logical. If \code{TRUE}, the implied moments (Mu and Sigma) specify the empirical not population mean and covariance matrix.} \item{return.type}{If \code{"data.frame"}, a data.frame is returned. If \code{"matrix"}, a numeric matrix is returned (without any variable names). If \code{"cov"}, a covariance matrix is returned (without any variable names).} \item{return.fit}{If \code{TRUE}, return the fitted model that has been used to generate the data as an attribute (called \code{"fit"}); this may be useful for inspection.} \item{debug}{If \code{TRUE}, debugging information is displayed.} \item{standardized}{If \code{TRUE}, the residual variances of the observed variables are set in such a way such that the model implied variances are unity. This allows regression coefficients and factor loadings (involving observed variables) to be specified in a standardized metric.} } \details{Model parameters can be specified by fixed values in the lavaan model syntax. If no fixed values are specified, the value zero will be assumed, except for factor loadings and variances, which are set to unity by default. By default, multivariate normal data are generated. However, by providing skewness and/or kurtosis values, nonnormal multivariate data can be generated, using the Vale & Maurelli (1983) method.} \value{The generated data. 
Either as a data.frame (if \code{return.type="data.frame"}), a numeric matrix (if \code{return.type="matrix"}), or a covariance matrix (if \code{return.type="cov"}).} \examples{ # specify population model population.model <- ' f1 =~ x1 + 0.8*x2 + 1.2*x3 f2 =~ x4 + 0.5*x5 + 1.5*x6 f3 =~ x7 + 0.1*x8 + 0.9*x9 f3 ~ 0.5*f1 + 0.6*f2 ' # generate data set.seed(1234) myData <- simulateData(population.model, sample.nobs=100L) # population moments fitted(sem(population.model)) # sample moments round(cov(myData), 3) round(colMeans(myData), 3) # fit model myModel <- ' f1 =~ x1 + x2 + x3 f2 =~ x4 + x5 + x6 f3 =~ x7 + x8 + x9 f3 ~ f1 + f2 ' fit <- sem(myModel, data=myData) summary(fit) } lavaan/man/lav_data.Rd0000644000176200001440000000236014622072672014342 0ustar liggesusers\name{lav_data} \alias{lav_data_update} \title{lavaan data functions} \description{Utility functions related to the Data slot} \usage{ # update data slot with new data (of the same size) lav_data_update(lavdata = NULL, newX = NULL, BOOT.idx = NULL, lavoptions = NULL) } \arguments{ \item{lavdata}{A lavdata object.} \item{newX}{A list of (new) data matrices (per group) of the same size. They will replace the data stored in the internal data slot.} \item{BOOT.idx}{A list of integers. If bootstrapping was used to produce the data in \code{newX}, use these indices to adapt the remaining slots.} \item{lavoptions}{A named list. The Options slot from a lavaan object.} } \examples{ # generate syntax for an independence model HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) # extract data slot and options lavdata <- fit@Data lavoptions <- lavInspect(fit, "options") # create bootstrap sample boot.idx <- sample(x = nobs(fit), size = nobs(fit), replace = TRUE) newX <- list(lavdata@X[[1]][boot.idx,]) # generate updated lavdata object newdata <- lav_data_update(lavdata = lavdata, newX = newX, lavoptions = lavoptions) } lavaan/man/lavPredict.Rd0000644000176200001440000001440514622072672014667 0ustar liggesusers\name{lavPredict} \alias{lavPredict} \alias{lavpredict} \title{Predict the values of latent variables (and their indicators).} \description{ The main purpose of the \code{lavPredict()} function is to compute (or `predict') estimated values for the latent variables in the model (`factor scores'). NOTE: the goal of this function is NOT to predict future values of dependent variables as in the regression framework! (For models with only continuous observed variables, the function \code{lavPredictY()} supports this.)} \usage{ lavPredict(object, newdata = NULL, type = "lv", method = "EBM", transform = FALSE, se = "none", acov = "none", label = TRUE, fsm = FALSE, mdist = FALSE, append.data = FALSE, assemble = FALSE, level = 1L, optim.method = "bfgs", ETA = NULL, drop.list.single.group = TRUE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{newdata}{An optional data.frame, containing the same variables as the data.frame used when fitting the model in object.} \item{type}{A character string. If \code{"lv"}, estimated values for the latent variables in the model are computed. If \code{"ov"}, model predicted values for the indicators of the latent variables in the model are computed. If \code{"yhat"}, the estimated value for the observed indicators, given user-specified values for the latent variables provided by the \code{ETA} argument.
If \code{"fy"}, densities (or probabilities) for each observed indicator, given user-specified values for the latent variables provided by de \code{ETA} argument.} \item{method}{A character string. In the linear case (when the indicators are continuous), the possible options are \code{"regression"} or \code{"Bartlett"}. In the categorical case, the two options are \code{"EBM"} for the Empirical Bayes Modal approach, and \code{"ML"} for the maximum likelihood approach.} \item{transform}{Logical. If \code{TRUE}, transform the factor scores (per group) so that their mean and variance-covariance matrix matches the model-implied mean and variance-covariance matrix. This may be useful if the individual factor scores will be used in a follow-up (regression) analysis. Note: the standard errors (if requested) and the factor score matrix (if requested) are not transformed (yet).} \item{se}{Character. If \code{"none"}, no standard errors are computed. If \code{"standard"}, naive standard errors are computed (assuming the parameters of the measurement model are known). The standard errors are returned as an attribute. Currently only available for complete continuous data.} \item{acov}{Similar to the \code{"se"} argument, but optionally returns the full sampling covariance matrix of factor scores as an attribute. Currently only available for complete continuous data.} \item{label}{Logical. If TRUE, the columns in the output are labeled.} \item{fsm}{Logical. If TRUE, return the factor score matrix as an attribute. Only for numeric data.} \item{mdist}{Logical. If TRUE, the (squared) Mahalanobis distances of the factor scores (if \code{type = "lv"}) or the casewise residuals (if \code{type = "resid"}) are returned as an attribute.} \item{append.data}{Logical. Only used when \code{type = "lv"}. If TRUE, the original data (or the data provided in the newdata argument) is appended to the factor scores.} \item{assemble}{Logical. If TRUE, the separate multiple groups are reassembled again to form a single data.frame with a group column, having the same dimensions are the original (or newdata) dataset.} \item{level}{Integer. Only used in a multilevel SEM. If \code{level = 1}, only factor scores for latent variable defined at the first (within) level are computed; if \code{level = 2}, only factor scores for latent variables defined at the second (between) level are computed.} \item{optim.method}{Character string. Only used in the categorical case. If \code{"nlminb"} (the default in 0.5), the \code{"nlminb()"} function is used for the optimization. If \code{"bfgs"} or \code{"BFGS"} (the default in 0.6), the \code{"optim()"} function is used with the BFGS method.} \item{ETA}{An optional matrix or list, containing latent variable values for each observation. Used for computations when \code{type = "ov"}.} \item{drop.list.single.group}{Logical. If \code{FALSE}, the results are returned as a list, where each element corresponds to a group (even if there is only a single group). If \code{TRUE}, the list will be unlisted if there is only a single group.} } \details{ The \code{predict()} function calls the \code{lavPredict()} function with its default options. If there are no latent variables in the model, \code{type = "ov"} will simply return the values of the observed variables. Note that this function can not be used to `predict' values of dependent variables, given the values of independent values (in the regression sense). In other words, the structural component is completely ignored (for now). 
} \seealso{ \code{\link{lavPredictY}} to predict y-variables given x-variables. } \examples{ data(HolzingerSwineford1939) ## fit model HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) head(lavPredict(fit)) head(lavPredict(fit, type = "ov")) ## ------------------------------------------ ## merge factor scores to original data.frame ## ------------------------------------------ idx <- lavInspect(fit, "case.idx") fscores <- lavPredict(fit) ## loop over factors for (fs in colnames(fscores)) { HolzingerSwineford1939[idx, fs] <- fscores[ , fs] } head(HolzingerSwineford1939) ## multigroup models return a list of factor scores (one per group) data(HolzingerSwineford1939) mgfit <- update(fit, group = "school", group.equal = c("loadings","intercepts")) idx <- lavInspect(mgfit, "case.idx") # list: 1 vector per group fscores <- lavPredict(mgfit) # list: 1 matrix per group ## loop over groups and factors for (g in seq_along(fscores)) { for (fs in colnames(fscores[[g]])) { HolzingerSwineford1939[ idx[[g]], fs] <- fscores[[g]][ , fs] } } head(HolzingerSwineford1939) ## ------------------------------------- ## Use factor scores in subsequent models ## ------------------------------------- ## see Examples in semTools package: ?plausibleValues } lavaan/man/lavMatrixRepresentation.Rd0000644000176200001440000000301614622072672017460 0ustar liggesusers\name{lavMatrixRepresentation} \alias{lavMatrixRepresentation} \title{lavaan matrix representation} \description{ Extend the parameter table with a matrix representation. } \usage{ lavMatrixRepresentation(partable, representation = "LISREL", add.attributes = FALSE, as.data.frame. = TRUE) } \arguments{ \item{partable}{A lavaan parameter table (as extracted by the \code{\link{parTable}} function, or generated by the \code{\link{lavPartable}} function).} \item{representation}{Character. The matrix representation style. Currently, only the all-y version of the LISREL representation is supported.} \item{add.attributes}{Logical. If \code{TRUE}, additional information about the model matrix representation is added as attributes.} \item{as.data.frame.}{Logical. If \code{TRUE}, the extended parameter table is returned as a data.frame.} } \value{ A list or a data.frame containing the original parameter table, plus three columns: a \code{"mat"} column containing matrix names, and a \code{"row"} and \code{"col"} column for the row and column indices of the model parameters in the model matrices. } \seealso{\code{\link{lavParTable}}, \code{\link{parTable}}} \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) # extract partable partable <- parTable(fit) # add matrix representation (and show only a few columns) lavMatrixRepresentation(partable)[,c("lhs","op","rhs","mat","row","col")] } lavaan/man/lavTest.Rd0000644000176200001440000000600214622072672014206 0ustar liggesusers\name{lavTest} \alias{lavTest} \alias{lavtest} \title{Test of exact fit} \description{ Compute a variety of test statistics evaluating the global fit of the model.} \usage{ lavTest(lavobject, test = "standard", scaled.test = "standard", output = "list", drop.list.single = TRUE) } \arguments{ \item{lavobject}{An object of class \code{\linkS4class{lavaan}}.} \item{test}{Character vector. Multiple names of test statistics can be provided. If \code{"standard"} is included, a conventional chi-square test is computed.
If \code{"Browne.residual.adf"} is included, Browne's residual-based test statistic using ADF theory is computed. If \code{"Browne.residual.nt"} is included, Browne's residual-based test statistic using normal theory is computed. If \code{"Satorra.Bentler"} is included, a Satorra-Bentler scaled test statistic is computed. If \code{"Yuan.Bentler"} is included, a Yuan-Bentler scaled test statistic is computed. If \code{"Yuan.Bentler.Mplus"} is included, a test statistic is computed that is asymptotically equal to the Yuan-Bentler scaled test statistic. If \code{"mean.var.adjusted"} or \code{"Satterthwaite"} is included, a mean and variance adjusted test statistic is computed. If \code{"scaled.shifted"} is included, an alternative mean and variance adjusted test statistic is computed (as in Mplus version 6 or higher). If \code{"boot"} or \code{"bootstrap"} or \code{"Bollen.Stine"} is included, the Bollen-Stine bootstrap is used to compute the bootstrap probability value of the (regular) test statistic.} \item{scaled.test}{Character. Choose the test statistic that will be scaled (if a scaled test statistic is requested). The default is \code{"standard"}, but it could also be (for example) \code{"Browne.residual.nt"}.} \item{output}{Character. If \code{"list"} (the default), return a list with all test statistics. If \code{"text"}, display the output as text with verbose descriptions (as in the summary output). If any scaled test statistics are included, they are printed first in a two-column format. Next come the other test statistics in a one-column format.} \item{drop.list.single}{Logical. Only used when \code{output = "list"}. If \code{TRUE} and the list is of length one (i.e. only a single test statistic), drop the outer list. If \code{FALSE}, return a nested list with as many elements as we have test statistics.} } \value{ If \code{output = "list"}: a nested list with test statistics, or if only a single test statistic is requested (and \code{drop.list.single = TRUE}), a list with details for this test statistic. If \code{output = "text"}: the text is printed, and a nested list of test statistics (including an info attribute) is returned. } \examples{ HS.model <- ' visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 ' fit <- cfa(HS.model, data = HolzingerSwineford1939) lavTest(fit, test = "browne.residual.adf") } lavaan/man/lavTestWald.Rd0000644000176200001440000000314114622072672015017 0ustar liggesusers\name{lavTestWald} \alias{lavTestWald} \alias{lavtestwald} \alias{wald} \alias{Wald} \alias{lavWaldTest} \title{Wald test} \description{ Wald test for testing a linear hypothesis about the parameters of fitted lavaan object.} \usage{ lavTestWald(object, constraints = NULL, verbose = FALSE) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{constraints}{A character string (typically between single quotes) containing one or more equality constraints. See examples for more details.} \item{verbose}{Logical. If \code{TRUE}, print out the restriction matrix and the estimated restricted values.} } \details{ The constraints are specified using the \code{"=="} operator. Both the left-hand side and the right-hand side of the equality can contain a linear combination of model parameters, or a constant (like zero). The model parameters must be specified by their user-specified labels. Names of defined parameters (using the \code{":="} operator) can be included too. 
} \value{ A list containing three elements: the Wald test statistic (\code{stat}), the degrees of freedom (\code{df}), and a p-value under the chi-square distribution (\code{p.value}). } \examples{ HS.model <- ' visual =~ x1 + b1*x2 + x3 textual =~ x4 + b2*x5 + x6 speed =~ x7 + b3*x8 + x9 ' fit <- cfa(HS.model, data=HolzingerSwineford1939) # test 1: test about a single parameter # this is the 'chi-square' version of the # z-test from the summary() output lavTestWald(fit, constraints = "b1 == 0") # test 2: several constraints con <- ' 2*b1 == b3 b2 - b3 == 0 ' lavTestWald(fit, constraints = con) } lavaan/man/sam.Rd0000644000176200001440000001153714622072672013355 0ustar liggesusers\name{sam} \alias{sam} \alias{fsr} \title{Fit Structural Equation Models using the SAM approach} \description{ Fit a Structural Equation Model (SEM) using the Structural After Measurement (SAM) approach.} \usage{ sam(model = NULL, data = NULL, cmd = "sem", se = "twostep", mm.list = NULL, mm.args = list(bounds = "wide.zerovar"), struc.args = list(estimator = "ML"), sam.method = "local", ..., local.options = list(M.method = "ML", lambda.correction = TRUE, alpha.correction = 0L, twolevel.method = "h1"), global.options = list(), output = "lavaan") } \arguments{ \item{model}{A description of the user-specified model. Typically, the model is described using the lavaan model syntax. See \code{\link{model.syntax}} for more information. Alternatively, a parameter table (e.g., the output of the \code{lavaanify()} function) is also accepted.} \item{data}{A data frame containing the observed variables used in the model.} \item{cmd}{Character. Which command is used to run the sem models. The possible choices are \code{"sem"}, \code{"cfa"} or \code{"lavaan"}, determining how we deal with default options.} \item{se}{Character. The type of standard errors that are used in the final (structural) model. If \code{"twostep"} (the default), the standard errors take the estimation uncertainty of the first (measurement) stage into account. If \code{"standard"}, this uncertainty is ignored, and we treat the measurement information as known. If \code{"none"}, no standard errors are computed.} \item{mm.list}{List. Define the measurement blocks. Each element of the list should be either a single name of a latent variable, or a vector of latent variable names. If omitted, a separate measurement block is used for each latent variable.} \item{mm.args}{List. Optional arguments for the fitting function(s) of the measurement block(s) only. See \code{\link{lavOptions}} for a complete list.} \item{struc.args}{List. Optional arguments for the fitting function of the structural part only. See \code{\link{lavOptions}} for a complete list.} \item{sam.method}{Character. Can be set to \code{"local"}, \code{"global"} or \code{"fsr"}. In the latter case, the results are the same as if Bartlett factor scores were used, without any bias correction.} \item{...}{Many more additional options can be defined, using 'name = value'. See \code{\link{lavOptions}} for a complete list. These options affect both the measurement blocks and the structural part.} \item{local.options}{List. Options specific to the local SAM method (these options may change over time). If \code{lambda.correction = TRUE}, we ensure that the variance matrix of the latent variables (VETA) is positive definite. The \code{alpha.correction} option must be an integer. Acceptable values are in the range 0 to N-1.
If zero (the default), no small sample correction is performed, and the bias-correction is the same as with local SAM. When equal to N-1, the bias-correction is eliminated, and the results are the same as naive FSR. Typical values are 0, P+1 (where P is the number of predictors in the structural model), P+5, and (N-1)/2.} \item{global.options}{List. Options specific to the global SAM method (not used for now).} \item{output}{Character. If \code{"lavaan"}, a lavaan object is returned. If \code{"list"}, a list is returned with all the ingredients from the different stages.} } \details{ The \code{sam} function tries to automate the SAM approach, by first estimating the measurement part of the model, and then the structural part of the model. See the reference for more details. Note that in the current implementation, all indicators of latent variables have to be observed. This implies: no support for second-order factor structures (for now). } \value{ If \code{output = "lavaan"}, an object of class \code{\linkS4class{lavaan}}, for which several methods are available, including a \code{summary} method. If \code{output = "list"}, a list. } \references{ Rosseel and Loh (2021). A structural-after-measurement approach to Structural Equation Modeling. Psychological Methods. Advance online publication. \doi{10.1037/met0000503} } \seealso{ \code{\link{lavaan}} } \examples{ ## The industrialization and Political Democracy Example ## Bollen (1989), page 332 model <- ' # latent variable definitions ind60 =~ x1 + x2 + x3 dem60 =~ y1 + a*y2 + b*y3 + c*y4 dem65 =~ y5 + a*y6 + b*y7 + c*y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 ' fit.sam <- sam(model, data = PoliticalDemocracy, mm.list = list(ind = "ind60", dem = c("dem60", "dem65"))) summary(fit.sam) } lavaan/man/FacialBurns.Rd0000644000176200001440000000156314622072672014764 0ustar liggesusers\name{FacialBurns} \alias{FacialBurns} \docType{data} \title{Dataset for illustrating the InformativeTesting function.} \description{ A dataset from the Dutch burn center (http://www.adbc.nl). The data were used to examine psychosocial functioning in patients with facial burn wounds. Psychosocial functioning was measured by anxiety and depression symptoms (HADS), and self-esteem (Rosenberg's self-esteem scale).} \usage{data(FacialBurns)} \format{ A data frame of 77 observations of 6 variables. \describe{ \item{\code{Selfesteem}}{Rosenberg's self-esteem scale} \item{\code{HADS}}{Anxiety and depression scale} \item{\code{Age}}{Age measured in years, control variable} \item{\code{TBSA}}{Total Burned Surface Area} \item{\code{RUM}}{Rumination, control variable} \item{\code{Sex}}{Gender, grouping variable} } } \examples{ head(FacialBurns) } lavaan/man/lavInspect.Rd0000644000176200001440000005513614622072672014704 0ustar liggesusers\name{lavInspect} \alias{lavInspect} \alias{inspect} \alias{lavTech} \title{Inspect or extract information from a fitted lavaan object} \description{ The \code{lavInspect()} and \code{lavTech()} functions can be used to inspect/extract information that is stored inside (or can be computed from) a fitted lavaan object. Note: the (older) \code{inspect()} function is now simply a shortcut for \code{lavInspect()} with default arguments.
} \usage{ lavInspect(object, what = "free", add.labels = TRUE, add.class = TRUE, list.by.group = TRUE, drop.list.single.group = TRUE) lavTech(object, what = "free", add.labels = FALSE, add.class = FALSE, list.by.group = FALSE, drop.list.single.group = FALSE) inspect(object, what = "free", ...) } \arguments{ \item{object}{An object of class \code{\linkS4class{lavaan}}.} \item{what}{Character. What needs to be inspected/extracted? See Details for a full list. Note: the \code{what} argument is not case-sensitive (everything is converted to lower case).} \item{add.labels}{If \code{TRUE}, variable names are added to the vectors and/or matrices.} \item{add.class}{If \code{TRUE}, vectors are given the `lavaan.vector' class; matrices are given the `lavaan.matrix' class, and symmetric matrices are given the `lavaan.matrix.symmetric' class. This only affects the way they are printed on the screen.} \item{list.by.group}{Logical. Only used when the output consists of model matrices. If \code{TRUE}, the model matrices are nested within groups. If \code{FALSE}, a flattened list is returned containing all model matrices, with repeated names for multiple groups.} \item{drop.list.single.group}{If \code{FALSE}, the results are returned as a list, where each element corresponds to a group (even if there is only a single group). If \code{TRUE}, the list will be unlisted if there is only a single group.} \item{...}{Additional arguments. Not used by lavaan, but by other packages.} } \details{ The \code{lavInspect()} and \code{lavTech()} functions only differ in the way they return the results. The \code{lavInspect()} function will prettify the output by default, while the \code{lavTech()} function will not attempt to prettify the output by default. The (older) \code{inspect()} function is a simplified version of \code{lavInspect()} with only the first two arguments. Below is a list of possible values for the \code{what} argument, organized in several sections: Model matrices: \describe{ \item{\code{"free"}:}{A list of model matrices. The non-zero integers represent the free parameters. The numbers themselves correspond to the position of the free parameter in the parameter vector. This determines the order of the model parameters in the output of, for example, \code{coef()} and \code{vcov()}.} \item{\code{"partable"}:}{A list of model matrices. The non-zero integers represent both the fixed parameters (for example, factor loadings fixed at 1.0), and the free parameters if we ignore any equality constraints. They correspond to all entries (fixed or free) in the parameter table. See \code{\link{parTable}}.} \item{\code{"se"}:}{A list of model matrices. The non-zero numbers represent the standard errors for the free parameters in the model. If two parameters are constrained to be equal, they will have the same standard error for both parameters. Aliases: \code{"std.err"} and \code{"standard.errors"}.} \item{\code{"start"}:}{A list of model matrices. The values represent the starting values for all model parameters. Alias: \code{"starting.values"}.} \item{\code{"est"}:}{A list of model matrices. The values represent the estimated model parameters. Aliases: \code{"estimates"}, and \code{"x"}.} \item{\code{"dx.free"}:}{A list of model matrices. The values represent the gradient (first derivative) values of the model parameters. If two parameters are constrained to be equal, they will have the same gradient value.} \item{\code{"dx.all"}:}{A list of model matrices.
The values represent the first derivative with respect to all possible matrix elements. Currently, this is only available when the estimator is \code{"ML"} or \code{"GLS"}.} \item{\code{"std"}:}{A list of model matrices. The values represent the (completely) standardized model parameters (the variances of both the observed and the latent variables are set to unity). Aliases: \code{"std.all"}, \code{"standardized"}.} \item{\code{"std.lv"}:}{A list of model matrices. The values represent the standardized model parameters (only the variances of the latent variables are set to unity).} \item{\code{"std.nox"}:}{A list of model matrices. The values represent the (completely) standardized model parameters (the variances of both the observed and the latent variables are set to unity; however, the variances of any observed exogenous variables are not set to unity; hence no-x).} } Information about the data: \describe{ \item{\code{"data"}:}{A matrix containing the observed variables that have been used to fit the model. No column/row names are provided. The columns correspond to the output of \code{lavNames(object)}, while the rows correspond to the output of \code{lavInspect(object, "case.idx")}.} \item{\code{"ordered"}:}{A character vector. The ordered variables.} \item{\code{"nobs"}:}{Integer vector. The number of observations in each group that were used in the analysis.} \item{\code{"norig"}:}{Integer vector. The original number of observations in each group.} \item{\code{"ntotal"}:}{Integer. The total number of observations that were used in the analysis. If there is just a single group, this is the same as the \code{"nobs"} option; if there are multiple groups, this is the sum of the \code{"nobs"} numbers for each group.} \item{\code{"case.idx"}:}{Integer vector. The case/observation numbers that were used in the analysis. In the case of multiple groups: a list of numbers.} \item{\code{"empty.idx"}:}{The case/observation numbers of those cases/observations that contained missing values only (at least for the observed variables that were included in the model). In the case of multiple groups: a list of numbers.} \item{\code{"patterns"}:}{A binary matrix. The rows of the matrix are the missing data patterns where 1 and 0 denote non-missing and missing values for the corresponding observed variables, respectively (or \code{TRUE} and \code{FALSE} if \code{lavTech()} is used). If the data is complete (no missing values), there will be only a single pattern. In the case of multiple groups: a list of pattern matrices.} \item{\code{"coverage"}:}{A symmetric matrix where each element contains the proportion of observed datapoints for the corresponding pair of observed variables. In the case of multiple groups: a list of coverage matrices.} \item{\code{"group"}:}{A character string. The group variable in the data.frame (if any).} \item{\code{"ngroups"}:}{Integer. The number of groups.} \item{\code{"group.label"}:}{A character vector. The group labels.} \item{\code{"level.label"}:}{A character vector. The level labels.} \item{\code{"cluster"}:}{A character vector. The cluster variable(s) in the data.frame (if any).} \item{\code{"nlevels"}:}{Integer. The number of levels.} \item{\code{"nclusters"}:}{Integer. The number of clusters that were used in the analysis.} \item{\code{"ncluster.size"}:}{Integer. The number of different cluster sizes.} \item{\code{"cluster.size"}:}{Integer vector. The number of observations within each cluster.
For multigroup multilevel models, a list of integer vectors, indicating cluster sizes within each group.} \item{\code{"cluster.id"}:}{Integer vector. The cluster IDs identifying the clusters. For multigroup multilevel models, a list of integer vectors, indicating cluster IDs within each group.} \item{\code{"cluster.idx"}:}{Integer vector. The cluster index for each observation. The cluster index ranges from 1 to the number of clusters. For multigroup multilevel models, a list of integer vectors, indicating cluster indices within each group.} \item{\code{"cluster.label"}:}{Integer vector. The cluster ID for each observation. For multigroup multilevel models, a list of integer vectors, indicating the cluster ID for each observation within each group.} \item{\code{"cluster.sizes"}:}{Integer vector. The different cluster sizes that were used in the analysis. For multigroup multilevel models, a list of integer vectors, indicating the different cluster sizes within each group.} \item{\code{"average.cluster.size"}:}{Integer. The average cluster size (using the formula \code{s = (N^2 - sum(cluster.size^2)) / (N*(nclusters - 1L))}). For multigroup multilevel models, a list containing the average cluster size per group.} } Observed sample statistics: \describe{ \item{\code{"sampstat"}:}{Observed sample statistics. Aliases: \code{"obs"}, \code{"observed"}, \code{"samp"}, \code{"sample"}, \code{"samplestatistics"}. Since 0.6-3, we always check if an h1 slot is available (the estimates for the unrestricted model); if present, we extract the sample statistics from this slot. This implies that if variables are continuous, and \code{missing = "ml"} (or \code{"fiml"}), we return the covariance matrix (and mean vector) as computed by the EM algorithm under the unrestricted (h1) model. If the h1 slot is not present (perhaps because the model was fitted with \code{h1 = FALSE}), we return the sample statistics from the SampleStats slot. Here, pairwise deletion is used for the elements of the covariance matrix (or correlation matrix), and listwise deletion for all univariate statistics (means, intercepts and thresholds).} \item{\code{"sampstat.h1"}:}{Deprecated. Do not use any longer.} \item{\code{"wls.obs"}:}{The observed sample statistics (covariance elements, intercepts/thresholds, etc.) in a single vector.} \item{\code{"wls.v"}:}{The weight vector as used in weighted least squares estimation.} \item{\code{"gamma"}:}{N times the asymptotic variance matrix of the sample statistics. Alias: \code{"sampstat.nacov"}.} } Model features: \describe{ \item{\code{"meanstructure"}:}{Logical. \code{TRUE} if a meanstructure was included in the model.} \item{\code{"categorical"}:}{Logical. \code{TRUE} if categorical endogenous variables were part of the model.} \item{\code{"fixed.x"}:}{Logical. \code{TRUE} if the exogenous x-covariates are treated as fixed.} \item{\code{"parameterization"}:}{Character. Either \code{"delta"} or \code{"theta"}.} } Model-implied sample statistics: \describe{ \item{\code{"implied"}:}{The model-implied summary statistics. Aliases: \code{"fitted"}, \code{"expected"}, \code{"exp"}.} \item{\code{"resid"}:}{The difference between observed and model-implied summary statistics. Aliases: \code{"residuals"}, \code{"residual"}, \code{"res"}.} \item{\code{"cov.lv"}:}{The model-implied variance-covariance matrix of the latent variables.
Model features:
\describe{
\item{\code{"meanstructure"}:}{Logical. \code{TRUE} if a meanstructure was included in the model.}
\item{\code{"categorical"}:}{Logical. \code{TRUE} if categorical endogenous variables were part of the model.}
\item{\code{"fixed.x"}:}{Logical. \code{TRUE} if the exogenous x-covariates are treated as fixed.}
\item{\code{"parameterization"}:}{Character. Either \code{"delta"} or \code{"theta"}.}
}
Model-implied sample statistics:
\describe{
\item{\code{"implied"}:}{The model-implied summary statistics. Aliases: \code{"fitted"}, \code{"expected"}, \code{"exp"}.}
\item{\code{"resid"}:}{The difference between observed and model-implied summary statistics. Aliases: \code{"residuals"}, \code{"residual"}, \code{"res"}.}
\item{\code{"cov.lv"}:}{The model-implied variance-covariance matrix of the latent variables. Alias: \code{"veta"} [for V(eta)].}
\item{\code{"cor.lv"}:}{The model-implied correlation matrix of the latent variables.}
\item{\code{"mean.lv"}:}{The model-implied mean vector of the latent variables. Alias: \code{"eeta"} [for E(eta)].}
\item{\code{"cov.ov"}:}{The model-implied variance-covariance matrix of the observed variables. Aliases: \code{"sigma"}, \code{"sigma.hat"}.}
\item{\code{"cor.ov"}:}{The model-implied correlation matrix of the observed variables.}
\item{\code{"mean.ov"}:}{The model-implied mean vector of the observed variables. Aliases: \code{"mu"}, \code{"mu.hat"}.}
\item{\code{"cov.all"}:}{The model-implied variance-covariance matrix of both the observed and latent variables.}
\item{\code{"cor.all"}:}{The model-implied correlation matrix of both the observed and latent variables.}
\item{\code{"th"}:}{The model-implied thresholds. Alias: \code{"thresholds"}.}
\item{\code{"wls.est"}:}{The model-implied sample statistics (covariance elements, intercepts/thresholds, etc.) in a single vector.}
\item{\code{"vy"}:}{The model-implied unconditional variances of the observed variables.}
\item{\code{"rsquare"}:}{The R-square value for all endogenous variables. Aliases: \code{"r-square"}, \code{"r2"}.}
}
Diagnostics:
\describe{
\item{\code{"mdist2.fs"}:}{The squared Mahalanobis distances for the (Bartlett) factor scores.}
\item{\code{"mdist.fs"}:}{The Mahalanobis distances for the (Bartlett) factor scores.}
\item{\code{"mdist2.resid"}:}{The squared Mahalanobis distances for the (Bartlett-based) casewise residuals.}
\item{\code{"mdist.resid"}:}{The Mahalanobis distances for the (Bartlett-based) casewise residuals.}
}
Optimizer information:
\describe{
\item{\code{"converged"}:}{Logical. \code{TRUE} if the optimizer has converged; \code{FALSE} otherwise.}
\item{\code{"iterations"}:}{Integer. The number of iterations used by the optimizer.}
\item{\code{"optim"}:}{List. All available information regarding the optimization results.}
\item{\code{"npar"}:}{Integer. Number of free parameters (ignoring constraints).}
}
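For example (a minimal sketch, using the same hypothetical \code{fit} object as above), the model-implied statistics and the optimizer status can be inspected side by side:

\preformatted{
## minimal sketch: model-implied statistics and optimizer status
lavInspect(fit, "implied")    # model-implied covariances (and means)
lavInspect(fit, "resid")      # observed minus model-implied statistics
lavInspect(fit, "rsquare")    # R-square for the endogenous variables
lavInspect(fit, "converged")  # TRUE if the optimizer converged
}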
Gradient, Hessian, observed, expected and first.order information matrices:
\describe{
\item{\code{"gradient"}:}{Numeric vector containing the first derivatives of the discrepancy function with respect to the (free) model parameters.}
\item{\code{"hessian"}:}{Matrix containing the second derivatives of the discrepancy function with respect to the (free) model parameters.}
\item{\code{"information"}:}{Matrix containing either the observed or the expected information matrix (depending on the information option of the fitted model). This is unit-information, not total-information.}
\item{\code{"information.expected"}:}{Matrix containing the expected information matrix for the free model parameters.}
\item{\code{"information.observed"}:}{Matrix containing the observed information matrix for the free model parameters.}
\item{\code{"information.first.order"}:}{Matrix containing the first.order information matrix for the free model parameters. This is the outer product of the gradient elements (the first derivative of the discrepancy function with respect to the (free) model parameters). Alias: \code{"first.order"}.}
\item{\code{"augmented.information"}:}{Matrix containing either the observed or the expected augmented (or bordered) information matrix (depending on the information option of the fitted model). Only relevant if constraints have been used in the model.}
\item{\code{"augmented.information.expected"}:}{Matrix containing the expected augmented (or bordered) information matrix. Only relevant if constraints have been used in the model.}
\item{\code{"augmented.information.observed"}:}{Matrix containing the observed augmented (or bordered) information matrix. Only relevant if constraints have been used in the model.}
\item{\code{"augmented.information.first.order"}:}{Matrix containing the first.order augmented (or bordered) information matrix. Only relevant if constraints have been used in the model.}
\item{\code{"inverted.information"}:}{Matrix containing either the observed or the expected inverted information matrix (depending on the information option of the fitted model).}
\item{\code{"inverted.information.expected"}:}{Matrix containing the inverted expected information matrix for the free model parameters.}
\item{\code{"inverted.information.observed"}:}{Matrix containing the inverted observed information matrix for the free model parameters.}
\item{\code{"inverted.information.first.order"}:}{Matrix containing the inverted first.order information matrix for the free model parameters.}
\item{\code{"h1.information"}:}{Matrix containing either the observed, expected or first.order information matrix (depending on the information option of the fitted model) of the unrestricted h1 model. This is unit-information, not total-information.}
\item{\code{"h1.information.expected"}:}{Matrix containing the expected information matrix for the unrestricted h1 model.}
\item{\code{"h1.information.observed"}:}{Matrix containing the observed information matrix for the unrestricted h1 model.}
\item{\code{"h1.information.first.order"}:}{Matrix containing the first.order information matrix for the unrestricted h1 model. Alias: \code{"h1.first.order"}.}
}
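As a sketch of how these quantities can be extracted (again assuming a fitted, converged \code{fit} object; for a model without constraints, the inverted information is simply the inverse of the corresponding information matrix):

\preformatted{
## minimal sketch: gradient and information at the solution
lavInspect(fit, "gradient")               # near zero after convergence
lavInspect(fit, "information.expected")   # expected unit information
lavInspect(fit, "inverted.information")   # its inverse (depending on
                                          # the information option)
}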
Variance-covariance matrix of the model parameters:
\describe{
\item{\code{"vcov"}:}{Matrix containing the variance-covariance matrix of the estimated model parameters.}
\item{\code{"vcov.std.all"}:}{Matrix containing the variance-covariance matrix of the standardized estimated model parameters. Standardization is done with respect to both observed and latent variables.}
\item{\code{"vcov.std.lv"}:}{Matrix containing the variance-covariance matrix of the standardized estimated model parameters. Standardization is done with respect to the latent variables only.}
\item{\code{"vcov.std.nox"}:}{Matrix containing the variance-covariance matrix of the standardized estimated model parameters. Standardization is done with respect to both observed and latent variables, but ignoring any exogenous observed covariates.}
\item{\code{"vcov.def"}:}{Matrix containing the variance-covariance matrix of the user-defined (using the \code{:=} operator) parameters.}
\item{\code{"vcov.def.std.all"}:}{Matrix containing the variance-covariance matrix of the standardized user-defined parameters. Standardization is done with respect to both observed and latent variables.}
\item{\code{"vcov.def.std.lv"}:}{Matrix containing the variance-covariance matrix of the standardized user-defined parameters. Standardization is done with respect to the latent variables only.}
\item{\code{"vcov.def.std.nox"}:}{Matrix containing the variance-covariance matrix of the standardized user-defined parameters. Standardization is done with respect to both observed and latent variables, but ignoring any exogenous observed covariates.}
\item{\code{"vcov.def.joint"}:}{Matrix containing the joint variance-covariance matrix of both the estimated model parameters and the defined (using the \code{:=} operator) parameters.}
\item{\code{"vcov.def.joint.std.all"}:}{Matrix containing the joint variance-covariance matrix of both the standardized model parameters and the user-defined parameters. Standardization is done with respect to both observed and latent variables.}
\item{\code{"vcov.def.joint.std.lv"}:}{Matrix containing the joint variance-covariance matrix of both the standardized model parameters and the user-defined parameters. Standardization is done with respect to the latent variables only.}
\item{\code{"vcov.def.joint.std.nox"}:}{Matrix containing the joint variance-covariance matrix of both the standardized model parameters and the user-defined parameters. Standardization is done with respect to both observed and latent variables, but ignoring any exogenous observed covariates.}
}
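For example (a minimal sketch), the standard errors reported by \code{summary()} can be recovered from the diagonal of this matrix:

\preformatted{
## minimal sketch: standard errors from the variance-covariance matrix
VCOV <- lavInspect(fit, "vcov")
sqrt(diag(VCOV))   # should match the reported standard errors
}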
Miscellaneous:
\describe{
\item{\code{"coef.boot"}:}{Matrix containing the estimated model parameters for each bootstrap sample. Only relevant when bootstrapping was used.}
\item{\code{"UGamma"}:}{Matrix containing the product of the 'U' and 'Gamma' matrices as used by the Satorra-Bentler correction. The trace of this matrix, divided by the degrees of freedom, gives the scaling factor.}
\item{\code{"UfromUGamma"}:}{Matrix containing the 'U' matrix as used by the Satorra-Bentler correction. Alias: \code{"U"}.}
\item{\code{"list"}:}{The parameter table. The same output as given by \code{parTable()}.}
\item{\code{"fit"}:}{The fit measures. Aliases: \code{"fitmeasures"}, \code{"fit.measures"}, \code{"fit.indices"}. The same output as given by \code{fitMeasures()}.}
\item{\code{"mi"}:}{The modification indices. Aliases: \code{"modindices"}, \code{"modification.indices"}. The same output as given by \code{modindices()}.}
\item{\code{"loglik.casewise"}:}{Vector containing the casewise loglikelihood contributions. Only available if estimator = \code{"ML"}.}
\item{\code{"options"}:}{List. The option list.}
\item{\code{"call"}:}{List. The call as returned by \code{match.call()}, coerced to a list.}
\item{\code{"timing"}:}{List. The timing (in milliseconds) of various lavaan subprocedures.}
\item{\code{"test"}:}{List. All available information regarding the (goodness-of-fit) test statistic(s).}
\item{\code{"baseline.test"}:}{List. All available information regarding the (goodness-of-fit) test statistic(s) of the baseline model.}
\item{\code{"baseline.partable"}:}{Data.frame. The parameter table of the (internal) baseline model.}
\item{\code{"post.check"}:}{Post-fitting check of whether the solution is admissible. A warning is raised if negative variances are found, or if either \code{lavInspect(fit, "cov.lv")} or \code{lavInspect(fit, "theta")} return a non-positive definite matrix.}
\item{\code{"zero.cell.tables"}:}{List. Bivariate frequency tables where at least one cell is empty.}
\item{\code{"version"}:}{The lavaan version number that was used to construct the fitted lavaan object.}
}
}
\seealso{
\code{\link{lavaan}}
}
\examples{
# fit model
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit <- cfa(HS.model, data = HolzingerSwineford1939, group = "school")

# extract information
lavInspect(fit, "sampstat")
lavTech(fit, "sampstat")
}
lavaan/DESCRIPTION0000644000176200001440000000742314630576512013241 0ustar liggesusersPackage: lavaan
Title: Latent Variable Analysis
Version: 0.6-18
Authors@R: c(person(given = "Yves", family = "Rosseel",
                    role = c("aut", "cre"),
                    email = "Yves.Rosseel@UGent.be",
                    comment = c(ORCID = "0000-0002-4129-4477")),
             person(given = c("Terrence","D."), family = "Jorgensen",
                    role = "aut", email = "TJorgensen314@gmail.com",
                    comment = c(ORCID = "0000-0001-5111-6773")),
             person(given = c("Luc"), family = "De Wilde",
                    role = "aut", email = "Luc.DeWilde@UGent.be"),
             person(given = "Daniel", family = "Oberski",
                    role = "ctb", email = "daniel.oberski@gmail.com"),
             person(given = "Jarrett", family = "Byrnes",
                    role = "ctb", email = "byrnes@nceas.ucsb.edu"),
             person(given = "Leonard", family = "Vanbrabant",
                    role = "ctb", email = "info@restriktor.org"),
             person(given = "Victoria", family = "Savalei",
                    role = "ctb", email = "vsavalei@ubc.ca"),
             person(given = "Ed", family = "Merkle",
                    role = "ctb", email = "merklee@missouri.edu"),
             person(given = "Michael", family = "Hallquist",
                    role = "ctb", email = "michael.hallquist at gmail.com"),
             person(given = "Mijke", family = "Rhemtulla",
                    role = "ctb", email = "mrhemtulla@ucdavis.edu"),
             person(given = "Myrsini", family = "Katsikatsou",
                    role = "ctb", email = "mirtok2@gmail.com"),
             person(given = "Mariska", family = "Barendse",
                    role = "ctb", email = "m.t.barendse@gmail.com"),
             person(given = c("Nicholas"), family = "Rockwood",
                    role = "ctb", email = "nrockwood@rti.org"),
             person(given = "Florian", family = "Scharf",
                    role = "ctb", email = "florian.scharf@uni-leipzig.de"),
             person(given = "Han", family = "Du",
                    role = "ctb", email = "hdu@psych.ucla.edu"),
             person(given = "Haziq", family = "Jamil",
                    role = "ctb", email = "haziq.jamil@ubd.edu.bn",
                    comment = c(ORCID = "0000-0003-3298-1010")),
             person(given = "Franz", family = "Classe",
                    role = "ctb", email = "classe@dji.de")
            )
Description: Fit a variety of latent variable models, including confirmatory
    factor analysis, structural equation modeling and latent growth curve
    models.
Depends: R(>= 3.4)
Imports: methods, stats4, stats, utils, graphics, MASS, mnormt, pbivnorm,
    numDeriv, quadprog
License: GPL (>= 2)
LazyData: yes
ByteCompile: true
URL: https://lavaan.ugent.be
NeedsCompilation: no
Packaged: 2024-06-07 05:15:18 UTC; yves
Author: Yves Rosseel [aut, cre] (<https://orcid.org/0000-0002-4129-4477>),
    Terrence D. Jorgensen [aut] (<https://orcid.org/0000-0001-5111-6773>),
    Luc De Wilde [aut], Daniel Oberski [ctb], Jarrett Byrnes [ctb],
    Leonard Vanbrabant [ctb], Victoria Savalei [ctb], Ed Merkle [ctb],
    Michael Hallquist [ctb], Mijke Rhemtulla [ctb],
    Myrsini Katsikatsou [ctb], Mariska Barendse [ctb],
    Nicholas Rockwood [ctb], Florian Scharf [ctb], Han Du [ctb],
    Haziq Jamil [ctb] (<https://orcid.org/0000-0003-3298-1010>),
    Franz Classe [ctb]
Maintainer: Yves Rosseel <Yves.Rosseel@UGent.be>
Repository: CRAN
Date/Publication: 2024-06-07 12:30:02 UTC