New Upstream Release - r-cran-lavasearch2

Ready changes

Summary

Merged new upstream version: 2.0.1+dfsg (was: 1.5.6+dfsg).

Diff

diff --git a/DESCRIPTION b/DESCRIPTION
index e0c7c7d..3ab7e8b 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,8 +1,8 @@
 Package: lavaSearch2
 Type: Package
 Title: Tools for Model Specification in the Latent Variable Framework
-Version: 1.5.6
-Date: 2020-07-31
+Version: 2.0.1
+Date: 2023-04-11
 Authors@R: c(
     person("Brice", "Ozenne", role = c("aut", "cre"), email = "brice.mh.ozenne@gmail.com", comment = c(ORCID = "0000-0001-9694-2956"))
 	)
@@ -14,17 +14,19 @@ Description: Tools for model specification in the latent variable framework
     adjustment for multiple comparisons when searching for local dependencies,
     and adjustment for multiple comparisons when doing inference for multiple latent variable models. 
 License: GPL-3
-VignetteBuilder: R.rsp
+Encoding: UTF-8
 Depends: R (>= 2.10), ggplot2, lava (>= 1.6.4)
-Imports: doParallel, MASS, Matrix, methods, multcomp, mvtnorm, nlme,
-        parallel, Rcpp, reshape2, sandwich, stats, utils
-Suggests: data.table, foreach, lme4, lmerTest, numDeriv, pbapply,
-        pbkrtest, R.rsp, riskRegression, survival, testthat
+Imports: abind, doParallel, MASS, Matrix, methods, multcomp, mvtnorm,
+        nlme, parallel, Rcpp, reshape2, sandwich, stats, utils
+Suggests: clubSandwich, data.table, foreach, emmeans, lme4, lmerTest,
+        numDeriv, pbapply, pbkrtest, R.rsp, riskRegression, survival,
+        testthat
 LinkingTo: Rcpp, RcppArmadillo
+VignetteBuilder: R.rsp
 NeedsCompilation: yes
-RoxygenNote: 7.1.1
-Packaged: 2020-07-31 08:52:28.416 UTC; hpl802
+RoxygenNote: 7.2.1
+Packaged: 2023-04-11 20:52:26 UTC; bozenne
 Author: Brice Ozenne [aut, cre] (<https://orcid.org/0000-0001-9694-2956>)
 Maintainer: Brice Ozenne <brice.mh.ozenne@gmail.com>
 Repository: CRAN
-Date/Publication: 2020-07-31 09:40:02 UTC
+Date/Publication: 2023-04-11 21:40:02 UTC
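
The DESCRIPTION hunk moves the package to the 2023 upstream release, adds abind to Imports, and adds clubSandwich and emmeans to Suggests. A minimal post-install sanity check (hypothetical snippet, not part of the diff):

    ## confirm the updated package is the one installed (hypothetical check)
    stopifnot(utils::packageVersion("lavaSearch2") >= "2.0.1")
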
diff --git a/MD5 b/MD5
index 86ae328..1629b2c 100644
--- a/MD5
+++ b/MD5
@@ -1,160 +1,171 @@
-4ec30ca5a93bf9da8d4ad61bc678321b *DESCRIPTION
-05eb2f7c3592604bc998da8cbe141cd6 *NAMESPACE
-b71f61d8deb2fb61cdbb4769060128e0 *NEWS
-b4b2bdc4c2ed8604326d5dc2ace6b8b8 *R/0onload.R
+7d14dc31218c6a48bbf777912a1c3f08 *DESCRIPTION
+e09884934212a643b6505c37593112c4 *NAMESPACE
+eee1f194b196e7eea80ce88c16591b3a *NEWS
+442e86026d4186ba5f205760c5a866c7 *R/0onload.R
 1a08ee3b1374ec46feff2cf6a36c7781 *R/IntDensTri.R
-cca80b83ad7cbb1b40757cd10161163a *R/RcppExports.R
-4654e67152b594721cffd3d95efb53ad *R/Utils-formula.R
-6df2f0dbeafed4c7f381c8eb3d29a3c0 *R/Utils-nlme.R
-db3d2dc07b0c15086be121cd6640e918 *R/autoplot.calibrateType1.R
-a235ac70ada7763b651c1e02b01a157c *R/calcDistMax.R
+62d2aebb0ae0f4cbded866b92622b579 *R/Objective_gaussian_weight.R
+4e7b25cfb6a8389491ab1336299b6a62 *R/RcppExports.R
+9e9c5b8b7c75e9f2ab0ac569a09f4d39 *R/Scorrect-confint2.R
+282e2268433c626a4631b4eb49abeada *R/Scorrect-model.tables.R
+107a37bd4986486197caee1a643cf05c *R/Utils.R
+b415b00527e7f3cfe056c768b47d7b5f *R/autoplot.calibrateType1.R
 df02c37afc9cc54caebead5f93523513 *R/calcType1postSelection.R
-5a930f54f25dcb5f11737a424995f293 *R/calibrateType1.R
+273751a7faafa90fc9e684efa1db0568 *R/calibrateType1.R
 0d8934b4f87db95fb0cbf1f22e375462 *R/checkData.R
-bf41893d268dd4ac427eda971934357e *R/clean.R
+df0aa233f3ce61e48802a2e4ddb2d585 *R/clean.R
 eeae02545b11c976ce7a09702fc98a4f *R/coefByType.R
-68aeabb799e1fa39f378559820fd22f7 *R/coefType.R
-adf56135a9bbafc4699293d6bd78e0c5 *R/compare2.R
-d8438c4dae1f1e61cd9c7fe539b02a1e *R/conditionalMoment.R
-b9b7d7fda1120319a64442f5b7b80c6c *R/createContrast.R
-b4db6de4dbf3daf23d93a95d65584c97 *R/createGrid.R
+56211e9e873b6640aed5349f839edbe7 *R/coefType.R
+268ed5af10487206df5b2647e5b1a376 *R/createContrast.R
+7c5742a385f5692fd0fc8eecf4d52973 *R/createGrid.R
 613f9330102c6da00e7c5335535782ca *R/defineCategoricalLink.R
-cabd92780b6a89b5d0c3d0e2c7f8b82c *R/effects2.R
-2e751eb68ca1ffacf9325558af9ffdd6 *R/estimate2.R
-6d20ed9b309f65e27770f9efcf92d74e *R/getVarCov2.R
-eee51f6d49cbd68ea2679d310f0c5e38 *R/iid2.R
-b84806646d7b9f19d7cd61ed70659311 *R/iidJack.R
-decdced0458f711f78ffb286f0f1bf5e *R/information2.R
+50e25d4ba6078f7fed982910d6eef9f3 *R/evalInParentEnv.R
+40bb24fc16487d8396f949c7276d1d67 *R/formula.R
+20d2697edb1c8048aa89f26e036e2fa5 *R/iidJack.R
 ce896a8b8b8de1724c9dfa0f47511000 *R/initVarlink.R
-02c584372cf9faf78145bece3a533cb9 *R/lavaSearch2-package.R
-e564c1ee3d15c8032e6a8b473daf856c *R/leverage.R
+1e493fca74c3eaf8ef70f91711267c65 *R/lavaSearch2-package.R
 d0fc3aebb818684f7f488e09bc64e2dc *R/link.R
-62a306fef50e1840e4d2b9016c3ad0fd *R/matrixPower.R
-f0b847a821666ca2907c8ef5dcc46270 *R/methods-modelsearch2.R
-83a97d518a32b17232531a4f423f72eb *R/modelsearch2.R
-aded0c5063832b4c93940654c40b766b *R/multcomp.R
-0a5d39103e988534db33b4ded6caa424 *R/nobs.R
-50e25d4ba6078f7fed982910d6eef9f3 *R/package-butils-evalInParentEnv.R
-89ef4f96e15bb24fcdd967ff71c29a65 *R/package-butils-extractData.R
-59130f74d492c2058028a246e184dd1c *R/package-butils-valid.R
-2296ac2d4ec53a16237f296a07a2d0bc *R/prepareScore2.R
-c1716bab1d2c4ae34c6324dda30af272 *R/print.modelsearch2.R
-fbdff96b70bf59ee3e7557da458ea523 *R/residuals2.R
-e583809f076373deb094374c6a1ff2ba *R/sCorrect.R
-be0cf205b094b4129a1f1ca6237052ae *R/score2.R
-1cd3b01f55f50b03098946a17f4df77d *R/skeleton.R
-de11a1f7a73ab299e2b848ada84e9423 *R/summary.calibrateType1.R
-63a294f99b9d22a89b6295f14c6fcf85 *R/summary.glht2.R
-12f5204ce439c77686a4e7d1b2aa6e53 *R/summary.modelsearch2.R
-6fc5e1a26e5d4fdaf5d82bcea8e12d66 *R/summary2.R
+341246b41c5e64aa4adb1e82de1f14bb *R/matrixPower.R
+a235ac70ada7763b651c1e02b01a157c *R/modelsearch2-calcDistMax.R
+f0b847a821666ca2907c8ef5dcc46270 *R/modelsearch2-methods.R
+c1716bab1d2c4ae34c6324dda30af272 *R/modelsearch2-print.R
+9a2ba004d3a160d974d812a7551414b5 *R/modelsearch2-summary.R
+77a6ed2ded4eff72f97e124a0ffbc62e *R/modelsearch2.R
+012eed8c97a9a8a7e7e2a7b2117ee50b *R/nobs.R
+f426b547049b839f33640c1e228566f0 *R/p.adjust2.R
+1fbcdb278b161dad374af5c68aba81cd *R/sCorrect-coef2.R
+6d853ddae4afb6db059f9e59f5300f7a *R/sCorrect-compare2.R
+517675088a45f611cc310be039ec98cd *R/sCorrect-dInformation2.R
+65dc97489d924fe4629f275499f45a07 *R/sCorrect-effects2.R
+69504df917593e39143e04c7df1be695 *R/sCorrect-estimate2.R
+0aa24296c11b7e30ad7fa0a7593a0a9b *R/sCorrect-extractData.R
+c5d3e58cd9cc866ebf67618f58e03189 *R/sCorrect-getGroups2.R
+093b9bfe04ef9fabe073914367319414 *R/sCorrect-getIndexOmega.R
+7845f45f74117cc9e41c028c44e2907c *R/sCorrect-getVarCov2.R
+14f266d3bedad8398b89aba604f84ac0 *R/sCorrect-glht2.R
+a164001954dde5789a281ffed081e486 *R/sCorrect-hessian2.R
+7ba9452354dece4b2e33e8ebf85fd601 *R/sCorrect-iid2.R
+0b22cf4c5f89596b5b921e581fa819b7 *R/sCorrect-information2.R
+ca4ec98bd1a9db66336026ebbb2c2c81 *R/sCorrect-leverage2.R
+bdded04c3cfe57272600607041ad8b36 *R/sCorrect-moments2.R
+169b89ff7976821bb6f06a6a90e14c19 *R/sCorrect-nobs2.R
+4579d3b2d09c5a91e41e32573930f1e5 *R/sCorrect-residuals2.R
+bcf1ae120838bbe7fcf83f8279af4eb5 *R/sCorrect-score2.R
+e81364891a15f48beaa1c76e1ac9994f *R/sCorrect-skeleton.R
+544e135c3a31eceaf7df11894bb4959f *R/sCorrect-sscCoxSnell.R
+9c22cd1f17131fffe5b356e2c927daae *R/sCorrect-sscResiduals.R
+c0610d69dc65259d5d97f8d5eb313e3c *R/sCorrect-summary.glht2.R
+f21086e9a7f8906741413b105e2febc8 *R/sCorrect-summary2.R
+c74cb534ff710da1f81199024cfd3255 *R/sCorrect-updateMoment.R
+fb0043903a035b91f1259a0e5cc4918a *R/sCorrect-vcov2.R
+9825925c814db322f03317e136b2eb40 *R/sCorrect.R
+d404bbb7034d9c507038351a993eb4f3 *R/sampleRepeated.R
+e028a12d82b3b7e86d3c09b1f0edafb7 *R/summary.calibrateType1.R
 4a885f5f6611d78d0d948d6dd61125d7 *R/symmetrize.R
+564371159a3045bacfb06d6a0ded00db *R/transformSummaryTable.R
 df900b6e59caaae505176cee968a6bbd *R/tryWithWarnings.R
 9b0e5d0986041f548b11e88bb7eab4e2 *R/var2dummy.R
-1faa0f102f8fffbab0984daa401f4662 *R/vcov2.R
-6593e3c576aa045a80764fb38f2b1519 *build/vignette.rds
-0b863b0cddf917bbb55d8b3afb6738ee *inst/doc/overview.ltx
-e2e9f1c6d693a5c5fb59bbafdbe6038f *inst/doc/overview.pdf
-87bb53780be2f7b0cf353e74002a8a28 *inst/implementationScheme.png
-88e508dd881ceb039b031bab6ff52756 *inst/implementationScheme.svg
-8e6a8e1f93949398e3267aba6a1ed587 *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.aux
-9f23f012040f50eca77d02ee3919afcd *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fdb_latexmk
-95cb9ab919fb55e57819b9577565fc9f *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fls
-a5870b27bbb2febe791dccfa939588b5 *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.log
-466b1b8184ee605528fd19cc6a4e5cce *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.org
-a8f3224f7456b3594eaf827253e8b27a *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.out
-de8aa271320183aae27ef3069e760287 *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.pdf
-d18983eac333031a0c35dd71d70f1bb5 *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.soc
-5e6b675c70d0de47ca5803dd72116ade *inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.tex
+9800477854324645fb18dc90d0f6b45e *build/vignette.rds
+511671e5cd2a1b3197b880f1b234c001 *inst/doc-software/changes.sty
+f1098d3352863114e8137abf0f372214 *inst/doc-software/modelsearch.png
+193418ffc5dcb2526e86823141862388 *inst/doc-software/overview-compress.pdf
+cc42d871d65396771ab21ecee4028979 *inst/doc-software/overview.aux
+b132435bbd21cd60e169ac71b87c2e0e *inst/doc-software/overview.loc
+32b4b04cb355b91c9c56f891bb3e679f *inst/doc-software/overview.log
+29fde365363f89a1c0877d8dc3ffe367 *inst/doc-software/overview.org
+f2793dafe26263a9f3e6e12f0ea4be90 *inst/doc-software/overview.pdf
+1375ff904aab693d7db34e285a005658 *inst/doc-software/overview.tex
+0363435545d167065760c9f8a091ae55 *inst/doc/overview.pdf
+b94bba6fc384407b194d25b2cbdba660 *inst/doc/overview.pdf.asis
 e1a621e4f9b900d4d3987b9bdeaa0143 *man/addLink.Rd
 4d71c04f02e9b02da0a4794ea9999587 *man/autoplot.intDensTri.Rd
-926c104ef5679915a46b59e49c4e4a38 *man/autoplot.modelsearch2.Rd
+204180d782dea32b6cbe9fcc89552ce8 *man/autoplot.modelsearch2.Rd
 adaa97aed0972a2433f3a048f85251c6 *man/autoplot_calibrateType1.Rd
-a8be22b97dcaf2a7f84261a8f2f73d7a *man/calcDistMax.Rd
+0d91ba5a35e70d3bed0a72c4d2673d14 *man/calcDistMax.Rd
 a65b394bafe315d68526f7c8f6eb8775 *man/calcType1postSelection.Rd
-f673cf60406ed8c8156f6edbeb6c1889 *man/calibrateType1.Rd
+242e54290dc50691e3d6ab9127aa19d6 *man/calibrateType1.Rd
 993720a25e51b102eb13fee79cedf27a *man/checkData.Rd
 87894707bdadd14525d8d588ee360f7a *man/clean.Rd
-6b5d63ce8e73e4c8c82c3963136f6e4a *man/coef2-internal.Rd
+7d498945f39e0f4dc061746356aba8ea *man/coef2.Rd
 195499d494d4bc244c82c196ac00ffeb *man/coefByType.Rd
 2c1e3170908197c6de4470f193118aa7 *man/coefType.Rd
-a5449cc4d3cd96a6f69e3d8e3db30417 *man/combination.Rd
-925eced7a95a93042e21d39f02812735 *man/combineFormula.Rd
-b06f9ce704d45458fb8ec1f2865400a8 *man/compare2.Rd
-433def6084e5a94384fc09d45cdc4280 *man/conditionalMoment.Rd
-87b99338cce5b4a7b6b78aee8fe21e53 *man/contrast2name.Rd
-ba25c674858e4df1c766440be715aacb *man/convFormulaCharacter.Rd
-b572bdd70a877fcce4cdee9aea380802 *man/createContrast.Rd
+f55b3276f44f6547c21a31da2ead39ae *man/combination.Rd
+80d52522f4cbabdad7b369ab0757c889 *man/combineFormula.Rd
+b1f644435e15165dcb493f6538f6bb06 *man/compare2.Rd
+7e4aaf315d817bf2d83032da5e997693 *man/confint2.Rd
+a2e9f9788db847e08f9161c352883291 *man/contrast2name.Rd
+6ebaeda74be7487f15dba5fc5b4aee68 *man/convFormulaCharacter.Rd
+b1b3e17a71052d8fc1227c210d5f955e *man/createContrast.Rd
 584057dc46b0aa6f0093bc22a15883d5 *man/createGrid.Rd
-18df5af5b70e00998a2746a90925d9ae *man/dInformation2-internal.Rd
 68001777b8d330bccf168e80eea94b5e *man/defineCategoricalLink.Rd
-9f4e01cd8c8ff671396e0f08e58fe0ee *man/dfSigma.Rd
-f0faef22ef62042e2d50930df43e37d1 *man/dfSigmaRobust.Rd
-46e4163c917d960eb90af77386e5f56f *man/effects2.Rd
-858c255783f708a17654f1d1e9dfa233 *man/estfun.Rd
-1671525b22947a5c110b41c12bb0e150 *man/estimate2.Rd
-81b20d5f800b0f8f6e62f041f6bc3414 *man/evalInParentEnv.Rd
-12128fb99c1bfad37dc83ca2d55ccdc5 *man/extractData.Rd
+361dfaa321c9090bf429a6f0f1703ce8 *man/dfSigma.Rd
+311cdaae69c6ccf58e5aa86e29519ecd *man/dot-dinformation2-internal.Rd
+88a3a3831088a0cceef7cbbf05bfba42 *man/effects2.Rd
+8d1ae73a7d3f13a21fe2ffed72847839 *man/estimate2.Rd
+a4b2e39859895839c09ceb2053296e14 *man/evalInParentEnv.Rd
+e5d7dd4fbc454aaa3012ee3accb5c35b *man/extractData.Rd
 aca5b5ecb2ce2a4826fccf4e13154ab8 *man/findNewLink.Rd
-4d4bcb3aeb80fc50c4a93c96771bc039 *man/getCluster2-internal.Rd
-f021e286737990a3b22ee16d954c3725 *man/getIndexOmega2-internal.Rd
-4a0503eccac1094e44bf2ede74577602 *man/getNewLink.Rd
-d25e2810e7bba8b36dad2cf98f482445 *man/getNewModel.Rd
-ded9287daecaef037131c4a32195a771 *man/getStep.Rd
-ea5083ec1d0c9a579bc03bab4754d76e *man/getVarCov2-internal.Rd
-740b29da6b0383cf263d900028bda71f *man/getVarCov2.Rd
-999d97e9ad56222a4780b526384b4a19 *man/glht2.Rd
-fe721418830e99910eb99107750d3a4e *man/iid2.Rd
-dc406baa5b234b018865c6c437a33713 *man/iidJack.Rd
-ab37acf12027985b3447d9bf5c1650e6 *man/information2-internal.Rd
-1498c9b578df72f633765967cace179d *man/information2.Rd
+51af5d5d19cf6b04bc498c940f08b2db *man/gaussian_weight.Rd
+33fb294df00deab517e575bbe148949f *man/getIndexOmega.Rd
+fc5fd63f61806aec857e073fb6253d6f *man/getNewLink.Rd
+363011f43e85b3d4bbb08927c9d3153a *man/getNewModel.Rd
+1de25550f9e4d9e3e9d58c222cfbbfc7 *man/getStep.Rd
+496bdb5d3f2db38dd7e332a6b83de734 *man/getVarCov2.Rd
+704a7ea66818eacb77e5e0ecff66ea28 *man/glht2.Rd
+10b81104fd06d0e3cd34c469703513e5 *man/hessian2-internal.Rd
+c8ff89dd52dcb2e3c7438a567738710c *man/hessian2.Rd
+1213e8393f5f114fcfc3725d3fa9079d *man/iid2.Rd
+09238b2a1e2990dd193f05511747f929 *man/iid2plot.Rd
+5fe8098fa0092ab538c67a55ea446dc5 *man/iidJack.Rd
+3ec31c47972ef4f73ff2cae2c4280e7a *man/information2-internal.Rd
+3bf10dfcc1a57a6857a4beef9e7dcdc9 *man/information2.Rd
 0c1a4ef5e12efe2ef02b1e9659f412ff *man/initVarLink.Rd
 032033fc7ae9699eff796e4479b4a9a4 *man/intDensTri.Rd
 0b58ba2e89ed0ed5dcbe20b303112aa9 *man/lavaSearch2.Rd
-3bef2439668c4514dbaa442abb495223 *man/leverage2.Rd
+77fc047ef8d0f905598c7956b53778ac *man/leverage2.Rd
 72e688cc7c998aa3ce9475a904d27e0f *man/matrixPower.Rd
 e914502c28e9417e5df65953334654de *man/modelsearch2.Rd
-551d92b99d1238fe0ca17b548958ad8b *man/nStep.Rd
-5b7f9c260f372d18c60d9a922aa51cae *man/residuals2.Rd
-5ed9bbd39b7ff8556fc88d8f265ad1a8 *man/sCorrect.Rd
-867d3732038732b183641e5374ca33f8 *man/score2-internal.Rd
-0b517b4d0ab88d885a0a694a8f8080e0 *man/score2.Rd
-0c4916a99853415b20c9c75e5641979c *man/selectRegressor.Rd
-d0b7b9c1a1900bd454de0a19d036eea5 *man/selectResponse.Rd
+6c51570262d5c196deb74e3b4fd9e6ed *man/moments2.Rd
+4bc3707f3006e38879d698f85e0a7e8c *man/nStep.Rd
+84b8f48162a9c5058f4b6bc4ada90b09 *man/nobs2.Rd
+73fd6823d7f23d6294c7a60addf86485 *man/residuals2.Rd
+6324d57eef96ce12e9f5e1a708f0596c *man/sCorrect.Rd
+5548d9b4db51341c21e5495073701f0a *man/sampleRepeated.Rd
+b5f4071acad0f40871cbf9c740ec21d2 *man/score2-internal.Rd
+3e7a0274d9d1680b3e77c2ad6370b9df *man/score2.Rd
+c4bd005460aae6dd877f28dfd1b78a12 *man/selectRegressor.Rd
+b2477d71539a1ca1b2aab9237a93f752 *man/selectResponse.Rd
 6c4b33518f34c256d6302cba9b575ed1 *man/setLink.Rd
-76fa65a0187f43c254afd0a49d0f2f23 *man/skeleton.Rd
+97917a090e4367b5f368b04e46356ff2 *man/skeleton.Rd
 00530cfc346697e55168366192d45ddb *man/summary.calibrateType1.Rd
-34c1447017ce2aef7e16ac5300264979 *man/summary.modelsearch2.Rd
-f2b2e4ba9db2f0b2ffeee03ac92001b8 *man/summary2.Rd
+577372ece37234fbd44b205d614ea007 *man/summary.glht2.Rd
+c8e6a25d66227233aad69786934b20f0 *man/summary.modelsearch2.Rd
+01793c1bd68031c9d1eaadd69b767da1 *man/summary2.Rd
 dd87ed008594087b91b3881a90bc9294 *man/symmetrize.Rd
+283876ce32c152e6558bf3da1e5b4c1e *man/transformSummaryTable.Rd
 96a591c3d5dd914a45aa1feec3d78c72 *man/tryWithWarnings.Rd
-182aa13532c2fdbe435525cfc5eb2853 *man/validFCTs.Rd
 2d6372fa0c0ece83359671946ba47fbc *man/var2dummy.Rd
-f2cbbaba66ed6909e3d516d1030748d8 *man/vcov2.Rd
+c446fe5d2ef80cbd33cd77bd3e6444f2 *man/vcov2-internal.Rd
+d4d2241e817f3259fa27ed5b1d091d79 *man/vcov2.Rd
 bcc42aaeb0a69ec1b279b42021751d66 *src/Makevars
 a0089cc1021ed29c492562045c3dadef *src/Makevars.win
 0d3c1c0419e3746bb74707d6661721b8 *src/OLS.cpp
-323ac6ef78cd27904ab9b00073077746 *src/RcppExports.cpp
+7f8d770c75cdea022044864cca32bfca *src/RcppExports.cpp
 925c75ef71403022d44faad79902e9c5 *src/wildBoot.cpp
-0c76bddea3a81607e2c6fd01642be6aa *tests/test-all.R
-297d9086e3b7ef4d0b276d77de539070 *tests/testthat/test-coefType.R
-1ec9bfcc3dfe2067b93a6d00dac0a721 *tests/testthat/test-initVar.R
-16a2d57c0693b5cd42ccf82b06d0da08 *tests/testthat/test-matrixPower.R
-dddc1e132dbe35b87d38e14b32c5aec1 *tests/testthat/test1-Utils-nlme.R
-3ae59505449a0bd316e195bc4b1bcb8f *tests/testthat/test1-adjustMoment.R
-f8427588732cfe11b5e9b61615471aaf *tests/testthat/test1-cluster-lava.R
-717bcc165f44fa51e8533eec22421db3 *tests/testthat/test1-compare2.R
-74c189964566aa66f3ea6db619766b16 *tests/testthat/test1-sCorrect-adjustedResiduals.R
-ac4d5b1e64390fc05c515eb21e9dd25e *tests/testthat/test1-sCorrect-dVcov.R
-d5d62769ddb640a7932013e1337bdf78 *tests/testthat/test1-sCorrect-lava.R
-3a2d1a388072ca59df405ec7176bd7b1 *tests/testthat/test1-sCorrect-missingValues.R
-fabaebb0a756360253818c8ee38562eb *tests/testthat/test1-sCorrect-smallSampleCorrection.R
-863df9b9d6e9e7aa41f6c4f5165e1cb7 *tests/testthat/test1-sCorrect-summary2.R
-4fa9200b650485906803c52434a2d079 *tests/testthat/test1-sCorrect-validObjects.R
-c53e550313fc8162e359a9613cee00a8 *tests/testthat/test2-IntDensTri.R
+d6fe67fa1fa787f065daf1ff5c3b19cb *tests/test-all.R
+110c08977e52f563de35c1c23c9160e3 *tests/testthat/test-coefType.R
+66a3299f37e09b799f1939cbee0e4c6a *tests/testthat/test-initVar.R
+da1d9ab7c930438b1f023d4781f5c92c *tests/testthat/test-matrixPower.R
+46a9f1abe9a3f23d8d0c7aae981719e2 *tests/testthat/test-previousBug.R
+8fb1b571ac19416685f026d8168757d5 *tests/testthat/test1-sCorrect-effects2.R
+15df696ee1193f92bc51df251bced529 *tests/testthat/test1-sCorrect-missingValues.R
+ecd8cec6ed6e854598321b33604afe4a *tests/testthat/test1a-sCorrect-validObjects.R
+d55663c4ec3468e756cb54e78fa2fd80 *tests/testthat/test1b-sCorrect-conditionalMoment.R
+2f8a29ec91798e20ddcbc79a6cb9e4ff *tests/testthat/test1c-sCorrect-ssc.R
+03db6dd42a0aa24ed5ade5a2b311b0fd *tests/testthat/test1d-sCorrect-compare2.R
+c91f1b55aa29fd37a259dffbc3180182 *tests/testthat/test1d-sCorrect-residuals2.R
+e0b2123c50d381cc859abafd8622c2b7 *tests/testthat/test1d-sCorrect-summary2.R
+8a99363d2e00fd0a7fcd921bc1474deb *tests/testthat/test2-IntDensTri.R
 a6df2cf0c7f4a71ce2d07c42e25dacce *tests/testthat/test2-calcType1postSelection.R
-e682b5e8a5083dc5c25a8d9dac2d2089 *tests/testthat/test2-modelsearch2.R
-be71d8b34e357cd8eb19684fca86dbd6 *tests/testthat/test3-multcomp.R
-7c2fb02abe0bfee668535b768d2890c5 *vignettes/changes.sty
-f1098d3352863114e8137abf0f372214 *vignettes/modelsearch.png
-0b863b0cddf917bbb55d8b3afb6738ee *vignettes/overview.ltx
-1d0b7e12321bd088868c9583b6464d53 *vignettes/overview.org
+7ea222da4c96cd4ae4aeb74c68c6ccf0 *tests/testthat/test2-modelsearch2.R
+e67829df9114066f8424046c6a222835 *tests/testthat/test3-multcomp.R
+b94bba6fc384407b194d25b2cbdba660 *vignettes/overview.pdf.asis
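
The MD5 manifest carries one checksum per shipped file, so the churn above reflects the renames (e.g. R/calcDistMax.R becoming R/modelsearch2-calcDistMax.R with an unchanged checksum) and the new sCorrect-* sources. A spot-check of one entry, assuming the unpacked source tree as working directory:

    ## verify a single entry of the updated manifest
    tools::md5sum("DESCRIPTION")  # expected: 7d14dc31218c6a48bbf777912a1c3f08
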
diff --git a/NAMESPACE b/NAMESPACE
index a116ab7..0f25f88 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,252 +1,215 @@
-# Generated by roxygen2: do not edit by hand
-
-S3method("sCorrect<-",gls)
-S3method("sCorrect<-",gls2)
-S3method("sCorrect<-",lm)
-S3method("sCorrect<-",lm2)
-S3method("sCorrect<-",lme)
-S3method("sCorrect<-",lme2)
-S3method("sCorrect<-",lvmfit)
-S3method("sCorrect<-",lvmfit2)
-S3method(addLink,lvm)
-S3method(autoplot,calibrateType1)
-S3method(autoplot,intDensTri)
-S3method(autoplot,modelsearch2)
-S3method(calibrateType1,lvm)
-S3method(calibrateType1,lvmfit)
-S3method(checkData,lvm)
-S3method(clean,lvm)
-S3method(coefCov,lvm)
-S3method(coefCov,lvmfit)
-S3method(coefCov,multigroup)
-S3method(coefExtra,lvm)
-S3method(coefExtra,lvmfit)
-S3method(coefExtra,multigroup)
-S3method(coefIndexModel,lvm)
-S3method(coefIndexModel,lvmfit)
-S3method(coefIndexModel,multigroup)
-S3method(coefIndexModel,multigroupfit)
-S3method(coefIntercept,lvm)
-S3method(coefIntercept,lvmfit)
-S3method(coefIntercept,multigroup)
-S3method(coefRef,lvmfit)
-S3method(coefReg,lvm)
-S3method(coefReg,lvmfit)
-S3method(coefReg,multigroup)
-S3method(coefType,lvm)
-S3method(coefType,lvmfit)
-S3method(coefType,multigroup)
-S3method(coefVar,lvm)
-S3method(coefVar,lvmfit)
-S3method(coefVar,multigroup)
-S3method(compare2,gls)
-S3method(compare2,gls2)
-S3method(compare2,lm)
-S3method(compare2,lm2)
-S3method(compare2,lme)
-S3method(compare2,lme2)
-S3method(compare2,lvmfit)
-S3method(compare2,lvmfit2)
-S3method(conditionalMoment,gls)
-S3method(conditionalMoment,lm)
-S3method(conditionalMoment,lme)
-S3method(conditionalMoment,lvm)
-S3method(conditionalMoment,lvmfit)
-S3method(createContrast,character)
-S3method(createContrast,gls)
-S3method(createContrast,list)
-S3method(createContrast,lm)
-S3method(createContrast,lme)
-S3method(createContrast,lvmfit)
-S3method(createContrast,mmm)
-S3method(effects2,lvmfit)
-S3method(effects2,lvmfit2)
-S3method(estfun,gls)
-S3method(estfun,lme)
-S3method(estfun,lvmfit)
-S3method(extractData,coxph)
-S3method(extractData,cph)
-S3method(extractData,gls)
-S3method(extractData,lm)
-S3method(extractData,lme)
-S3method(extractData,lvmfit)
-S3method(findNewLink,lvm)
-S3method(getNewLink,modelsearch2)
-S3method(getNewModel,modelsearch2)
-S3method(getStep,modelsearch2)
-S3method(getVarCov2,gls)
-S3method(getVarCov2,lme)
-S3method(getVarCov2,lvmfit)
-S3method(glht2,lvmfit)
-S3method(glht2,mmm)
-S3method(iid2,gls)
-S3method(iid2,gls2)
-S3method(iid2,lm)
-S3method(iid2,lm2)
-S3method(iid2,lme)
-S3method(iid2,lme2)
-S3method(iid2,lvmfit)
-S3method(iid2,lvmfit2)
-S3method(iidJack,default)
-S3method(information2,gls)
-S3method(information2,gls2)
-S3method(information2,lm)
-S3method(information2,lm2)
-S3method(information2,lme)
-S3method(information2,lme2)
-S3method(information2,lvmfit)
-S3method(information2,lvmfit2)
-S3method(leverage2,gls)
-S3method(leverage2,gls2)
-S3method(leverage2,lm)
-S3method(leverage2,lm2)
-S3method(leverage2,lme)
-S3method(leverage2,lme2)
-S3method(leverage2,lvmfit)
-S3method(leverage2,lvmfit2)
-S3method(modelsearch2,lvmfit)
-S3method(nStep,modelsearch2)
-S3method(print,intDensTri)
-S3method(print,modelsearch2)
-S3method(residuals2,gls2)
-S3method(residuals2,lm2)
-S3method(residuals2,lme2)
-S3method(residuals2,lvmfit2)
-S3method(sCorrect,gls)
-S3method(sCorrect,gls2)
-S3method(sCorrect,lm)
-S3method(sCorrect,lm2)
-S3method(sCorrect,lme)
-S3method(sCorrect,lme2)
-S3method(sCorrect,lvmfit)
-S3method(sCorrect,lvmfit2)
-S3method(score2,gls)
-S3method(score2,gls2)
-S3method(score2,lm)
-S3method(score2,lm2)
-S3method(score2,lme)
-S3method(score2,lme2)
-S3method(score2,lvmfit)
-S3method(score2,lvmfit2)
-S3method(setLink,lvm)
-S3method(summary,calibrateType1)
-S3method(summary,modelsearch2)
-S3method(summary2,gls)
-S3method(summary2,gls2)
-S3method(summary2,lm)
-S3method(summary2,lm2)
-S3method(summary2,lme)
-S3method(summary2,lme2)
-S3method(summary2,lvmfit)
-S3method(summary2,lvmfit2)
-S3method(var2dummy,list)
-S3method(var2dummy,lvm)
-S3method(vcov2,gls)
-S3method(vcov2,gls2)
-S3method(vcov2,lm)
-S3method(vcov2,lm2)
-S3method(vcov2,lme)
-S3method(vcov2,lme2)
-S3method(vcov2,lvmfit)
-S3method(vcov2,lvmfit2)
-export("sCorrect<-")
-export(addLink)
-export(calcDistMaxBootstrap)
-export(calcDistMaxIntegral)
-export(calcType1postSelection)
-export(calibrateType1)
-export(checkData)
-export(clean)
-export(coefCov)
-export(coefExtra)
-export(coefIndexModel)
-export(coefIntercept)
-export(coefRef)
-export(coefReg)
-export(coefType)
-export(coefVar)
-export(combineFormula)
-export(compare2)
-export(conditionalMoment)
-export(createContrast)
-export(effects2)
-export(extractData)
-export(findNewLink)
-export(formula2character)
-export(getNewLink)
-export(getNewModel)
-export(getStep)
-export(getVarCov2)
-export(glht2)
-export(iid2)
-export(iidJack)
-export(information2)
-export(initVarLink)
-export(initVarLinks)
-export(intDensTri)
-export(leverage2)
-export(matrixPower)
-export(modelsearch2)
-export(nStep)
-export(residuals2)
-export(sCorrect)
-export(score2)
-export(setLink)
-export(summary2)
-export(tryWithWarnings)
-export(vcov2)
-import(Rcpp)
-import(lava)
-importFrom(MASS,mvrnorm)
-importFrom(Matrix,bdiag)
-importFrom(ggplot2,aes_string)
-importFrom(ggplot2,autoplot)
-importFrom(graphics,par)
-importFrom(graphics,plot)
-importFrom(graphics,text)
-importFrom(methods,is)
-importFrom(multcomp,glht)
-importFrom(mvtnorm,pmvnorm)
-importFrom(mvtnorm,pmvt)
-importFrom(mvtnorm,qmvnorm)
-importFrom(mvtnorm,qmvt)
-importFrom(mvtnorm,rmvnorm)
-importFrom(parallel,detectCores)
-importFrom(parallel,makeCluster)
-importFrom(parallel,stopCluster)
-importFrom(reshape2,melt)
-importFrom(sandwich,estfun)
-importFrom(stats,anova)
-importFrom(stats,as.formula)
-importFrom(stats,coef)
-importFrom(stats,cov)
-importFrom(stats,df.residual)
-importFrom(stats,dist)
-importFrom(stats,formula)
-importFrom(stats,hclust)
-importFrom(stats,logLik)
-importFrom(stats,median)
-importFrom(stats,model.frame)
-importFrom(stats,model.matrix)
-importFrom(stats,na.omit)
-importFrom(stats,optim)
-importFrom(stats,p.adjust)
-importFrom(stats,pf)
-importFrom(stats,pnorm)
-importFrom(stats,predict)
-importFrom(stats,pt)
-importFrom(stats,qqnorm)
-importFrom(stats,quantile)
-importFrom(stats,residuals)
-importFrom(stats,rnorm)
-importFrom(stats,sd)
-importFrom(stats,setNames)
-importFrom(stats,sigma)
-importFrom(stats,update)
-importFrom(stats,vcov)
-importFrom(utils,methods)
-importFrom(utils,packageVersion)
-importFrom(utils,setTxtProgressBar)
-importFrom(utils,tail)
-importFrom(utils,txtProgressBar)
-useDynLib(lavaSearch2, .registration=TRUE)
+# Generated by roxygen2: do not edit by hand
+
+S3method("sCorrect<-",default)
+S3method(addLink,lvm)
+S3method(autoplot,calibrateType1)
+S3method(autoplot,intDensTri)
+S3method(autoplot,modelsearch2)
+S3method(calibrateType1,lvm)
+S3method(calibrateType1,lvmfit)
+S3method(checkData,lvm)
+S3method(clean,lvm)
+S3method(coef,lvmfit2)
+S3method(coef2,lvmfit)
+S3method(coef2,lvmfit2)
+S3method(coefCov,lvm)
+S3method(coefCov,lvmfit)
+S3method(coefCov,multigroup)
+S3method(coefExtra,lvm)
+S3method(coefExtra,lvmfit)
+S3method(coefExtra,multigroup)
+S3method(coefIndexModel,lvm)
+S3method(coefIndexModel,lvmfit)
+S3method(coefIndexModel,multigroup)
+S3method(coefIndexModel,multigroupfit)
+S3method(coefIntercept,lvm)
+S3method(coefIntercept,lvmfit)
+S3method(coefIntercept,multigroup)
+S3method(coefRef,lvmfit)
+S3method(coefReg,lvm)
+S3method(coefReg,lvmfit)
+S3method(coefReg,multigroup)
+S3method(coefType,lvm)
+S3method(coefType,lvmfit)
+S3method(coefType,multigroup)
+S3method(coefVar,lvm)
+S3method(coefVar,lvmfit)
+S3method(coefVar,multigroup)
+S3method(compare,lvmfit2)
+S3method(compare2,lvmfit)
+S3method(compare2,lvmfit2)
+S3method(confint,lvmfit2)
+S3method(confint2,lvmfit)
+S3method(confint2,lvmfit2)
+S3method(createContrast,character)
+S3method(createContrast,list)
+S3method(createContrast,lvmfit)
+S3method(createContrast,lvmfit2)
+S3method(createContrast,mmm)
+S3method(effects,lvmfit2)
+S3method(effects2,lvmfit)
+S3method(effects2,lvmfit2)
+S3method(estimate2,list)
+S3method(estimate2,lvm)
+S3method(estimate2,lvmfit)
+S3method(estimate2,mmm)
+S3method(extractData,lvmfit)
+S3method(findNewLink,lvm)
+S3method(getNewLink,modelsearch2)
+S3method(getNewModel,modelsearch2)
+S3method(getStep,modelsearch2)
+S3method(getVarCov2,lvmfit)
+S3method(getVarCov2,lvmfit2)
+S3method(glht,lvmfit2)
+S3method(glht2,lvmfit)
+S3method(glht2,lvmfit2)
+S3method(glht2,mmm)
+S3method(hessian2,lvmfit)
+S3method(hessian2,lvmfit2)
+S3method(iid,lvmfit2)
+S3method(iid2,lvmfit)
+S3method(iid2,lvmfit2)
+S3method(iidJack,default)
+S3method(information,lvmfit2)
+S3method(information2,lvmfit)
+S3method(information2,lvmfit2)
+S3method(leverage2,lvmfit)
+S3method(leverage2,lvmfit2)
+S3method(model.tables,lvmfit)
+S3method(model.tables,lvmfit2)
+S3method(model.tables2,lvmfit)
+S3method(model.tables2,lvmfit2)
+S3method(modelsearch2,lvmfit)
+S3method(moments2,lvm)
+S3method(moments2,lvmfit)
+S3method(nStep,modelsearch2)
+S3method(nobs2,lvmfit)
+S3method(nobs2,lvmfit2)
+S3method(print,intDensTri)
+S3method(print,modelsearch2)
+S3method(print,summary.glht2)
+S3method(residuals,lvmfit2)
+S3method(residuals2,lvmfit)
+S3method(residuals2,lvmfit2)
+S3method(sCorrect,default)
+S3method(score,lvmfit2)
+S3method(score2,lvmfit)
+S3method(score2,lvmfit2)
+S3method(setLink,lvm)
+S3method(summary,calibrateType1)
+S3method(summary,glht2)
+S3method(summary,lvmfit2)
+S3method(summary,modelsearch2)
+S3method(summary2,lvmfit)
+S3method(summary2,lvmfit2)
+S3method(var2dummy,list)
+S3method(var2dummy,lvm)
+S3method(vcov,lvmfit2)
+S3method(vcov2,lvmfit)
+S3method(vcov2,lvmfit2)
+export("sCorrect<-")
+export(addLink)
+export(calcDistMaxBootstrap)
+export(calcDistMaxIntegral)
+export(calcType1postSelection)
+export(calibrateType1)
+export(checkData)
+export(clean)
+export(coef2)
+export(coefCov)
+export(coefExtra)
+export(coefIndexModel)
+export(coefIntercept)
+export(coefRef)
+export(coefReg)
+export(coefType)
+export(coefVar)
+export(combineFormula)
+export(compare2)
+export(confint2)
+export(createContrast)
+export(effects2)
+export(estimate2)
+export(extractData)
+export(findNewLink)
+export(formula2character)
+export(gaussian_weight.estimate.hook)
+export(gaussian_weight_gradient.lvm)
+export(gaussian_weight_hessian.lvm)
+export(gaussian_weight_logLik.lvm)
+export(gaussian_weight_method.lvm)
+export(gaussian_weight_objective.lvm)
+export(gaussian_weight_score.lvm)
+export(getNewLink)
+export(getNewModel)
+export(getStep)
+export(getVarCov2)
+export(glht2)
+export(hessian2)
+export(iid2)
+export(iid2plot)
+export(iidJack)
+export(information2)
+export(initVarLink)
+export(initVarLinks)
+export(intDensTri)
+export(leverage2)
+export(matrixPower)
+export(model.tables2)
+export(modelsearch2)
+export(moments2)
+export(nStep)
+export(nobs2)
+export(residuals2)
+export(sCorrect)
+export(sampleRepeated)
+export(score2)
+export(setLink)
+export(skeleton)
+export(summary2)
+export(transformSummaryTable)
+export(tryWithWarnings)
+export(vcov2)
+import(Rcpp)
+import(lava)
+importFrom(MASS,mvrnorm)
+importFrom(Matrix,bdiag)
+importFrom(ggplot2,aes_string)
+importFrom(ggplot2,autoplot)
+importFrom(graphics,par)
+importFrom(graphics,plot)
+importFrom(graphics,text)
+importFrom(methods,is)
+importFrom(multcomp,glht)
+importFrom(mvtnorm,pmvnorm)
+importFrom(mvtnorm,pmvt)
+importFrom(mvtnorm,qmvnorm)
+importFrom(mvtnorm,qmvt)
+importFrom(mvtnorm,rmvnorm)
+importFrom(parallel,detectCores)
+importFrom(parallel,makeCluster)
+importFrom(parallel,stopCluster)
+importFrom(reshape2,melt)
+importFrom(sandwich,estfun)
+importFrom(stats,anova)
+importFrom(stats,coef)
+importFrom(stats,confint)
+importFrom(stats,cov)
+importFrom(stats,effects)
+importFrom(stats,formula)
+importFrom(stats,logLik)
+importFrom(stats,model.frame)
+importFrom(stats,model.matrix)
+importFrom(stats,model.tables)
+importFrom(stats,predict)
+importFrom(stats,qqnorm)
+importFrom(stats,quantile)
+importFrom(stats,residuals)
+importFrom(stats,update)
+importFrom(stats,vcov)
+importFrom(utils,methods)
+importFrom(utils,packageVersion)
+importFrom(utils,setTxtProgressBar)
+importFrom(utils,tail)
+importFrom(utils,txtProgressBar)
+useDynLib(lavaSearch2, .registration=TRUE)
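
The rewritten NAMESPACE drops the per-class methods for lm/gls/lme objects and centres the API on lvmfit/lvmfit2, exporting new generics such as coef2, confint2, estimate2, hessian2, model.tables2, and nobs2. A hedged sketch of the workflow these exports imply (model and data are illustrative, not taken from the package):

    ## illustrative only: lvmfit -> lvmfit2 via the new estimate2 generic
    library(lava)
    library(lavaSearch2)
    m <- lvm(Y ~ X)            # a latent variable model (here a plain regression)
    d <- sim(m, n = 50)        # simulated data from lava
    e <- estimate(m, data = d) # lvmfit object
    e2 <- estimate2(e)         # lvmfit2 object carrying the small-sample correction
    summary(e2)                # dispatches to the registered summary.lvmfit2
    confint2(e)                # one-step alternative starting from the lvmfit
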
diff --git a/NEWS b/NEWS
index abb9a4f..ad95504 100644
--- a/NEWS
+++ b/NEWS
@@ -1,83 +1,83 @@
-# -*- mode: org -*-
-
-* Version 1.5
-** User visible change
-- add option method.p.adjust="gof" to modelsearch2.
-
-** Internal change
-- degrees of freedom for the robust standard error are now computed
-  using a modified Satterthwaite approximation.
-- sCorrect compute the hessian.
-
-* Version 1.4 <2018-09-21 Fri>
-** User visible change
-- modelsearch2 is now only based on the score statistic.
-
-* Version 1.3.5 <2018-05-01 Tue>
-** User visible change
-- add arguments df, cluster to glht2
-- argument display is now named print in summary
-
-** Internal change
-- remove dependency on tcltk for displaying progress bar with parallel computation.
-- prepare for update of ggplot 2.3.0 
-
-* Version 1.3.4 <2018-04-23 Mon>
-** User visble changes 
-- =calibrateType1= is now a method and support parallel computation
-- =summary2=, =compare2=, and =iid2= for =lvmfit= object can compute
-  cluster robust standard error.
-
-* Version 1.3.3
-** User visble changes 
-- add option Ftest to =calibrateType1=.
-
-* Version 1.3.2
-** Internal changes
-- fix bugs
-
-* Version 1.3.1
-** User visble changes 
-- New feature: =calibrateType1= to perform simulation studies of the
-  type 1 error rate for Wald test in LVM.
-
-* Version 1.3.0 <2018-04-04 Wed>
-** User visble changes 
-- =summary2= can be called directly.
-
-** Internal changes
-- identification of the small sample bias of the variance-covariance
-  parameters. This enable to correct the derivatives and not only the
-  estimation of the conditional variance.
-- =sCorrect= is able to handle missing values
-- reorganisation of the structure of the functions in =sCorrect=.
-  Only =sCorrect= do real computations and all the results are stored
-  in the object (slot sCorrect). The other functions (=summary2=,
-  =compare2=, =iid2=, =score2=, =residuals2=) only extract and combine
-  information from the object.
-
-
-* Version 1.2.0 <2018-16-03 Fri>
-
-** Internal changes
-- iterative estimation of the small sample bias 
-
-* Version 1.1.0 <2018-02-01 Thu>
-** User visible changes
- - The argument =numericDerivative= has been renamed =numeric.derivative=.
- - The argument =adjust.residuals= has been renamed =bias.correct=.
- - The method =dVcov2= has been renamed =sCorrect=.
- - The method =lTest= is now replaced by the method =compare2=.
-   =compare2= is similar to =lava::compare= but with small sample correction.
-   The argument =C= of =lTest= is now =contrast= in =compare2=.
- - The interaction with the multtest package has been re-organized. 
-   There is no more =mlf2= or =mmm2= function. 
-   When no small sample correction is needed use =glht=, otherwise =glht2=.
- - The function =createContrast= has been improved.
-
-** Internal changes
- - When possible argument x has been converted to object.
- - Dependency on several packages has been moved from Import to
-   Suggest.  This leads to several requireNamespace in the code to
-   check the presence of packages.
- 
+# -*- mode: org -*-
+
+* Version 1.5
+** User visible change
+- add option method.p.adjust="gof" to modelsearch2.
+
+** Internal change
+- degrees of freedom for the robust standard error are now computed
+  using a modified Satterthwaite approximation.
+- sCorrect compute the hessian.
+
+* Version 1.4 <2018-09-21 Fri>
+** User visible change
+- modelsearch2 is now only based on the score statistic.
+
+* Version 1.3.5 <2018-05-01 Tue>
+** User visible change
+- add arguments df, cluster to glht2
+- argument display is now named print in summary
+
+** Internal change
+- remove dependency on tcltk for displaying progress bar with parallel computation.
+- prepare for update of ggplot 2.3.0 
+
+* Version 1.3.4 <2018-04-23 Mon>
+** User visble changes 
+- =calibrateType1= is now a method and support parallel computation
+- =summary2=, =compare2=, and =iid2= for =lvmfit= object can compute
+  cluster robust standard error.
+
+* Version 1.3.3
+** User visble changes 
+- add option Ftest to =calibrateType1=.
+
+* Version 1.3.2
+** Internal changes
+- fix bugs
+
+* Version 1.3.1
+** User visble changes 
+- New feature: =calibrateType1= to perform simulation studies of the
+  type 1 error rate for Wald test in LVM.
+
+* Version 1.3.0 <2018-04-04 Wed>
+** User visble changes 
+- =summary2= can be called directly.
+
+** Internal changes
+- identification of the small sample bias of the variance-covariance
+  parameters. This enable to correct the derivatives and not only the
+  estimation of the conditional variance.
+- =sCorrect= is able to handle missing values
+- reorganisation of the structure of the functions in =sCorrect=.
+  Only =sCorrect= do real computations and all the results are stored
+  in the object (slot sCorrect). The other functions (=summary2=,
+  =compare2=, =iid2=, =score2=, =residuals2=) only extract and combine
+  information from the object.
+
+
+* Version 1.2.0 <2018-16-03 Fri>
+
+** Internal changes
+- iterative estimation of the small sample bias 
+
+* Version 1.1.0 <2018-02-01 Thu>
+** User visible changes
+ - The argument =numericDerivative= has been renamed =numeric.derivative=.
+ - The argument =adjust.residuals= has been renamed =bias.correct=.
+ - The method =dVcov2= has been renamed =sCorrect=.
+ - The method =lTest= is now replaced by the method =compare2=.
+   =compare2= is similar to =lava::compare= but with small sample correction.
+   The argument =C= of =lTest= is now =contrast= in =compare2=.
+ - The interaction with the multtest package has been re-organized. 
+   There is no more =mlf2= or =mmm2= function. 
+   When no small sample correction is needed use =glht=, otherwise =glht2=.
+ - The function =createContrast= has been improved.
+
+** Internal changes
+ - When possible argument x has been converted to object.
+ - Dependency on several packages has been moved from Import to
+   Suggest.  This leads to several requireNamespace in the code to
+   check the presence of packages.
+ 
diff --git a/R/0onload.R b/R/0onload.R
index d4cc0fb..595beff 100644
--- a/R/0onload.R
+++ b/R/0onload.R
@@ -4,9 +4,15 @@
     lava::lava.options(search.calc.quantile.int = FALSE, ## hidden argument for modelsearch2
                        search.type.information = "E", ## hidden argument for modelsearch2
                        ## search.perm.stat = "exact", ## hidden argument for modelsearch2 (otherwise "exact")
-                       method.estimate2 = "ols",
-                       factor.dRvcov = 1/2
+                       method.estimate2 = "ols", ## hidden argument for sCorrect
+                       factor.dRvcov = 1/2,
+                       ssc = "residuals",
+                       df = "satterthwaite",
+                       df.robust = 1
                        )
+
+    lava::addhook("gaussian_weight.estimate.hook", hook = "estimate.hooks")
+
 }
 
 ## * .onAttach
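
Besides the hidden modelsearch2 options, the load hook now registers package-wide defaults for the small-sample machinery (ssc, df, df.robust) and installs the gaussian_weight estimation hook into lava. A hedged illustration of inspecting or overriding these defaults after loading:

    ## values as set in the hunk above
    library(lavaSearch2)
    lava::lava.options()$ssc                  # "residuals"
    lava::lava.options(df = "satterthwaite")  # override/reassert the df method
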
diff --git a/R/Objective_gaussian_weight.R b/R/Objective_gaussian_weight.R
new file mode 100644
index 0000000..91ea58d
--- /dev/null
+++ b/R/Objective_gaussian_weight.R
@@ -0,0 +1,167 @@
+### Objective_gaussian_weight.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: feb 17 2020 (16:29) 
+## Version: 
+## Last-Updated: Jan 12 2022 (12:31) 
+##           By: Brice Ozenne
+##     Update #: 136
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+##' @title Estimate LVM With Weights
+##' @description Estimate LVM with weights.
+##' @name gaussian_weight
+##'
+##' @param x,object A latent variable model
+##' @param data dataset
+##' @param estimator name of the estimator to be used
+##' @param type must be "cond"
+##' @param p parameter value
+##' @param weights weight associated to each iid replicate.
+##' @param S empirical variance-covariance matrix between variable
+##' @param n number of iid replicates
+##' @param mu empirical mean
+##' @param debug,reindex,mean,constrain,indiv additional arguments not used
+##' @param ... passed to lower level functions.
+##' 
+##' @examples
+##' #### linear regression with weights ####
+##'
+##' ## data
+##' df <- data.frame(Y = c(1,2,2,1,2),
+##'                  X = c(1,1,2,2,2),
+##'                  missing = c(0,0,0,0,1),
+##'                  weights = c(1,1,2,1,NA))
+##'
+##' ## using lm
+##' e.lm.GS <- lm(Y~X, data = df)
+##' e.lm.test <- lm(Y~X, data = df[df$missing==0,], weights = df[df$missing==0,"weights"])
+##' 
+##' ## using lvm
+##' m <- lvm(Y~X)
+##' e.GS <- estimate(m, df)
+##' ## e.lava.test <- estimate(m, df[df$missing==0,], weights = df[df$missing==0,"weights"])
+##' ## warnings!!
+##' e.test <- estimate(m, data = df[df$missing==0,],
+##'                    weights = df[df$missing==0,"weights"],
+##'                    estimator = "gaussian_weight")
+##' 
+
+## * gaussian_weight.estimate.hook
+##' @rdname gaussian_weight
+##' @export
+gaussian_weight.estimate.hook <- function(x, data, estimator, ...){
+    dots <- list(...)
+    if(identical(estimator,"gaussian_weight")){
+        xe <- suppressWarnings(estimate(x, data = data, control = list(iter.max = 0))) ## initialize coefficients
+        x$sCorrect <- moments2(xe, data = data, param = NULL, ## initCoef,
+                               initialize = TRUE, usefit = FALSE, score = TRUE, information = TRUE, hessian = FALSE, vcov = TRUE, residuals = TRUE, leverage = FALSE, dVcov = FALSE, dVcov.robust = FALSE)
+    }
+    return( c(list(x=x, data=data, estimator = estimator),dots) )
+}
+
+##' @rdname gaussian_weight
+##' @export
+gaussian_weight_method.lvm <- "nlminb2"
+
+## * gaussian_weight_logLik.lvm
+##' @rdname gaussian_weight
+##' @export
+`gaussian_weight_logLik.lvm` <- function(object, type="cond", p, data, weights,...) {
+    ## ** compute mu and Omega
+    if(type!="cond"){
+        stop("Not implemented for other types than \"cond\"\n ")
+    }
+    cM <- moments2(object, param = p, data = data, weights = weights,
+                   initialize = FALSE, usefit = TRUE, score = FALSE, information = FALSE, hessian = FALSE, vcov = FALSE, residuals = TRUE, leverage = FALSE, dVcov = FALSE, dVcov.robust = FALSE)
+    
+    ## ** prepare
+    name.pattern <- cM$missing$name.pattern
+    missing.pattern <- cM$missing$pattern
+    unique.pattern <- cM$missing$unique.pattern
+    n.pattern <- length(name.pattern)
+    
+    OmegaM1 <- cM$moment$OmegaM1
+    residuals <- cM$residuals
+    logLik <- 0
+
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+        iResiduals <- residuals[iIndex,iY,drop=FALSE]
+        iM <- length(iY)
+        if(is.null(weights)){
+            logLik <- logLik - (cM$cluster$n.cluster/2) * (iM * log(2*pi) - log(det(iOmegaM1))) - sum((iResiduals %*% iOmegaM1) * iResiduals)/2
+        }else{
+            logLik <- logLik - (sum(weights)/2) * (iM * log(2*pi) - log(det(iOmegaM1))) - sum(weights[,1]/2 * rowSums((iResiduals %*% iOmegaM1) * iResiduals))
+        }
+    }
+    return(logLik)
+}
+
+##' @rdname gaussian_weight
+##' @export
+`gaussian_weight_objective.lvm` <- function(x, ...) {
+    logLik <- gaussian_weight_logLik.lvm(object = x,...)
+    return(-logLik)
+}
+
+## * gaussian_weight_score.lvm
+##' @rdname gaussian_weight
+##' @export
+gaussian_weight_score.lvm <- function(x, data, p, S, n, mu=NULL, weights=NULL, debug=FALSE, reindex=FALSE, mean=TRUE, constrain=TRUE, indiv=FALSE,...) {
+
+    ## if(constrain){
+    ##     stop("gaussian_weight_score.lvm does not handle constrain")
+    ## }
+    ## if(reindex){
+    ##     stop("gaussian_weight_score.lvm does not handle reindex")
+    ## }
+    ## if(!mean){
+    ##     stop("gaussian_weight_score.lvm only handles mean")
+    ## }
+    
+    ## ** compute moments
+    cM <- moments2(x, param = p, data = data, weights = weights,
+                   initialize = FALSE, usefit = TRUE, score = TRUE, information = FALSE, hessian = FALSE, vcov = FALSE, residuals = FALSE, leverage = FALSE, dVcov = FALSE, dVcov.robust = FALSE)
+
+    ## ** export
+    if(indiv){
+        return(cM$score)
+    }else{
+        return(colSums(cM$score))
+    }
+}
+
+## * gaussian_weight_gradient.lvm
+##' @rdname gaussian_weight
+##' @export
+gaussian_weight_gradient.lvm <-  function(...) {
+    return(-gaussian_weight_score.lvm(...))
+}
+
+## * gaussian_weight_hessian.lvm
+##' @rdname gaussian_weight
+##' @export
+`gaussian_weight_hessian.lvm` <- function(x, p, n, weights=NULL,...) {
+
+    ## ** compute moments
+    cM <- moments2(x, param = p, weights = weights,
+                   initialize = FALSE, usefit = TRUE, score = FALSE, information = TRUE, hessian = FALSE, vcov = FALSE, residuals = FALSE, leverage = FALSE, dVcov = FALSE, dVcov.robust = FALSE)
+
+    ## ** export
+    return(cM$information)
+}
+
+######################################################################
+### Objective_gaussian_weight.R ends here
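
This new file plugs a weighted Gaussian objective into lava's estimator interface (estimate hook, optimisation method, log-likelihood/objective, score/gradient, hessian). Continuing the roxygen @examples above, a hedged check that the weighted lvm fit tracks weighted least squares on the mean parameters (assumes the example objects exist in the session):

    ## continuation of the @examples block above; e.test and e.lm.test from there
    coef(e.test)     # mean coefficients should be close to coef(e.lm.test)
    coef(e.lm.test)
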
diff --git a/R/RcppExports.R b/R/RcppExports.R
index 72b3cf1..fbfb3b0 100644
--- a/R/RcppExports.R
+++ b/R/RcppExports.R
@@ -1,15 +1,15 @@
-# Generated by using Rcpp::compileAttributes() -> do not edit by hand
-# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
-
-OLS_cpp <- function(X, y) {
-    .Call(`_lavaSearch2_OLS_cpp`, X, y)
-}
-
-OLS2_cpp <- function(X, y) {
-    .Call(`_lavaSearch2_OLS2_cpp`, X, y)
-}
-
-wildBoot_cpp <- function(iid, lsIndexModel, nSample, nObs, nModel, p) {
-    .Call(`_lavaSearch2_wildBoot_cpp`, iid, lsIndexModel, nSample, nObs, nModel, p)
-}
-
+# Generated by using Rcpp::compileAttributes() -> do not edit by hand
+# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
+
+OLS_cpp <- function(X, y) {
+    .Call(`_lavaSearch2_OLS_cpp`, X, y)
+}
+
+OLS2_cpp <- function(X, y) {
+    .Call(`_lavaSearch2_OLS2_cpp`, X, y)
+}
+
+wildBoot_cpp <- function(iid, lsIndexModel, nSample, nObs, nModel, p) {
+    .Call(`_lavaSearch2_wildBoot_cpp`, iid, lsIndexModel, nSample, nObs, nModel, p)
+}
+
diff --git a/R/Scorrect-confint2.R b/R/Scorrect-confint2.R
new file mode 100644
index 0000000..8a4b701
--- /dev/null
+++ b/R/Scorrect-confint2.R
@@ -0,0 +1,108 @@
+### Scorrect-confint2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: Jan  4 2022 (10:59) 
+## Version: 
+## Last-Updated: Jan 12 2022 (11:35) 
+##           By: Brice Ozenne
+##     Update #: 99
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+
+## * Documentation
+#' @title Confidence Intervals With Small Sample Correction
+#' @description Extract confidence intervals of the coefficients from a latent variable model.
+#' Similar to \code{lava::confint} but with small sample correction.
+#' @name confint2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param robust [logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param as.lava [logical] when \code{TRUE} uses the same names as when using \code{stats::coef}.
+#' @param transform [function] transformation to be applied.
+#' @param conf.level [numeric, 0-1] level of the confidence intervals.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients \code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degree of freedoms of the Wald statistic: Satterthwaite \code{"satterthwaite"}. 
+#' Otherwise (\code{"none"}/code{FALSE}/code{NA}) the degree of freedoms are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extract the confidence intervals.
+#' 
+#' @return A data.frame with a row per coefficient.
+#'
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`confint2` <- function(object, robust, cluster, transform,
+                       as.lava, conf.level, ...) UseMethod("confint2")
+
+## * Examples
+#' @rdname confint2
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' dW <- sampleRepeated(10, format = "wide")
+#' set.seed(10)
+#' dL <- sampleRepeated(10, format = "long")
+#' dL$time2 <- paste0("visit",dL$time)
+#' 
+#' #### latent variable models ####
+#' e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+#' confint(e.lvm)
+#' confint2(e.lvm)
+#' confint2(e.lvm, as.lava = FALSE)
+
+## * confint2.lvmfit
+#' @export
+confint2.lvmfit <- function(object, robust = FALSE, cluster = NULL,
+                            transform = NULL, as.lava = TRUE, conf.level = 0.95, 
+                            ssc = lava.options()$ssc, df = lava.options()$df, ...){
+
+    return(confint(estimate2(object, ssc = ssc, df = df, dVcov.robust = robust, ...),
+                   robust = robust, cluster = cluster, as.lava = as.lava, conf.level = conf.level,
+                   transform = transform))
+
+}
+
+## * confint2.lvmfit2
+#' @export
+confint2.lvmfit2 <- function(object, robust = FALSE, cluster = NULL,
+                            transform = NULL, as.lava = TRUE, conf.level = 0.95,  ...){
+
+    out <- model.tables(object, robust = robust, cluster = cluster, transform = transform,
+                        as.lava = as.lava, conf.level = conf.level, ...)[,c("lower","upper"),drop=FALSE]
+    
+    if(as.lava){
+        colnames(out) <- c(paste0(100*(1-conf.level)/2," %"),paste0(100*(1-(1-conf.level)/2)," %"))
+    }
+    return(out)
+
+}
+
+## *  confint.lvmfit2
+#' @export
+confint.lvmfit2 <- function(object, parm = NULL, level = NULL, ...){ ## necessary as confint must contain arguments parm and level
+
+    if(!is.null(parm)){
+        warning("Argument \'parm\' is ignored. \n")
+    }
+    if(!is.null(level)){
+        warning("Argument \'level\' is ignored. \n")
+    }
+    
+    return(confint2(object, ...))
+
+}
+
+##----------------------------------------------------------------------
+### Scorrect-confint2.R ends here
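
The @examples above use the package defaults; per the @param documentation, the correction can also be switched off. A hedged sketch (behaviour inferred from the docs, not verified here):

    ## no small-sample correction, degrees of freedom set to Inf
    confint2(e.lvm, ssc = "none", df = "none")
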
diff --git a/R/Scorrect-model.tables.R b/R/Scorrect-model.tables.R
new file mode 100644
index 0000000..8f6c75e
--- /dev/null
+++ b/R/Scorrect-model.tables.R
@@ -0,0 +1,128 @@
+### Scorrect-model.tables.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: Jan 12 2022 (09:41) 
+## Version: 
+## Last-Updated: Jan 12 2022 (11:45) 
+##           By: Brice Ozenne
+##     Update #: 29
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation
+#' @title Estimate, Confidence Intervals, and P-value With Small Sample Correction
+#' @description Extract estimate, standard error, confidence intervals and p-values associated to each coefficient of a latent variable model.
+#' Similar to \code{lava::confint} but with small sample correction.
+#' @name confint2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param robust [logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param as.lava [logical] when \code{TRUE} uses the same names as when using \code{stats::coef}.
+#' @param transform [function] transformation to be applied.
+#' @param conf.level [numeric, 0-1] level of the confidence intervals.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients \code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degree of freedoms of the Wald statistic: Satterthwaite \code{"satterthwaite"}. 
+#' Otherwise (\code{"none"}/code{FALSE}/code{NA}) the degree of freedoms are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extract the confidence intervals.
+#' 
+#' @return A data.frame with a row per coefficient.
+#'
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`model.tables2` <- function(object, robust, cluster, transform,
+                            as.lava, conf.level, ...) UseMethod("model.tables2")
+
+## * model.tables.lvmfit
+#' @export
+model.tables.lvmfit <- function(x, ...){
+
+    x.confint <- confint(x, ...)
+    out <- cbind(summary(x, ...)$coef, upper = NA, lower = NA)
+    colnames(out)[1:4] <- c("estimate","se","statistic","p.value")
+    out <- out[,c("estimate","se","lower","upper","statistic","p.value")]
+    out[rownames(x.confint),"lower"] <- x.confint[,1]
+    out[rownames(x.confint),"upper"] <- x.confint[,2]
+
+    return(out)
+}
+
+## * model.tables2.lvmfit
+#' @export
+model.tables2.lvmfit <- function(object, robust = FALSE, cluster = NULL,
+                                 transform = NULL, as.lava = TRUE, conf.level = 0.95, 
+                                 ssc = lava.options()$ssc, df = lava.options()$df, ...){
+
+    return(model.tables(estimate2(object, ssc = ssc, df = df, dVcov.robust = robust, ...),
+                        robust = robust, cluster = cluster, as.lava = as.lava, conf.level = conf.level,
+                        transform = transform))
+
+}
+
+## * model.tables.lvmfit2
+#' @export
+model.tables2.lvmfit2 <- function(object, robust = FALSE, cluster = NULL,
+                            transform = NULL, as.lava = TRUE, conf.level = 0.95,  ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    ## ** new model parameters
+    param <- coef(object, as.lava = as.lava)
+    name.param <- names(param)
+    n.param <- length(name.param)
+
+    ## ** new Wald test
+    type <- object$sCorrect$skeleton$type
+    type <- type[!is.na(type$lava),]
+    null <- stats::setNames(rep(0, n.param),name.param)
+    if(any(type$detail %in% c("Sigma_var","Psi_var"))){
+        param.var <- type[type$detail %in% c("Sigma_var","Psi_var"),"param"]
+        if(as.lava){
+            null[names(object$sCorrect$skeleton$originalLink2param)[match(param.var,object$sCorrect$skeleton$originalLink2param)]] <- NA
+        }else{
+            null[object$sCorrect$skeleton$originalLink2param[match(param.var,object$sCorrect$skeleton$originalLink2param)]] <- NA
+        }
+    }
+    table.all <- compare2(object,
+                          linfct = name.param,
+                          rhs = null,
+                          robust = robust,
+                          cluster = NULL,
+                          F.test = FALSE,
+                          as.lava = FALSE,
+                          sep = c("",""))
+
+    tableS.all <- summary(table.all, test = multcomp::adjusted("none"), transform = transform, conf.level = conf.level, rowname.rhs = FALSE)$table2
+
+    if(as.lava){
+        tableS.all <- tableS.all[names(object$sCorrect$skeleton$originalLink2param),,drop=FALSE]
+    }else{
+        tableS.all <- tableS.all[as.character(object$sCorrect$skeleton$originalLink2param),,drop=FALSE]
+    }
+    return(tableS.all)
+
+}
+
+## *  model.tables.lvmfit2
+#' @export
+model.tables.lvmfit2 <- function(x, ...){ ## necessary as model.tables must have x has first argument
+    return(model.tables2(x, ...))
+}
+
+##----------------------------------------------------------------------
+### Scorrect-model.tables.R ends here
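
model.tables2 bundles estimates, standard errors, confidence limits, test statistics, and p-values into a single data.frame, delegating the Wald tests to compare2 with an all-parameter contrast. A hedged usage sketch, reusing e.lvm from the confint2 @examples earlier in this diff:

    ## columns: estimate, se, lower, upper, statistic, p.value (per the code above)
    model.tables2(e.lvm)
    model.tables2(e.lvm, robust = TRUE)  # sandwich-type standard errors
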
diff --git a/R/Utils-nlme.R b/R/Utils-nlme.R
deleted file mode 100644
index b7eefff..0000000
--- a/R/Utils-nlme.R
+++ /dev/null
@@ -1,527 +0,0 @@
-### utils-nlme.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: nov 15 2017 (17:29) 
-## Version: 
-## Last-Updated: dec 10 2018 (23:28) 
-##           By: Brice Ozenne
-##     Update #: 671
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * .coef2
-#' @title Export Mean and Variance Coefficients
-#' @description Export mean and variance coefficients
-#' from a \code{lm}, \code{gls}, or \code{lme} object.
-#' @name coef2-internal
-#'
-#' @param object a \code{lm}, \code{gls} or \code{lme} object.
-#' @param name.Y [character] the name of the endogenous variable. Used to name certain variance parameters.
-#' 
-#' @details The variance coefficients that are exported are the residual variances of the outcomes.
-#' This is \eqn{\sigma^2} for the first outcome and \eqn{k^2 \sigma^2} for the remaining ones.
-#'
-#' @return A numeric vector named after the coefficients, with three attributes:
-#' \itemize{
-#' \item mean.coef: the name of the mean coefficients.
-#' \item var.coef: the name of the variance coefficients.
-#' \item cor.coef:  the name of the correlation coefficients.
-#' }
-#'
-#' @concept extractor
-#' @keywords internal
-`.coef2` <-
-    function(object) UseMethod(".coef2")
-
-## * .coef2.lm
-#' @rdname coef2-internal
-.coef2.lm <- function(object){
-    coef.object <- coef(object)
-    p <- c(coef.object,sigma2=sigma(object)^2)
-    attr(p, "mean.coef") <- names(coef.object)
-    attr(p, "var.coef") <- "sigma2"
-    attr(p, "cor.coef") <- NULL
-    return(p)
-}
-
-## * .coef2.gls
-#' @rdname coef2-internal
-.coef2.gls <- function(object){
-
-    ## *** mean coefficients
-    mean.coef <- stats::coef(object)
-
-    ## *** variance coefficients
-    var.coef <- c(sigma2 = stats::sigma(object)^2)
-    if(!is.null(object$modelStruct$varStruct)){
-        var.coef <- c(var.coef,
-                      stats::coef(object$modelStruct$varStruct, unconstrained = FALSE, allCoef = FALSE)^2)          
-    }
-
-    ## *** covariance coefficients
-    if(!is.null(object$modelStruct$corStruct)){
-        cor.coef <- stats::coef(object$modelStruct$corStruct, unconstrained = FALSE)
-
-        n.var <- length(var.coef)
-        n.cor <- length(cor.coef)
-
-        ## check unstructured covariance matrix
-        if(!is.null(object$modelStruct$varStruct) && ((n.var*(n.var-1))/2 == n.cor)){
-
-            vecgroup <- attr(unclass(object$modelStruct$corStruct), "group")
-            veccov.cor <- unname(unlist(attr(object$modelStruct$corStruct, "covariate")))
-            veccov.var <- attr(object$modelStruct$varStruct, "groups")
-
-            table.covvar <- table(veccov.cor,veccov.var)
-            newlevels.cor <- colnames(table.covvar)[apply(table.covvar, 1, which.max)]
-            veccov.cor2 <- factor(veccov.cor, levels = 0:max(veccov.cor), labels = newlevels.cor)
-            
-            if(identical(as.character(veccov.cor2),as.character(veccov.var))){
-
-                cor.coefName <- apply(utils::combn(newlevels.cor, m = 2), MARGIN = 2, FUN = paste, collapse = "")
-                names(cor.coef) <- paste0("corCoef",cor.coefName)
-
-            }else{
-                names(cor.coef) <- paste0("corCoef",1:length(cor.coef))
-            }
-            
-            
-        }else{
-            names(cor.coef) <- paste0("corCoef",1:length(cor.coef))
-        }
-        
-        
-    }else{
-        cor.coef <- NULL
-    }
-
-    p <- c(mean.coef, cor.coef, var.coef)
-    attr(p, "mean.coef") <- names(mean.coef)
-    attr(p, "var.coef") <- names(var.coef)
-    attr(p, "cor.coef") <- names(cor.coef)
-    return(p)
-}
-
-
-
-
-## * .coef2.lme
-#' @rdname coef2-internal
-.coef2.lme <- function(object){
-
-     ## *** mean coefficients
-    mean.coef <- nlme::fixef(object)
-
-    ## *** variance coefficients
-    var.coef <- c(sigma2 = stats::sigma(object)^2)
-    if(!is.null(object$modelStruct$varStruct)){
-       var.coef <- c(var.coef,
-                      stats::coef(object$modelStruct$varStruct, unconstrained = FALSE, allCoef = FALSE)^2)   
-    }
-
-    ## *** random effect coefficients
-    random.coef <- as.double(nlme::getVarCov(object))    
-    names(random.coef) <- paste0("ranCoef",1:length(random.coef))
-
-     ## *** correlation coefficients
-    if(!is.null(object$modelStruct$corStruct)){
-        cor.coef <- stats::coef(object$modelStruct$corStruct, unconstrained = FALSE)
-        names(cor.coef) <- paste0("corCoef",1:length(cor.coef))
-    }else{
-        cor.coef <- NULL
-    }
-    
-    p <- c(mean.coef, cor.coef, var.coef, random.coef)
-    attr(p, "mean.coef") <- names(mean.coef)
-    attr(p, "var.coef") <- names(var.coef)
-    attr(p, "cor.coef") <- names(cor.coef)
-    attr(p, "ran.coef") <- names(random.coef)
-    return(p)
-}
-
-## * .getFormula2
-`.getFormula2` <-
-    function(object) UseMethod(".getFormula2")
-
-## * .getFormula2.gls
-.getFormula2.gls <- function(object){
-    return(evalInParentEnv(object$call$model))
-}
-
-## * .getFormula2.lme
-.getFormula2.lme <- function(object){
-    return(evalInParentEnv(object$call$fixed))
-}
-
-
-## * .getCluster2
-#' @title Reconstruct the Cluster Variable from a nlme Model
-#' @description Reconstruct the cluster variable from a nlme model.
-#' @name getCluster2-internal
-#'
-#' @param object a \code{gls} or \code{lme} object.
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' Only required for \code{gls} models with no correlation argument.
-#' @param data [data.frame] the data set.
-#' @param ... [internal] Only used by the generic method.
-#'  
-#' @return A list containing:
-#' \itemize{
-#' \item cluster: the cluster index for each observation.
-#' \item n.cluster: the number of clusters.
-#' }
-#'
-#' @concept extractor
-#' @keywords internal
-`.getCluster2` <-
-    function(object, ...) UseMethod(".getCluster2")
-
-## * .getCluster2.gls
-#' @rdname getCluster2-internal
-.getCluster2.gls <- function(object, cluster, data, ...){
-
-### ** get cluster
-    if(is.null(object$modelStruct$corStruct)){
-        if(missing(cluster)){
-            stop("cluster must be specified for gls object with no correlation structure \n")
-        }        
-        if(length(cluster) == 1 && is.character(cluster)){
-            if(cluster %in% names(data) == FALSE){
-                stop("Variable \"",cluster,"\" not in data \n")
-            }
-            ## cluster <- as.numeric(factor(data[[cluster]], levels = unique(data[[cluster]])))
-            cluster <- as.numeric(as.factor(data[[cluster]]))
-            
-        }else if(length(cluster)==NROW(data)){
-            cluster <- as.numeric(as.factor(cluster))
-        }else{
-            stop("length of cluster and data do not match \n")
-        }
-    }else{
-        cluster <- as.numeric(nlme::getGroups(object))
-    }
-    n.cluster <- length(unique(cluster))
-
-### ** reorder cluster according to the data ordering
-    levels.cluster <- unique(cluster)
-    cluster <- as.numeric(factor(cluster, levels = levels.cluster))
-
-### ** export
-    return(list(cluster = cluster,
-                levels.cluster = levels.cluster,
-                n.cluster = n.cluster))
-}
-
-## * .getCluster2.lme
-#' @name getCluster2-internal
-.getCluster2.lme <- function(object, ...){
-
-### ** get cluster
-    if(NCOL(object$groups)!=1){
-        stop("cannot only handle one random effect \n")
-    }
-    cluster <- as.numeric(nlme::getGroups(object))
-    n.cluster <- length(unique(cluster))
-
-### ** reorder cluster according to the data ordering
-    levels.cluster <- unique(cluster)
-    cluster <- as.numeric(as.factor(cluster))
-    ## cluster <- as.numeric(factor(cluster, levels = levels.cluster))
-
-### ** export
-    return(list(cluster = cluster,
-                levels.cluster = levels.cluster,
-                n.cluster = n.cluster))
-}
-
-## * .getIndexOmega2
-#' @title Extract the name of the endogenous variables
-#' @description Extract the name of the endogenous variables from a nlme model.
-#' @name getIndexOmega2-internal
-#'
-#' @param object a \code{gls} or \code{lme} object.
-#' @param param [numeric vector] the mean and variance coefficients.
-#' @param attr.param [character vector] the type of each coefficient (mean or variance).
-#' @param name.Y [character] name of the endogenous variable.
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' Only required for \code{gls} models with no correlation argument.
-#' @param data [data.frame] the data set.
-#' @param ... [internal] Only used by the generic method.
-#'  
-#' @return A list containing:
-#' \itemize{
-#' \item index.Omega: [list of integer vector] For each cluster of observations,
-#' the index of the endogenous variable relative to each observation.
-#' \item n.endogenous: [integer] the number of endogenous variables.
-#' \item name.endogenous: [character vector] the name of the endogenous variables.
-#' \item ref.group: [character vector] the levels of the variable defining the variance component in a generic covariance matrix.
-#' }
-#'
-#' @concept extractor
-#' @keywords internal
-`.getIndexOmega2` <-
-    function(object, ...) UseMethod(".getIndexOmega2")
-
-## * .getIndexOmega2.gls
-#' @rdname getIndexOmega2-internal
-.getIndexOmega2.gls <- function(object, param, attr.param, 
-                                name.Y, cluster, levels.cluster, data){
-
-    class.cor <- class(object$modelStruct$corStruct)
-    class.re <- class(object$modelStruct$reStruct)
-    if("NULL" %in% class.cor == FALSE){
-        formula.cor <- attr(object$modelStruct$corStruct,"formula")
-        varIndex.cor <- all.vars(nlme::getCovariateFormula(formula.cor))
-    }else{
-        varIndex.cor <- NULL
-    }
-
-    class.var <- class(object$modelStruct$varStruct)
-    if("NULL" %in% class.var == FALSE){
-        formula.var <- attr(object$modelStruct$varStruct,"formula")
-        varIndex.var <- all.vars(nlme::getCovariateFormula(formula.var))
-        groupValue.var <- attr(object$modelStruct$varStruct,"groups")
-        if("NULL" %in% class.cor == FALSE || "NULL" %in% class.re == FALSE){ ## nlme automaticly sort data when corStruct or reSruct
-            groupValue.var <- groupValue.var[order(order(cluster))] ## undo automatic sort
-        } 
-    }else{
-        varIndex.var <- NULL
-    }
-
-    ## ** Check admissible var-cor structure
-    validClass.cor <- c("NULL","corCompSymm","corSymm","corStruct")
-    if(any(class.cor %in% validClass.cor == FALSE)){
-        stop("Can only handle corStruct of class \"corCompSymm\" or \"corSymm\"\n")
-    }
-
-    validClass.var <- c("NULL","varIdent","varFunc")
-    if(any(class.var %in% validClass.var == FALSE)){
-        stop("Can only handle varStruct of class \"varIdent\"\n")
-    }
-
-    ## ** Check compatible ordering between var and cor
-    if(length(varIndex.cor) > 0 && length(varIndex.var) > 0 && !identical(varIndex.cor,varIndex.var)){
-        stop("Inconsistency between the left hand side of the formula in corStruct and the left hand side of the formula in varStruct. \n",
-             "It should be something like: correlation = corStruct(form = ~index|groupA) \n",
-             "                             weights = varStruct(form = ~index|groupB) \n")
-    }
-    
-    ## ** Identify the index and name of the endogenous variables
-    if("NULL" %in% class.var && "NULL" %in% class.cor){ ## basic lme models or lm-ish models
-        ## order of the variables does not matter
-        index.Omega <- tapply(cluster,cluster,function(iC){list(1:length(iC))})
-        norm <- FALSE
-    }else if("NULL" %in% class.var && "corCompSymm" %in% class.cor){
-        ## order of the variables does not matter
-        index.Omega <- attr(object$modelStruct$corStruct, "covariate")[levels.cluster]
-        norm <- TRUE
-    }else if(length(varIndex.cor)!=0){ 
-        ## order of the variables matters: use index variable in corStruct
-        index.Omega <- attr(object$modelStruct$corStruct, "covariate")[levels.cluster]
-        norm <- TRUE
-    }else if(length(varIndex.var)!=0){
-        ## order of the variables matters: using index variable in varStruct
-        index.tempo <- data[[varIndex.var]]
-        if(!is.numeric(index.tempo)){
-            stop("The variable in the left hand side of the formula in varStruct must be numeric \n")
-        }
-        if(!is.null(object$na.action)){
-            index.tempo <- index.tempo[-object$na.action]
-        }
-        index.Omega <- tapply(index.tempo, cluster, function(iC){list(iC)})
-        norm <- TRUE
-    }else{
-        ## order of the variables matters: check missing values
-        if("NULL" %in% class.var == FALSE){
-            test.duplicated <- unique(unlist(tapply(groupValue.var, cluster, duplicated)))
-        }else{
-            groupValue.cor <- attr(object$modelStruct$corStruct, "covariate")
-            ref.tempo <- unname(sort(groupValue.cor[[1]]))
-            test.identical <- unique(unlist(lapply(groupValue.cor,function(x){ # x <- groupValue.cor[[1]]
-                identical(unname(sort(x)),ref.tempo)
-            })))
-            test.duplicated <- TRUE
-        }
-        ## recover order
-        if(("NULL" %in% class.var == FALSE) && all(test.duplicated==FALSE)){
-            ## from varIdent when no missing values
-            tempo <- as.numeric(factor(groupValue.var, levels = attr(object$modelStruct$varStruct,"groupNames")))
-            index.Omega <- tapply(tempo, cluster, function(iC){iC})
-            norm <- FALSE
-        }else{
-            ## from the order of the data
-            index.Omega <- tapply(cluster,cluster,function(iC){list(1:length(iC))})
-            norm <- FALSE
-            if("NULL" %in% class.cor == FALSE){
-                if(any(test.identical == FALSE)){
-                    warning("The residuals covariance matrice is subset based on the ordering of the data\n",
-                            "It is safer to define the ordering within group by adding a variable in the left hand side of the formula in corStruct \n",
-                            "e.g. correlation = corStruct(form = ~index|group) \n")
-                }
-            }else{
-                warning("The attribution of the repetition number is based on the ordering of the data\n",
-                        "It is safer to define the ordering within group by adding a variable in the left hand side of the formula in varStruct \n",
-                        "e.g. correlation = varStruct(form = ~index|group) \n")
-            }
-        }
-    }
-
-    ## ** Normalize index.Omega
-    if(norm){
-        level.index <- unique(unlist(index.Omega))
-        conversion <- setNames(order(level.index), level.index)
-        index.Omega <- lapply(index.Omega, function(x){as.double(conversion[as.character(x)])})
-    }
-
-    ## ** Define the name and number of endogenous variables
-    vecIndex.Omega <- unlist(index.Omega)
-    n.endogenous <- max(vecIndex.Omega)
-    name.endogenous <- paste0(name.Y,".",1:n.endogenous)
-
-    if("corSymm" %in% class.cor){
-        ## second order polynomial equation
-        ## m(m-1)/2 = n.cor
-        ## i.e. m = 1/2 + sqrt(1+8 n.cor)/2
-        cor.coef <- param[attr.param$cor.coef]
-        if(n.endogenous != ( 1 + sqrt(1 + 8 * length(cor.coef)) ) / 2){
-            stop("The values of ",varIndex.cor," does not match the number of correlation coefficients \n",
-                 "i.e. the maximum value of ",varIndex.cor," should equal ( 1 + sqrt(1 + 8 * n.cor.coef)) ) / 2 \n"
-                 )
-        }
-    }
-
-    if("NULL" %in% class.var == FALSE){
-
-        groupValue.var.ordered <- groupValue.var[order(cluster)] ## reorder by cluster
-        table.unique <- tapply(1:length(vecIndex.Omega),vecIndex.Omega,function(x){
-            length(unique(groupValue.var.ordered[x]))
-        })
-        if(any(table.unique!=1)){
-            stop("The residual covariance matrix should not differ between clusters \n")
-        }                
-        ref.group <- groupValue.var.ordered[!duplicated(vecIndex.Omega)]
-    }else{
-        ref.group <- NULL
-    }
-        
-    ## ** Export
-    return(list(index.Omega = index.Omega,
-                n.endogenous = n.endogenous,
-                name.endogenous = name.endogenous,
-                ref.group = ref.group))
-}
-
-## * .getIndexOmega2.lme
-#' @rdname getIndexOmega2-internal
-.getIndexOmega2.lme <- .getIndexOmega2.gls
-
-## * .getVarCov2
-#' @title Reconstruct the Marginal Variance Covariance Matrix from a nlme Model
-#' @description Reconstruct the marginal variance covariance matrix from a nlme model.
-#' @name getVarCov2-internal
-#'
-#' @param object a \code{gls} or \code{lme} object
-#' @param param [numeric vector] the mean and variance coefficients.
-#' @param attr.param [character vector] the type of each coefficient (mean or variance).
-#' @param name.endogenous [character vector] name of each repetition of the endogenous variable. 
-#' @param n.endogenous [integer >0] number of repetitions of the endogenous variable.
-#' @param ref.group [character vector] the levels of the variable defining the variance component in a generic covariance matrix.
-#' @param ... [internal] Only used by the generic method.
-#'  
-#' @return [matrix] the marginal variance covariance matrix for a full sample.
-#'
-#' @concept extractor
-#' @keywords internal
-`.getVarCov2` <-
-    function(object, ...) UseMethod(".getVarCov2")
-
-## * .getVarCov2.gls
-#' @rdname getVarCov2-internal
-.getVarCov2.gls <- function(object, param, attr.param,
-                            name.endogenous, n.endogenous, ref.group, ...){
-
-    ## ** Extract information
-    var.coef <- param[attr.param$var.coef]
-    cor.coef <- param[attr.param$cor.coef]
-
-    ## ** Diagonal terms
-    if(length(ref.group)>0){        
-        factor.varcoef <- setNames(c(1,var.coef[-1]),
-                                   attr(object$modelStruct$varStruct,"groupNames"))
-        sigma2.base <- var.coef["sigma2"] * factor.varcoef[ref.group]
-    }else{
-        sigma2.base <- rep(var.coef["sigma2"], n.endogenous)
-    }
-    ## re-order according to the order of the correlation coefficients (if possible)
-    if(length(cor.coef)>1 & length(var.coef)>1){
-        cor.level <- gsub("corCoef","",names(cor.coef))
-        var.level <- names(sigma2.base)
-        var.permlevel <- .allPermutations(var.level)
-
-        M.try <- apply(var.permlevel, MARGIN = 1, function(iLevel){
-            all(apply(utils::combn(iLevel, m = 2), MARGIN = 2, FUN = paste, collapse = "") == cor.level)
-        })
-        if(any(M.try)){
-            sigma2.base <- sigma2.base[var.permlevel[which(M.try),]]
-        }
-    }
-    
-    Omega <- diag(as.double(sigma2.base),
-                  nrow = n.endogenous, ncol = n.endogenous)
-
-    ## ** Extra-diagonal terms
-    if(length(cor.coef)>0){
-        index.lower <- which(lower.tri(Omega))
-        index.lower.arr <- which(lower.tri(Omega),arr.ind = TRUE)
-        vec.sigma.tempo <- apply(index.lower.arr,1,function(x){prod(sqrt(sigma2.base[x]))})        
-        Omega[index.lower] <- cor.coef*vec.sigma.tempo
-        Omega <- symmetrize(Omega, update.upper = TRUE)
-    }    
-
-    ## ** names
-    if(all(!duplicated(names(sigma2.base)))){
-        dimnames(Omega) <- list(names(sigma2.base), names(sigma2.base))
-    }else{
-        dimnames(Omega) <- list(name.endogenous, name.endogenous)
-    }
-
-    
-    ## ** export
-    return(Omega)
-}
-
-## * .getVarCov2.lme
-#' @rdname getVarCov2-internal
-.getVarCov2.lme <- function(object, param, attr.param, ...){
-
-    ## ** prepare with gls
-    out <- .getVarCov2.gls(object, param = param, attr.param = attr.param, ...)
-
-    ## ** add contribution of the random effect
-    ran.coef <- param[attr.param$ran.coef]
-    out <- out + ran.coef
-
-    ## ** export
-    return(out)    
-}
-
-## * .allPermutations
-## .allPermutations(1:3)
-## .allPermutations(2:3)
-.allPermutations <- function(vec){
-    X <- lapply(vec, function(x){
-        cbind(x, .allPermutations(setdiff(vec, x)))
-    })
-    return(unname(do.call(rbind,X)))
-}
-
-##----------------------------------------------------------------------
-### utils-nlme.R ends here
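
utils-nlme.R is deleted outright in 2.0.1, dropping the internal gls/lme machinery. For reference, a self-contained sketch of what the removed .coef2.lm helper returned, reconstructed from the deleted code above and defined inline since the internal no longer exists:

    ## reconstructed from the deleted .coef2.lm; not part of 2.0.1
    .coef2.lm <- function(object){
        coef.object <- coef(object)
        p <- c(coef.object, sigma2 = sigma(object)^2)
        attr(p, "mean.coef") <- names(coef.object)
        attr(p, "var.coef") <- "sigma2"
        attr(p, "cor.coef") <- NULL
        return(p)
    }

    e.lm <- lm(dist ~ speed, data = cars)
    .coef2.lm(e.lm) ## mean coefficients plus the residual variance "sigma2"
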
diff --git a/R/Utils-formula.R b/R/Utils.R
similarity index 71%
rename from R/Utils-formula.R
rename to R/Utils.R
index 0917529..3045d7c 100644
--- a/R/Utils-formula.R
+++ b/R/Utils.R
@@ -1,11 +1,11 @@
-### Utils-formula.R --- 
+### Utils.R --- 
 ##----------------------------------------------------------------------
 ## Author: Brice Ozenne
 ## Created: nov 27 2018 (14:32) 
 ## Version: 
-## Last-Updated: nov 28 2018 (11:32) 
+## Last-Updated: Jan 11 2022 (16:43) 
 ##           By: Brice Ozenne
-##     Update #: 3
+##     Update #: 161
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -15,7 +15,8 @@
 ## 
 ### Code:
 
-## * selectResponse (Documentation)
+## * formula
+## ** selectResponse (Documentation)
 #' @title Response Variable of a Formula
 #' @description Return the response variable contained in the formula.
 #' @name selectResponse
@@ -49,7 +50,7 @@
 #' @keywords internal
 `selectResponse` <-  function(object, ...) UseMethod("selectResponse")
 
-## * selectResponse.formula
+## ** selectResponse.formula
 #' @rdname selectResponse
 #' @method selectResponse formula
 selectResponse.formula <- function(object, format = "call", ...){
@@ -68,7 +69,7 @@ selectResponse.formula <- function(object, format = "call", ...){
   return(res)
 }
 
-## * selectRegressor (Documentation)
+## ** selectRegressor (Documentation)
 #' @title Regressor of a Formula.
 #' @description Return the regressor variables contained in the formula
 #' @name selectRegressor
@@ -100,7 +101,7 @@ selectResponse.formula <- function(object, format = "call", ...){
 #' @keywords internal
 `selectRegressor` <-  function(object, ...) UseMethod("selectRegressor")
 
-## * selectRegressor.formula
+## ** selectRegressor.formula
 #' @rdname selectRegressor
 #' @method selectRegressor formula
 selectRegressor.formula <- function(object, format = "call", ...){
@@ -122,13 +123,7 @@ selectRegressor.formula <- function(object, format = "call", ...){
   return(res)
 }
 
-
-
-
-######################################################################
-### Utils-formula.R ends here
-
-## * combineFormula
+## ** combineFormula
 #' @title Combine formula
 #' @description Combine formula by outcome
 #' 
@@ -166,7 +161,7 @@ combineFormula <- function(ls.formula, as.formula = TRUE, as.unique = FALSE){
     X <- unlist(ls.X[which(ls.endogeneous==endogenous[iterE])])
     if(as.unique){X <- unique(X)}
     txt <- paste(endogenous[iterE],"~",paste(X, collapse = " + "))
-    if(as.formula){ls.formula2[[iterE]] <- as.formula(txt)}else{ls.formula2[[iterE]] <- txt}
+    if(as.formula){ls.formula2[[iterE]] <- stats::as.formula(txt)}else{ls.formula2[[iterE]] <- txt}
   }
   
   return(ls.formula2)
@@ -174,7 +169,7 @@ combineFormula <- function(ls.formula, as.formula = TRUE, as.unique = FALSE){
 
 
 
-## * formula2character
+## ** formula2character
 #' @title formula character conversion
 #' @description Conversion of formula into character string or vice versa
 #' @name convFormulaCharacter
@@ -203,3 +198,65 @@ formula2character <- function(f, type = "formula"){
   return(gsub("[[:blank:]]","",txt))
   
 }
+
+## * Miscellaneous
+## ** .allPermutations
+## .allPermutations(1:3)
+## .allPermutations(2:3)
+.allPermutations <- function(vec){
+    X <- lapply(vec, function(x){
+        cbind(x, .allPermutations(setdiff(vec, x)))
+    })
+    return(unname(do.call(rbind,X)))
+}
+## ** .combination
+#' @title Form all Unique Combinations Between Vectors
+#' @description Form all unique combinations between vectors (removing symmetric combinations).
+#' @name combination
+#'
+#' @param ... [vectors] elements to be combined.
+#' @param levels [logical] should a label for each combination be output as an attribute named levels.
+#'
+#' @return A matrix, each row being a different combination.
+#' 
+#' @examples
+#' .combination <- lavaSearch2:::.combination
+#' 
+#' .combination(1,1)
+#' .combination(1:2,1:2)
+#' .combination(c(1:2,1:2),1:2)
+#' 
+#' .combination(alpha = 1:2, beta = 3:4)
+#' .combination(alpha = 1:2, beta = 3:4, gamma = 1:4)
+#' .combination(alpha = 1:3, beta = 1:3, gamma = 1:3)
+#'
+#' @keywords internal
+.combination <- function(..., levels = FALSE){
+
+    ## ** normalize arguments
+    dots <- list(...)
+    test.null <- unlist(lapply(dots,is.null))    
+    if(any(test.null)){
+        return(NULL)
+    }
+    dots <- lapply(dots,unique)
+
+    ## ** form all combinations
+    grid <- expand.grid(dots, stringsAsFactors = FALSE) 
+    
+    ## ** remove identical combinations after permutations of the columns
+    flatGrid <- apply(grid,1,function(iX){paste0(sort(iX),collapse = "")})    
+    grid <- grid[!duplicated(flatGrid),]
+    rownames(grid) <- NULL
+    attr(grid,"levels") <- unname(flatGrid[!duplicated(flatGrid)])
+    
+    ## ** export
+    return(grid)        
+}
+
+######################################################################
+### Utils.R ends here
+
+
+
+
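
A short illustration of the two helpers now gathered in Utils.R; both are internal, hence the ":::" access. The expected output follows directly from the code above:

    ## all 3! = 6 orderings of 1:3, one permutation per row
    lavaSearch2:::.allPermutations(1:3)

    ## unique unordered pairs: (1,1), (2,1), (2,2); the row (1,2) is
    ## dropped because it duplicates (2,1) once each row is sorted
    grid <- lavaSearch2:::.combination(1:2, 1:2)
    grid
    attr(grid, "levels") ## "11" "12" "22"
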
diff --git a/R/autoplot.calibrateType1.R b/R/autoplot.calibrateType1.R
index fb0af6b..5f82c62 100644
--- a/R/autoplot.calibrateType1.R
+++ b/R/autoplot.calibrateType1.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: apr  5 2018 (13:20) 
 ## Version: 
-## Last-Updated: maj 23 2018 (09:53) 
+## Last-Updated: Jan 11 2022 (16:46) 
 ##           By: Brice Ozenne
-##     Update #: 29
+##     Update #: 30
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -107,7 +107,7 @@ autoplot.calibrateType1 <- function(object, type = "bias", plot = TRUE, color.th
             keep.method <- as.character(unique(df.gg$method))
         }
         if(is.null(name2label)){
-            name2label <- setNames(unique(paste0(df.gg$statistic,", ",df.gg$correction)),unique(df.gg$method))
+            name2label <- stats::setNames(unique(paste0(df.gg$statistic,", ",df.gg$correction)),unique(df.gg$method))
         }
         if(is.null(color)){
             ## from ggthemes::colorblind_pal()(8)
diff --git a/R/calibrateType1.R b/R/calibrateType1.R
index 78d3832..ae53101 100644
--- a/R/calibrateType1.R
+++ b/R/calibrateType1.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: apr  5 2018 (10:23) 
 ## Version: 
-## Last-Updated: mar 13 2019 (11:55) 
+## Last-Updated: Jan 11 2022 (16:51) 
 ##           By: Brice Ozenne
-##     Update #: 813
+##     Update #: 852
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -49,7 +49,7 @@
 ##' Can also be \code{NULL}: in such a case the results are not exported.
 ##' @param F.test [logical] should a multivariate Wald test be performed, testing all the null hypotheses simultaneously?
 ##' @param label.file [character] element to include in the file name.
-##' @param seed [integer, >0] seed value that will be set at the beginning of the simulation to enable eproducibility of the results.
+##' @param seed [integer, >0] value that will be set before adjustment for multiple comparisons to ensure reproducible results.
 ##' Can also be \code{NULL}: in such a case no seed is set.
 ##' @param cpus [integer >0] the number of processors to use.
 ##' If greater than 1, the simulations are performed in parallel. 
@@ -184,7 +184,7 @@ calibrateType1.lvm <- function(object, param, n.rep, n, correction = TRUE, warmu
     ## *** type of the coef of the fitted model
     df.type <- coefType(e.true, as.lava = FALSE)
     df.type <- df.type[df.type$name %in% name.coef,]
-    type.coef <- setNames(df.type$detail, df.type$name)
+    type.coef <- stats::setNames(df.type$detail, df.type$name)
 
     ## *** null hypothesis
     n.param <- length(param)
@@ -210,7 +210,10 @@ calibrateType1.lvm <- function(object, param, n.rep, n, correction = TRUE, warmu
         txtCoef <- paste(param[coef.true[param]==0], collapse = "\" \"")
         stop("Control type 2 error: coefficients \"",txtCoef,"\" are 0 while their belong to the param hypothesis\n")
     }
-    res.C <- createContrast(param, name.param = name.coef, add.rowname = TRUE, rowname.rhs = FALSE)
+    res.C <- .createContrast(param,
+                             name.param = name.coef,
+                             add.rowname = TRUE,
+                             rowname.rhs = FALSE)
     contrast <- res.C$contrast
     if(is.null(null)){
         rhs <- res.C$null
@@ -249,7 +252,7 @@ calibrateType1.lvm <- function(object, param, n.rep, n, correction = TRUE, warmu
 
     
 ### ** loop
-    store.coef <- param
+    store.coef <- paste0(rownames(contrast)," == ",rhs)
     if(F.test){
         store.coef <- c(store.coef, "global")
     }
@@ -363,7 +366,9 @@ calibrateType1.lvm <- function(object, param, n.rep, n, correction = TRUE, warmu
         filename.estimate <- gsub("\\(tempo\\)","",filename_tempo.estimate)
 
         if(!is.null(dir.save)){
-            validPath(dir.save, type = "dir")
+            if (dir.exists(dir.save) == FALSE) {
+                stop("Argument \'dir.save\' does not lead to an existing directory \n")
+            }            
         }
 
         if(!is.null(dir.save)){
@@ -525,21 +530,19 @@ calibrateType1.lvmfit <- function(object, param, n.rep, correction = TRUE, F.tes
     
     ## ** model fit
     e.lvm <- suppressWarnings(do.call(lava::estimate, args = c(list(object, data = dt.sim, cluster = cluster), dots)))
-    eS.lvm <- suppressWarnings(try(summary(e.lvm)$coef, silent = TRUE))
     
     ## check correct convergence of the latent variable model
     if(("convergence" %in% names(e.lvm$opt)) && (e.lvm$opt$convergence==1)){return(list(pvalue=NULL,estimate=NULL))} ## exclude lvm that has not converged
-    if(any(eigen(getVarCov2(e.lvm))$values<=0)){return(list(pvalue=NULL,estimate=NULL))} ## exclude lvm where the residual covariance matrix is not semipositive definite
+    if(any(eigen(getVarCov2(e.lvm, ssc = FALSE, df = FALSE))$values<=0)){return(list(pvalue=NULL,estimate=NULL))} ## exclude lvm where the residual covariance matrix is not semipositive definite
     ratio_sd_beta <- sqrt(diag(vcov(e.lvm)))/(abs(coef(e.lvm))+1)
-    if(max(na.omit(ratio_sd_beta))>1e3){return(list(pvalue=NULL,estimate=NULL))} ## exclude if standard error much larger than coefficient
-    if(inherits(eS.lvm, "try-error")){return(list(pvalue=NULL,estimate=NULL))} ## exclude lvm where we cannot compute the summary
+    if(max(stats::na.omit(ratio_sd_beta))>1e3){return(list(pvalue=NULL,estimate=NULL))} ## exclude if standard error much larger than coefficient
+    if(inherits(suppressWarnings(try(summary(e.lvm)$coef, silent = TRUE)), "try-error")){return(list(pvalue=NULL,estimate=NULL))} ## exclude lvm where we cannot compute the summary
 
     ## ** corrections
+    e.lvm <- estimate2(e.lvm, df = "none", ssc = "none")
     if(correction){
-        e.lvm.Satt <- e.lvm    
-        testError.Satt <- try(sCorrect(e.lvm.Satt) <- FALSE, silent = TRUE)
-        e.lvm.KR <- e.lvm
-        testError.KR <- try(suppressWarnings(sCorrect(e.lvm.KR, safeMode = TRUE) <- TRUE), silent = TRUE)
+        e.lvm.Satt <- try(estimate2(e.lvm, df = "satterthwaite", ssc = "none"), silent = TRUE)
+        e.lvm.KR <- try(suppressWarnings(estimate2(e.lvm, df = "satterthwaite", ssc = "residuals")), silent = TRUE)
     }else{
         e.lvm.Satt <- 1
         class(e.lvm.Satt) <- "try-error"
@@ -548,49 +551,49 @@ calibrateType1.lvmfit <- function(object, param, n.rep, correction = TRUE, F.tes
     }
     
     ## ** extract p.values
-    eS.ML <- summary2(e.lvm, robust = FALSE, df = FALSE, bias.correct = FALSE)$coef
-    F.ML <- compare2(e.lvm, robust = FALSE, df = FALSE, bias.correct = FALSE,
-                     contrast = contrast, null = rhs, F.test = F.test, as.lava = FALSE)
+    eS.ML <- summary2(e.lvm, robust = FALSE)$coef
+    F.ML <- summary(compare2(e.lvm, robust = FALSE,
+                             linfct = contrast, rhs = rhs, F.test = F.test, as.lava = FALSE))$table2
 
-    eS.robustML <- summary2(e.lvm, robust = TRUE, df = FALSE, bias.correct = FALSE)$coef
-    F.robustML <- compare2(e.lvm, robust = TRUE, df = FALSE, bias.correct = FALSE,
-                           contrast = contrast, null = rhs, F.test = F.test, as.lava = FALSE)
+    eS.robustML <- summary2(e.lvm, robust = TRUE)$coef
+    F.robustML <- summary(compare2(e.lvm, robust = TRUE,
+                                   linfct = contrast, rhs = rhs, F.test = F.test, as.lava = FALSE))$table2
     
 
     if(!inherits(e.lvm.Satt,"try-error")){
         
         eS.Satt <- summary2(e.lvm.Satt, robust = FALSE)$coef
-        F.Satt <- compare2(e.lvm.Satt, robust = FALSE,
-                           contrast = contrast, null = rhs, F.test = F.test,
-                           as.lava = FALSE)
+        F.Satt <- summary(compare2(e.lvm.Satt, robust = FALSE,
+                                   linfct = contrast, rhs = rhs, F.test = F.test,
+                                   as.lava = FALSE))$table2
             
         eS.robustSatt <- summary2(e.lvm.Satt, robust = TRUE)$coef
-        F.robustSatt <- compare2(e.lvm.Satt, robust = TRUE,
-                                  contrast = contrast, null = rhs, F.test = F.test,
-                                  as.lava = FALSE)
+        F.robustSatt <- summary(compare2(e.lvm.Satt, robust = TRUE,
+                                  linfct = contrast, rhs = rhs, F.test = F.test,
+                                  as.lava = FALSE))$table2
     }
     
     if(!inherits(e.lvm.KR,"try-error")){
 
-        ## eS.SSC <- summary2(e.lvm.KR, robust = FALSE, df = FALSE)$coef
-        F.SSC <- compare2(e.lvm.KR, robust = FALSE, df = FALSE,
-                           contrast = contrast, null = rhs, F.test = F.test,
-                          as.lava = FALSE)
+        eS.SSC <- summary2(e.lvm.KR, robust = FALSE)$coef
+        F.SSC <- summary(compare2(e.lvm.KR, robust = FALSE, 
+                                  linfct = contrast, rhs = rhs, F.test = F.test,
+                                  as.lava = FALSE))$table2
         
-        ## eS.robustSSC <- summary2(e.lvm.KR, robust = TRUE, df = FALSE)$coef
-        F.robustSSC <- compare2(e.lvm.KR, robust = TRUE, df = FALSE,
-                                 contrast = contrast, null = rhs, F.test = F.test,
-                                 as.lava = FALSE)
+        eS.robustSSC <- summary2(e.lvm.KR, robust = TRUE)$coef
+        F.robustSSC <- summary(compare2(e.lvm.KR, robust = TRUE, 
+                                        linfct = contrast, rhs = rhs, F.test = F.test,
+                                        as.lava = FALSE))$table2
 
         eS.KR <- summary2(e.lvm.KR, robust = FALSE)$coef
-        F.KR <- compare2(e.lvm.KR, robust = FALSE,
-                          contrast = contrast, null = rhs, F.test = F.test,
-                          as.lava = FALSE)
+        F.KR <- summary(compare2(e.lvm.KR, robust = FALSE,
+                                 linfct = contrast, rhs = rhs, F.test = F.test,
+                                 as.lava = FALSE))$table2
         
         eS.robustKR <- summary2(e.lvm.KR, robust = TRUE)$coef
-        F.robustKR <- compare2(e.lvm.KR, robust = TRUE,
-                                contrast = contrast, null = rhs, F.test = F.test,
-                                as.lava = FALSE)
+        F.robustKR <- summary(compare2(e.lvm.KR, robust = TRUE,
+                                       linfct = contrast, rhs = rhs, F.test = F.test,
+                                       as.lava = FALSE))$table2
     }
     
     ## ** store
@@ -637,28 +640,28 @@ calibrateType1.lvmfit <- function(object, param, n.rep, correction = TRUE, F.tes
     
     ## *** p-value
     if(is.null(cluster)){
-        ls.iP$p.Ztest <- F.ML[store.coef,"p-value"]
+        ls.iP$p.Ztest <- F.ML[store.coef,"p.value"]
         if(!inherits(e.lvm.Satt,"try-error")){
-            ls.iP$p.Satt <- F.Satt[store.coef,"p-value"]
+            ls.iP$p.Satt <- F.Satt[store.coef,"p.value"]
         }
         if(!inherits(e.lvm.KR,"try-error")){
-            ls.iP$p.SSC <- F.SSC[store.coef,"p-value"]
-            ls.iP$p.KR <- F.KR[store.coef,"p-value"]
+            ls.iP$p.SSC <- F.SSC[store.coef,"p.value"]
+            ls.iP$p.KR <- F.KR[store.coef,"p.value"]
         }
     }
-    ls.iP$p.robustZtest <- F.robustML[store.coef,"p-value"]
+    ls.iP$p.robustZtest <- F.robustML[store.coef,"p.value"]
     if(!inherits(e.lvm.Satt,"try-error")){
-        ls.iP$p.robustSatt <- F.robustSatt[store.coef,"p-value"]
+        ls.iP$p.robustSatt <- F.robustSatt[store.coef,"p.value"]
     }
     if(!inherits(e.lvm.KR,"try-error")){
-        ls.iP$p.robustSSC <- F.robustSSC[store.coef,"p-value"]
-        ls.iP$p.robustKR <- F.robustKR[store.coef,"p-value"]
+        ls.iP$p.robustSSC <- F.robustSSC[store.coef,"p.value"]
+        ls.iP$p.robustKR <- F.robustKR[store.coef,"p.value"]
     }
 
     ## *** niter.correct / warning
     if(!inherits(e.lvm.KR,"try-error")){
         test.warning <- inherits(attr(e.lvm.KR$sCorrect,"warning"),"try-error")
-        niter.correct <- as.double(e.lvm.KR$sCorrect$opt$iterations)
+        niter.correct <- as.double(e.lvm.KR$sCorrect$ssc$iter)
     }else{
         test.warning <- NA
         niter.correct <- as.double(NA)
@@ -684,12 +687,12 @@ calibrateType1.lvmfit <- function(object, param, n.rep, correction = TRUE, F.tes
     ## estimates
     ## use rep to avoid warning
     ## return(list(n = n,
-                ## rep = iRep,
-                ## seed = seed,
-                ## ninter = niter.correct,
-                ## warning = test.warning,
-                ## name = names(coef.true),
-                ## type = type.coef[name.coef]))
+    ## rep = iRep,
+    ## seed = seed,
+    ## ninter = niter.correct,
+    ## warning = test.warning,
+    ## name = names(coef.true),
+    ## type = type.coef[name.coef]))
     df1 <- data.frame(n = n,
                       rep = iRep, n.coef,
                       seed = seed, n.coef,
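
The calibrateType1.R changes above replace the old sCorrect<- replacement function with estimate2(). A sketch of the corrected-inference workflow, mirroring the calls introduced in this diff; the model and sample size are illustrative:

    library(lava)
    library(lavaSearch2)

    set.seed(10)
    m <- lvm(Y ~ X1 + X2)
    d <- sim(m, 30)
    e.lvm <- estimate(m, data = d)

    ## Satterthwaite degrees of freedom, no small-sample correction
    e.Satt <- estimate2(e.lvm, df = "satterthwaite", ssc = "none")

    ## Satterthwaite df plus the residual small-sample correction
    e.KR <- estimate2(e.lvm, df = "satterthwaite", ssc = "residuals")

    summary2(e.KR, robust = FALSE)$coef
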
diff --git a/R/clean.R b/R/clean.R
index 525a16e..85f9336 100644
--- a/R/clean.R
+++ b/R/clean.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: nov 27 2018 (14:35) 
 ## Version: 
-## Last-Updated: nov 28 2018 (15:34) 
+## Last-Updated: Jan 11 2022 (16:00) 
 ##           By: Brice Ozenne
-##     Update #: 13
+##     Update #: 14
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -53,7 +53,6 @@
 #' clean(m)
 
 ## * clean.lvm
-#' @rdname clean
 #' @export
 clean.lvm <- function(x, rm.exo = TRUE, rm.endo = TRUE, rm.latent = TRUE, ...){
 
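
clean() itself is unchanged here apart from the roxygen tweak. A quick usage sketch based on the signature shown above; the model is illustrative, and the behaviour follows clean()'s documented purpose of dropping unconnected variables:

    library(lava)
    library(lavaSearch2)

    m <- lvm(Y ~ X1 + X2)
    cancel(m) <- Y ~ X1   ## X1 is now disconnected from the model
    clean(m)              ## drops it, since rm.exo = TRUE by default
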
diff --git a/R/coefType.R b/R/coefType.R
index f46c658..bf80b97 100644
--- a/R/coefType.R
+++ b/R/coefType.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: okt 12 2017 (14:38) 
 ## Version: 
-## last-updated: okt  4 2018 (16:16) 
+## last-updated: Jan 11 2022 (09:55) 
 ##           By: Brice Ozenne
-##     Update #: 513
+##     Update #: 882
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -112,7 +112,7 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
 
     externalLink <- type <- NULL ## [:for CRAN check] subset
     
-    ## *** extract all coef
+    ## ** extract all coef
     index.all <- which(!is.na(object$M), arr.ind = FALSE)
     ls.name <- list()
     ls.X <- list()
@@ -122,11 +122,11 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
     ls.param <- list()
     ls.marginal <- list()
 
-    ## *** intercept
+    ## ** intercept
     n.intercept <- length(object$mean)
     if(n.intercept>0){
         ls.name$intercept <- names(object$mean)    
-    
+
         ls.Y$intercept <- ls.name$intercept
         ls.X$intercept <- rep(NA, n.intercept)    
         ls.type$intercept <- rep("intercept", n.intercept)
@@ -139,7 +139,7 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
         ls.marginal$intercept <-  ls.name$intercept %in% exogenous(object)
     }
     
-    ## *** regression
+    ## ** regression
     arrIndex.regression <- which(object$M==1, arr.ind = TRUE)
     index.regression <- which(object$M==1, arr.ind = FALSE)
     n.regression <- length(index.regression)
@@ -161,7 +161,7 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
         ls.marginal$regression <- rep(FALSE,n.regression)
     }
 
-    ## *** covariance
+    ## ** covariance
     M.cov <- object$cov
     M.cov[upper.tri(M.cov)] <- 0
     
@@ -205,7 +205,7 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
         ls.marginal$covariance <- rep(FALSE, n.covariance)
     }
     
-    ## *** external coefficients
+    ## ** external coefficients
     n.external <- length(object$expar)
     if(n.external>0){
         ls.name$external <- names(object$expar)
@@ -225,7 +225,7 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
         ls.marginal$external <-  rep(FALSE, n.external)
     }
 
-    ## *** merge
+    ## ** merge
     df.param <- data.frame(name = unlist(ls.name),
                            Y = unlist(ls.Y),
                            X = unlist(ls.X),
@@ -235,9 +235,9 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
                            param = unlist(ls.param),
                            marginal = unlist(ls.marginal),
                            stringsAsFactors = FALSE)
-    df.param[df.param$X %in% latent(object),"data"] <- NA
+    df.param[which(df.param$X %in% latent(object)),"data"] <- NA
     
-    ## *** categorical variables
+    ## ** categorical variables
     if(!is.null(object$attributes$ordinalparname)){
         resCar <- defineCategoricalLink(object, link = df.param$name, data = data)
         
@@ -279,8 +279,11 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
         df.param$originalLink <- df.param$name
     }
 
-    ## *** merge with lava
-    coef.lava <- coef(object)
+    ## ** original link
+    coef.lava <- coef(object, labels = 0)
+    coef2.lava <- coef(object, labels = 1)
+    
+    ## ** merge with lava
     name.coef <- names(coef.lava)
 
     index.keep <- which(df.param$type!="external" & df.param$factitious == FALSE & df.param$marginal == FALSE)
@@ -289,10 +292,13 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
                                                  name.coef = df.param[index.keep, "name"],
                                                  type.coef = df.param[index.keep, "type"])
     df.param$lava <- name.coef[match(df.param$originalLink,coef.lava)]
+    df.param[df.param$factitious,c("param","lava")] <- as.character(NA)
     df.param <- df.param[order(df.param$type,df.param$detail,df.param$name),,drop=FALSE]
+    df.param$originalLink[is.na(df.param$lava)] <- NA
+    ## df.param$param[is.na(df.param$lava)] <- NA
     rownames(df.param) <- NULL
 
-    ## *** export
+    ## ** export
     if(as.lava){
         ## add extra mean as links
         vec.extra <- unique(stats::na.omit(df.param$externalLink))
@@ -303,13 +309,15 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
             df.param <- rbind(df.param[,c("name", "type", "lava")],
                               df.extra)
         }
-        
         ## 
-        out <- subset(df.param, subset = !is.na(lava), select = c("type", "name"))
-        out <- stats::setNames(out$type, out$name)
-        out <- out[!duplicated(names(out))]
+        out <- stats::setNames(df.param$type, df.param$name)
+        ## out <- out[!duplicated(names(out))]
         return(out[coef.lava])    
     }else{
+        df.param$detail <- factor(df.param$detail,
+                                  levels = c("nu","alpha","K","Gamma","Lambda","B","Sigma_var","Sigma_cov","sigma2","sigma2k","cor","Psi_var","Psi_cov",NA))
+        df.param <- df.param[order(df.param$detail,df.param$param),]
+        df.param$detail <- as.character(df.param$detail)
         return(df.param)
     }
 }
@@ -319,16 +327,16 @@ coefType.lvm <- function(object, as.lava = TRUE, data = NULL, ...){
 #' @export
 coefType.lvmfit <- function(object, as.lava = TRUE, ...){ 
 
-    ## *** find type of the coefficients in the original model
-    df.param <- coefType(object$model0, as.lava = FALSE)
+    ## ** find type of the coefficients in the original model
+    df.param <- coefType(object$model0, as.lava = FALSE, ...)
     
-    ## *** export
+    ## ** export
     if(as.lava){
         out <- subset(df.param, subset = !is.na(lava), select = c("type", "name"))
         out <- stats::setNames(out$type, out$name)
         coef.lava <- names(stats::coef(object))
         return(out[coef.lava])    
-    }else{
+    }else{        
         return(df.param)
     }
 }
@@ -386,6 +394,5 @@ detailName <- function(object, name.coef, type.coef){
     return(type.coef)
 }
 
-
-#----------------------------------------------------------------------
+##----------------------------------------------------------------------
 ### coefType.R ends here
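
The reworked coefType orders its long-format output by the "detail" column. A small sketch of both output formats, assuming lava is attached; the model is illustrative:

    library(lava)
    library(lavaSearch2)

    m <- lvm()
    regression(m) <- c(Y1, Y2, Y3) ~ eta
    regression(m) <- Y1 ~ X1
    latent(m) <- ~eta

    ## named vector, one type per lava coefficient
    coefType(m)

    ## full parameter table (one row per link), now sorted by "detail"
    head(coefType(m, as.lava = FALSE))
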
diff --git a/R/compare2.R b/R/compare2.R
deleted file mode 100644
index c714025..0000000
--- a/R/compare2.R
+++ /dev/null
@@ -1,555 +0,0 @@
-### compare2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: jan 30 2018 (14:33) 
-## Version: 
-## Last-Updated: mar  7 2019 (11:41) 
-##           By: Brice Ozenne
-##     Update #: 592
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * Documentation - compare2
-#' @title Test Linear Hypotheses with small sample correction
-#' @description Test Linear Hypotheses using a multivariate Wald statistic.
-#' Similar to \code{lava::compare} but with small sample correction.
-#' @name compare2
-#'
-#' @param object an object that inherits from lm/gls/lme/lvmfit.
-#' @param df [logical] should the degrees of freedom of the Wald statistic be computed using the Satterthwaite correction?
-#' Otherwise the degrees of freedom are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.
-#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias? Argument passed to \code{sCorrect}.
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' @param par [vector of characters] expression defining the linear hypotheses to be tested.
-#' See the examples section. 
-#' @param contrast [matrix] a contrast matrix defining the left hand side of the linear hypotheses to be tested.
-#' @param robust [logical] should the robust standard errors be used instead of the model based standard errors?
-#' @param null,rhs [vector] the right hand side of the linear hypotheses to be tested.
-#' @param as.lava [logical] should the output be similar to the one returned by \code{lava::compare}?
-#' @param F.test [logical] should a joint test be performed?
-#' @param level [numeric 0-1] the confidence level of the confidence interval.
-#' @param ...  [internal] only used by the generic method.
-#'
-#' @details The \code{par} argument or the arguments \code{contrast} and \code{null} (or equivalently \code{rhs})
-#' specify the set of linear hypotheses to be tested. They can be written:
-#' \deqn{
-#'   contrast * \theta = null
-#' }
-#' where \eqn{\theta} is the vector of the model coefficients. \cr
-#' The \code{par} argument must contain expression(s) involving the model coefficients.
-#' For example \code{"beta = 0"} or \code{c("-5*beta + alpha = 3","-alpha")} are valid expressions if alpha and beta belong to the set of model coefficients.
-#' A contrast matrix and the right hand side will be generated inside the function. \cr
-#' 
-#' When directly specified, the contrast matrix must contain as many columns as there are coefficients in the model (mean and variance coefficients).
-#' Each hypothesis corresponds to a row in the contrast matrix. \cr
-#'
-#' The null vector should contain as many elements as there are rows in the contrast matrix. \cr
-#' 
-#' Arguments \code{rhs} and \code{null} are equivalent.
-#' This redundancy enables compatibility between \code{lava::compare}, \code{compare2}, \code{multcomp::glht}, and \code{glht2}.
-#'
-#' @seealso \code{\link{createContrast}} to create contrast matrices. \cr
-#' \code{\link{sCorrect}} to pre-compute quantities for the small sample correction.
-#' 
-#' @return If \code{as.lava=TRUE} an object of class \code{htest}.
-#' Otherwise a \code{data.frame} object.
-
-## * example - compare2
-#' @examples
-#' #### simulate data ####
-#' set.seed(10)
-#' mSim <- lvm(Y~0.1*X1+0.2*X2)
-#' categorical(mSim, labels = c("a","b","c")) <- ~X1
-#' transform(mSim, Id~Y) <- function(x){1:NROW(x)}
-#' df.data <- lava::sim(mSim, 1e2)
-#'
-#' #### with lm ####
-#' ## direct use of compare2
-#' e.lm <- lm(Y~X1+X2, data = df.data)
-#' anova(e.lm)
-#' compare2(e.lm, par = c("X1b=0","X1c=0"))
-#' 
-#' ## or first compute the derivative of the information matrix
-#' sCorrect(e.lm) <- TRUE
-#' 
-#' ## and define the contrast matrix
-#' C <- createContrast(e.lm, par = c("X1b=0","X1c=0"), add.variance = TRUE)
-#'
-#' ## run compare2
-#' compare2(e.lm, contrast = C$contrast, null = C$null)
-#' compare2(e.lm, contrast = C$contrast, null = C$null, robust = TRUE)
-#' 
-#' #### with gls ####
-#' library(nlme)
-#' e.gls <- gls(Y~X1+X2, data = df.data, method = "ML")
-#'
-#' ## first compute the derivative of the information matrix
-#' sCorrect(e.gls, cluster = 1:NROW(df.data)) <- TRUE
-#' 
-#' compare2(e.gls, par = c("5*X1b+2*X2 = 0","(Intercept) = 0"))
-#' 
-#' #### with lvm ####
-#' m <- lvm(Y~X1+X2)
-#' e.lvm <- estimate(m, df.data)
-#' 
-#' compare2(e.lvm, par = c("-Y","Y~X1b+Y~X1c"))
-#' compare2(e.lvm, par = c("-Y","Y~X1b+Y~X1c"), robust = TRUE)
-#' @concept small sample inference
-#' @export
-`compare2` <-
-  function(object, df, bias.correct, ...) UseMethod("compare2")
-
-## * compare2.lm
-#' @rdname compare2
-#' @export
-compare2.lm <- function(object, df = TRUE, bias.correct = TRUE, ...){
-    sCorrect(object, df = df) <- bias.correct
-    return(.compare2(object, ...))
-}
-
-## * compare2.gls
-#' @rdname compare2
-#' @export
-compare2.gls <- function(object, df = TRUE, bias.correct = TRUE, cluster = NULL, ...){
-    sCorrect(object, df = df, cluster = cluster) <- bias.correct
-    return(.compare2(object, ...))
-}
-
-## * compare2.lme
-#' @rdname compare2
-#' @export
-compare2.lme <- compare2.lm
-
-## * compare2.lvmfit
-#' @rdname compare2
-#' @export
-compare2.lvmfit <- function(object, df = TRUE, bias.correct = TRUE, cluster = NULL, ...){
-    sCorrect(object, df = df) <- bias.correct
-    return(.compare2(object, cluster = cluster, ...))
-}
-
-## * compare2.lm2
-#' @rdname compare2
-#' @export
-compare2.lm2 <- function(object, ...){
-    return(.compare2(object, ...))
-}
-
-## * compare2.gls2
-#' @rdname compare2
-#' @export
-compare2.gls2 <- function(object, ...){
-    return(.compare2(object, ...))
-}
-
-## * compare2.lme2
-#' @rdname compare2
-#' @export
-compare2.lme2 <- function(object, ...){
-    return(.compare2(object, ...))
-}
-
-## * compare2.lvmfit2
-#' @rdname compare2
-#' @export
-compare2.lvmfit2 <- function(object, ...){
-    return(.compare2(object, ...))
-}
-
-## * .compare2
-#' @rdname compare2
-.compare2 <- function(object, par = NULL, contrast = NULL, null = NULL, rhs = NULL,
-                      robust = FALSE, cluster = NULL, df = object$sCorrect$args$df,
-                      as.lava = TRUE, F.test = TRUE, level = 0.95){
-
-
-    if(!is.null(null) && !is.null(rhs)){
-        stop("Arguments \'null\' and \'rhs\' should not be both specified \n")
-    }
-    if(!is.logical(robust)){ 
-        stop("Argument \'robust\' should be TRUE or FALSE \n")
-    }
-    if(!is.logical(df) && (robust == FALSE || df %in% c(0:3) == FALSE)){     ## 2-3 hidden values
-        stop("Argument \'df\' should be TRUE or FALSE \n")
-    }
-
-    if(robust){
-        factor.dRvcov <- lava.options()$factor.dRvcov
-
-        if(!is.null(cluster)){
-            
-            if(length(cluster)==1){
-                ## reconstruct cluster variable
-                if(inherits(object,"lvmfit")){
-                    data <- object$data$model.frame
-                }else{
-                    data <- extractData(object)
-                }
-                
-                if(cluster %in%  names(data) == FALSE){
-                    stop("Could not find variable ",cluster," (argument \'cluster\') in argument \'data\' \n")
-                }else{
-                    cluster <- data[[cluster]]
-                }            
-                
-            }else if(stats::nobs(object)!=length(cluster)){
-                stop("length of argument \'cluster\' does not match number of rows of the score matrix \n")
-            }
-            ls.indexCluster <- tapply(1:length(cluster),cluster,list)
-            n.cluster <- length(ls.indexCluster)
-        }else{
-            n.cluster <- stats::nobs(object)
-        }
-    }
-
-    
-    ## ** extract information
-    ## 0-order: param
-    param <- object$sCorrect$param
-
-    n.param <- length(param)
-    name.param <- names(param)
-
-    ## 1-order: score
-    if(robust){
-        score <- object$sCorrect$score
-    }
-    
-    ## 2-order: variance covariance
-    vcov.param <- vcov2(object)
-    attr(vcov.param, "warning") <- NULL
-    warn <- attr(vcov2(object), "warning")
-    
-    if(robust){
-        rvcov.param <- crossprod(iid2(object, cluster = cluster))
-        hessian <- object$sCorrect$hessian
-    }
-
-    ## 3-order: derivative of the variance covariance
-    if(df>0){
-        dVcov.param <- object$sCorrect$dVcov.param
-        keep.param <- dimnames(dVcov.param)[[3]]
-    }
-    
-    ## ** Prepare for the robust case 
-    if(df>1 && robust){ ## not used if df=1
-
-        ## update the score/hessian/derivative at the cluster level
-        if(!is.null(cluster)){            
-            scoreSave <- score
-            hessianSave <- hessian
-
-            score <- matrix(NA, nrow = n.cluster, ncol = NCOL(score),
-                            dimnames = list(NULL, colnames(score)))
-            hessian <- array(NA, dim = c(NCOL(score), NCOL(score), n.cluster),
-                             dimnames = list(colnames(score), colnames(score), NULL))            
-            for(iCluster in 1:n.cluster){ ## iCluster <- 1
-                score[iCluster,] <- colSums(scoreSave[ls.indexCluster[[iCluster]],,drop=FALSE])
-                hessian[,,iCluster] <- apply(hessianSave[,,ls.indexCluster[[iCluster]],drop=FALSE],1:2,sum)
-            }
-            ## compute derivative
-            name.3deriv <- dimnames(dVcov.param)[[3]]
-            dRvcov.param <- array(NA, dim = c(n.param,n.param,n.param), dimnames = list(name.param,name.param,name.param))
-            for(iP in 1:n.param){ ## iP <- 1
-                ## if(name.param[iP] %in% name.3deriv){
-                    ## term1 <- dVcov.param[,,name.param[iP]] %*% crossprod(score) %*% vcov.param
-                ## }else{
-                    ## term1 <- matrix(0, nrow = n.param, ncol = n.param)
-                ## }
-                ## term2 <- vcov.param %*% hessian[iP,,] %*% score %*% vcov.param
-                ## dRvcov.param[,,iP] <- term1 + t(term1) + term2 + t(term2)
-
-                term2 <- vcov.param %*% hessian[iP,,] %*% score %*% vcov.param
-                dRvcov.param[,,iP] <- term2 + t(term2)
-            }
-        }else{
-            dRvcov.param <- object$sCorrect$dRvcov.param
-        }
-    }
-    
-    ### ** normalize linear hypotheses
-    if(!is.null(par)){
-        
-        if(!is.null(contrast)){
-            stop("Argument \'par\' and argument \'contrast\' should not simultaneously specified")
-        }else if(!is.null(null)){
-            stop("Argument \'par\' and argument \'null\' should not simultaneously specified")
-        }else{
-            res.C <- createContrast(par, name.param = name.param, add.rowname = TRUE)
-            contrast <- res.C$contrast
-            null <- res.C$null
-        }
-        
-    }else{
-        
-        if(is.null(contrast)){
-            stop("Argument \'contrast\' and argument \'par\' cannot be both NULL \n",
-                 "Please specify the null hypotheses using one of the two arguments \n")
-        }
-        if(is.null(colnames(contrast))){
-            stop("Argument \'contrast\' must have column names \n")
-        }
-        if(any(colnames(contrast) %in% name.param == FALSE)){
-            txt <- setdiff(colnames(contrast), name.param)
-            stop("Argument \'contrast\' has incorrect column names \n",
-                 "invalid name(s): \"",paste(txt, collapse = "\" \""),"\"\n")
-        }
-        if(any(name.param %in% colnames(contrast) == FALSE)){
-            txt <- setdiff(name.param, colnames(contrast))
-            stop("Argument \'contrast\' has incorrect column names \n",
-                 "missing name(s): \"",paste(txt, collapse = "\" \""),"\"\n")
-        }
-        if(NCOL(contrast) != n.param){
-            stop("Argument \'contrast\' should be a matrix with ",n.param," columns \n")
-        }
-        ## reorder columns according to coefficients
-        contrast <- contrast[,name.param,drop=FALSE]
-        if(any(abs(svd(contrast)$d)<1e-10)){
-            stop("Argument \'contrast\' is singular \n")
-        }
-        if(is.null(null)){
-            null <- setNames(rep(0,NROW(contrast)),rownames(contrast))
-        }else if(length(null)!=NROW(contrast)){
-            stop("The length of argument \'null\' does not match the number of rows of argument \'contrast' \n")
-        }
-        if(is.null(rownames(contrast))){
-            rownames(contrast) <- .contrast2name(contrast, null = null)
-            null <- setNames(null, rownames(contrast))
-        }
-    }
-    
-    ### ** prepare export
-    name.hypo <- rownames(contrast)
-    n.hypo <- NROW(contrast)
-
-    df.table <- as.data.frame(matrix(NA, nrow = n.hypo, ncol = 5,
-                                     dimnames = list(name.hypo,
-                                                     c("estimate","std","statistic","df","p-value"))
-                                     ))
-
-    ## ** Univariate Wald test
-    C.p <- (contrast %*% param) - null
-    if(robust){
-        C.vcov.C <- contrast %*% rvcov.param %*% t(contrast)
-    }else{
-        C.vcov.C <- contrast %*% vcov.param %*% t(contrast)
-    }
-    sd.C.p <- sqrt(diag(C.vcov.C))
-    stat.Wald <- C.p/sd.C.p
-
-    ## store
-    df.table$estimate <- as.numeric(C.p)
-    df.table$std <- as.numeric(sd.C.p)
-    df.table$statistic <- as.numeric(stat.Wald)
-
-    ##  degrees of freedom
-    if(df>0 && !is.null(dVcov.param)){
-
-        ## univariate
-        if(robust == FALSE){
-            df.Wald  <- dfSigma(contrast = contrast,
-                                vcov = vcov.param,
-                                dVcov = dVcov.param,
-                                keep.param = keep.param)
-        }else if(robust == TRUE){
-
-            if(df == TRUE){
-                df.Wald <- dfSigma(contrast = contrast,
-                                   vcov = vcov.param,
-                                   dVcov = dVcov.param,
-                                   keep.param = keep.param)
-            }else if(df == 2){
-                df.Wald  <- dfSigma(contrast = contrast,
-                                    vcov = rvcov.param,
-                                    dVcov = dRvcov.param * factor.dRvcov,
-                                    keep.param = name.param)
-            }else if(df == 3){
-                df.Wald <- dfSigmaRobust(contrast = contrast,
-                                         vcov = vcov.param,
-                                         rvcov = rvcov.param,
-                                         score = score)
-            }
-        }
-    }else{
-        df.Wald <- rep(Inf, n.hypo)
-        df.F <- Inf
-    }
-
-    ## store
-    df.table$df <- as.numeric(df.Wald)
-    df.table$`p-value` <- as.numeric(2*(1-stats::pt(abs(df.table$statistic), df = df.table$df)))
-    
-    ## ** Multivariate Wald test
-    df.table <- rbind(df.table, global = rep(NA,5))
-    error <- NULL
-    if(F.test){
-        ## statistic
-        iC.vcov.C <- try(solve(C.vcov.C), silent = TRUE)
-        
-        if(!inherits(iC.vcov.C,"try-error")){
-            stat.F <- t(C.p) %*% iC.vcov.C %*% (C.p) / n.hypo
-
-            ## df (independent t statistics)
-            if(df>0){
-                svd.tempo <- eigen(iC.vcov.C)
-                D.svd <- diag(svd.tempo$values, nrow = n.hypo, ncol = n.hypo)
-                P.svd <- svd.tempo$vectors
-     
-                C.anova <- sqrt(D.svd) %*% t(P.svd) %*% contrast
-
-                if(df == TRUE){
-                    nu_m <- dfSigma(contrast = C.anova,
-                                    vcov = vcov.param,
-                                    dVcov = dVcov.param,
-                                    keep.param = keep.param)
-                } else if(df == 2){
-                    nu_m <- dfSigma(contrast = C.anova,
-                                    vcov = rvcov.param,
-                                    dVcov = dRvcov.param * factor.dRvcov,
-                                    keep.param = keep.param)
-                } else if(df == 3){
-                    nu_m <- dfSigmaRobust(contrast = C.anova,
-                                          vcov = vcov.param,
-                                          rvcov = rvcov.param,
-                                          score = score)
-                }
-        
-                EQ <- sum(nu_m/(nu_m-2))
-                df.F <- 2*EQ / (EQ - n.hypo)
-            }else{
-                df.F <- Inf
-            }
-            ## store
-            df.table["global", "statistic"] <- as.numeric(stat.F)
-            df.table["global", "df"] <- df.F
-            df.table["global", "p-value"] <- 1 - stats::pf(df.table["global", "statistic"],
-                                                           df1 = n.hypo,
-                                                           df2 = df.table["global", "df"])
-        }else{
-            warning("Unable to compute the degrees of freedom for the F-test \n")
-            error <- iC.vcov.C
-        }
-    }
-
-    ## ** export
-    if(as.lava == TRUE){
-        level.inf <- (1-level)/2
-        level.sup <- 1-level.inf
-
-        level.inf.label <- paste0(100*level.inf,"%")
-        level.sup.label <- paste0(100*level.sup,"%")
-
-        df.estimate <- matrix(NA, nrow = n.hypo, ncol = 5,
-                              dimnames = list(name.hypo,c("Estimate", "Std.Err", "df", level.inf.label, level.sup.label)))
-        df.estimate[,"Estimate"] <- df.table[name.hypo,"estimate"]
-        df.estimate[,"Std.Err"] <- df.table[name.hypo,"std"]
-        df.estimate[,"df"] <- df.table[name.hypo,"df"]
-        df.estimate[,level.inf.label] <- df.table[name.hypo,"estimate"] + stats::qt(level.inf, df = df.table[name.hypo,"df"]) * df.table[name.hypo,"std"]
-        df.estimate[,level.sup.label] <- df.table[name.hypo,"estimate"] + stats::qt(level.sup, df = df.table[name.hypo,"df"]) * df.table[name.hypo,"std"]
-
-        out <- list(statistic = setNames(df.table["global","statistic"],"F-statistic"),
-                    parameter = setNames(round(df.table["global","df"],2), paste0("df1 = ",n.hypo,", df2")), ## NOTE: the name cannot be changed to 'coefficients' because of lava
-                    p.value = df.table["global","p-value"],
-                    method = c("- Wald test -", "", "Null Hypothesis:", name.hypo),
-                    estimate = df.estimate,
-                    vcov = C.vcov.C,
-                    coef = C.p[,1],
-                    null = null,
-                    cnames = name.hypo                    
-                    )
-        if(robust){
-            colnames(out$estimate)[2] <- "robust SE"
-        }        
-        attr(out, "B") <- contrast
-        class(out) <- "htest"
-    }else{
-        out <- df.table
-        attr(out, "warning") <- warn
-        attr(out, "contrast") <- contrast
-    }
-    attr(out,"error") <- error
-    return(out)
-}
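
For orientation, the univariate Wald step above boils down to a few lines of base R. A minimal sketch with purely hypothetical numbers (two coefficients, one linear hypothesis; the df value stands in for what dfSigma(), defined below, would return):

## toy values: two coefficients and their model-based variance-covariance matrix
param      <- c("Y~X" = 0.62, "Y~~Y" = 1.10)
vcov.param <- matrix(c(0.04, 0, 0, 0.06), 2, 2,
                     dimnames = list(names(param), names(param)))
## one hypothesis: Y~X = 0.5
contrast <- matrix(c(1, 0), nrow = 1, dimnames = list("Y~X=0.5", names(param)))
null     <- 0.5

estimate  <- (contrast %*% param) - null                 ## C %*% theta - null
se        <- sqrt(diag(contrast %*% vcov.param %*% t(contrast)))
statistic <- estimate / se                               ## Wald statistic
df        <- 20                                          ## e.g. from dfSigma() below
2 * (1 - pt(abs(statistic), df = df))                    ## two-sided p-value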
-
-## * dfSigma
-##' @title Degrees of Freedom for the Chi-Square Test
-##' @description Computation of the degrees of freedom of the chi-squared distribution
-##' relative to the model-based variance.
-##'
-##' @param contrast [numeric matrix] the linear combinations of parameters to test, one hypothesis per row.
-##' @param vcov [numeric matrix] the variance-covariance matrix of the parameters.
-##' @param dVcov [numeric array] the first derivative of the variance-covariance matrix of the parameters.
-##' @param keep.param [character vector] the names of the parameters for which the first derivative of the variance-covariance matrix is non-zero.
-##' 
-dfSigma <- function(contrast, vcov, dVcov, keep.param){
-    ## iLink <- "LogCau~eta"
-    C.vcov.C <- rowSums(contrast %*% vcov * contrast) ## variance matrix of the linear combination
-    ## C.vcov.C - vcov[iLink,iLink]
-
-    C.dVcov.C <- sapply(keep.param, function(x){
-        rowSums(contrast %*% dVcov[,,x] * contrast)
-    })
-    ## C.dVcov.C - dVcov[iLink,iLink,]
-    numerator <- 2 *(C.vcov.C)^2
-    ## numerator - 2*vcov[iLink,iLink]^2
-    denom <- rowSums(C.dVcov.C %*% vcov[keep.param,keep.param,drop=FALSE] * C.dVcov.C)
-    ## denom - t(dVcov[iLink,iLink,]) %*% vcov[keep.param,keep.param,drop=FALSE] %*% dVcov[iLink,iLink,]
-    df <- numerator/denom
-    return(df)
-}
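
As a sanity check, dfSigma() recovers the textbook degrees of freedom in the simplest case. A minimal sketch with hypothetical values, assuming dfSigma() above is available and plugging in the ML moments of an i.i.d. Gaussian sample (Var(mu.hat) = sigma2/n, Var(sigma2.hat) = 2*sigma2^2/n):

n <- 50; sigma2 <- 4
name.param <- c("mu", "sigma2")
vcov.param <- diag(c(sigma2/n, 2*sigma2^2/n))
dimnames(vcov.param) <- list(name.param, name.param)
## derivative of vcov.param with respect to sigma2, the only variance parameter
dVcov.param <- array(0, dim = c(2, 2, 1),
                     dimnames = list(name.param, name.param, "sigma2"))
dVcov.param["mu","mu","sigma2"]         <- 1/n
dVcov.param["sigma2","sigma2","sigma2"] <- 4*sigma2/n
## df for testing the mean: contrast (1,0)
contrast <- matrix(c(1, 0), nrow = 1, dimnames = list("mu", name.param))
dfSigma(contrast, vcov = vcov.param, dVcov = dVcov.param, keep.param = "sigma2")
## returns 50, i.e. n, the large-sample analogue of the one-sample t-test df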
-
-## * dfSigmaRobust
-##' @title Degrees of Freedom for the Robust Chi-Square Test
-##' @description Computation of the degrees of freedom of the chi-squared distribution
-##' relative to the robust variance.
-##'
-##' @param contrast [numeric matrix] the linear combinations of parameters to test, one hypothesis per row.
-##' @param vcov [numeric matrix] the variance-covariance matrix of the parameters.
-##' @param rvcov [numeric matrix] the robust variance-covariance matrix of the parameters.
-##' @param score [numeric matrix] the individual score for each parameter.
-##'
-##' @details When contrast is the identity matrix, this function computes the moments of the sandwich estimator
-##' and the degrees of freedom of the approximate t-test as described in Pan and Wall (2002), sections 2 and 3.1.
-##'
-##' @references
-##' Wei Pan and Melanie M. Wall, Small-sample adjustments in using the sandwich variance estimator in generalized estimating equations. Statistics in Medicine (2002) 21:1429-1441.
-##' 
-dfSigmaRobust <- function(contrast, vcov, rvcov, score){
-    
-    ## ** prepare
-    n <- NROW(score)
-
-    ## apply contrasts
-    vcov.S <- vcov %*% t(contrast)
-
-    ## ** compute moments of rvcov
-    ## fast
-    E.score2 <- crossprod(score)
-    iid.score2 <- lapply(1:n, function(iRow){
-        (tcrossprod(score[iRow,]) - E.score2/n)^2
-    })
-
-    var.rvcov <- t(vcov.S^2) %*% Reduce("+",iid.score2) %*% vcov.S^2
-
-    ## slow    
-    E.rvcov <- contrast %*% rvcov %*% t(contrast)
-    ## iid.rvcov <- lapply(1:n, function(iRow){
-    ##     (t(vcov.S) %*% tcrossprod(score[iRow,]) %*% vcov.S - E.rvcov/n)^2
-    ## })
-    ## var.rvcov <- Reduce("+",iid.rvcov)
-
-    ## ** export
-    df.rvcov <- (2*E.rvcov^2)/var.rvcov
-    
-    return(setNames(diag(df.rvcov), rownames(contrast)))
-}
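
A minimal sketch of dfSigmaRobust() on a plain linear model, using toy data and assuming the usual sandwich construction (bread = model-based vcov, meat = crossproduct of the individual scores); the per-observation score of the mean parameters is x_i * e_i / sigma^2, and dfSigmaRobust() above is assumed to be available:

set.seed(1)
n <- 40
d <- data.frame(x = rnorm(n)); d$y <- 1 + 0.5 * d$x + rnorm(n)
fit <- lm(y ~ x, data = d)

vcov.param  <- vcov(fit)
score       <- model.matrix(fit) * residuals(fit) / sigma(fit)^2 ## individual scores
rvcov.param <- vcov.param %*% crossprod(score) %*% vcov.param    ## sandwich estimator

contrast <- diag(2)
dimnames(contrast) <- list(names(coef(fit)), names(coef(fit)))
dfSigmaRobust(contrast, vcov = vcov.param, rvcov = rvcov.param, score = score)
## one df per coefficient, cf. Pan and Wall (2002)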
-
-
-##----------------------------------------------------------------------
-### compare2.R ends here
diff --git a/R/conditionalMoment.R b/R/conditionalMoment.R
deleted file mode 100644
index 2436a12..0000000
--- a/R/conditionalMoment.R
+++ /dev/null
@@ -1,413 +0,0 @@
-### conditionalMoment.R --- 
-#----------------------------------------------------------------------
-## author: Brice Ozenne
-## created: okt 27 2017 (16:59) 
-## Version: 
-## last-updated: feb  8 2019 (11:47) 
-##           By: Brice Ozenne
-##     Update #: 1139
-#----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-#----------------------------------------------------------------------
-## 
-### Code:
-
-## * conditionalMoment - documentation
-#' @title Prepare the Computation of score2
-#' @description Compute the conditional mean and variance,
-#' and their first and second derivative regarding the model parameters.
-#' @name conditionalMoment
-#' 
-#' @param object,x a latent variable model.
-#' @param data [data.frame] data set.
-#' @param formula [formula] two-sided linear formula.
-#' @param param,p [numeric vector] the fitted coefficients.
-#' @param attr.param [character vector] the type of each coefficient
-#' (e.g. mean or variance coefficient).
-#' @param ref.group [character vector] the levels of the variable defining the variance component in a generic covariance matrix.
-#' @param second.order [logical] should the terms relative to the third derivative of the likelihood be pre-computed?
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' @param n.cluster [integer >0] the number of i.i.d. observations.
-#' @param n.endogenous [integer >0] the number of outcomes.
-#' @param usefit,value [logical] If TRUE the coefficients estimated by the model are used to pre-compute quantities. Only for lvmfit objects.
-#' @param name.endogenous [character vector, optional] name of the endogenous variables
-#' @param name.latent [character vector, optional] name of the latent variables
-#' @param ... [internal] only used by the generic method or by the <- methods.
-#' 
-#' @details For lvmfit objects, there are two levels of pre-computation:
-#' \itemize{
-#' \item a basic one that does not involve the model coefficients (\code{conditionalMoment.lvm}).
-#' \item an advanced one that requires the model coefficients (\code{conditionalMoment.lvmfit}).
-#' }
-#' 
-#' @examples
-#' m <- lvm(Y1~eta,Y2~eta,Y3~eta)
-#' latent(m) <- ~eta
-#'
-#' d <- lava::sim(m,1e2)
-#' e <- estimate(m, d)
-#'
-#' ## basic pre-computation
-#' res1 <- conditionalMoment(e, data = d,
-#'                          first.order = FALSE, second.order = FALSE,
-#'                          name.endogenous = endogenous(e),
-#'                          name.latent = latent(e), usefit = FALSE)
-#' res1$skeleton$Sigma
-#' 
-#' ## full pre-computation
-#' res2 <- conditionalMoment(e, param = coef(e), data = d,
-#'                          first.order = FALSE, second.order = FALSE,
-#'                          name.endogenous = endogenous(e),
-#'                          name.latent = latent(e), usefit = TRUE
-#' )
-#' res2$value$Sigma
-#'
-#' @concept small sample inference
-#' @concept derivative of the score equation
-#' 
-#' @keywords internal
-#' @export
-`conditionalMoment` <-
-  function(object, ...) UseMethod("conditionalMoment")
-
-
-## * conditionalMoment.lm
-#' @rdname conditionalMoment
-#' @export
-conditionalMoment.lm <- function(object, data, param,
-                                 name.endogenous,
-                                 first.order, second.order, ...){
-
-    out <- list(param = param,
-                name.3deriv = "sigma2")
-    
-    ## design matrix
-    X <- model.matrix(formula(object), data)
-
-    ## linear predictor
-    out$mu <- X %*% param[colnames(X)]
-    
-    ## residuals variance
-    out$Omega <- matrix(param["sigma2"], nrow = 1, ncol = 1,
-                    dimnames = list(name.endogenous, name.endogenous))
-    
-    ## ** first order
-    if(first.order){
-        out$dmu <- lapply(1:NCOL(X), function(i){
-            M <- X[,i,drop=FALSE]
-            colnames(M) <- name.endogenous
-            return(M)
-        })
-        names(out$dmu) <- colnames(X)
-        out$dOmega = list(sigma2 = matrix(1))
-    }
-
-    ## ** second order
-    if(second.order){
-        out$d2mu <- NULL
-        out$d2Omega <- NULL
-    }
-
-    ## ** export
-    return(out)
-}
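
A minimal sketch of the lm method above, with simulated toy data and assuming the (now removed) 1.x helpers shown here are sourced; note that param must contain the mean coefficients plus a "sigma2" element (the ML residual variance):

set.seed(1)
d <- data.frame(X = rnorm(10)); d$Y <- 1 + 0.5 * d$X + rnorm(10)
fit <- lm(Y ~ X, data = d)
param <- c(coef(fit), sigma2 = mean(residuals(fit)^2))

mom <- conditionalMoment(fit, data = d, param = param,
                         name.endogenous = "Y",
                         first.order = TRUE, second.order = FALSE)
mom$Omega      ## 1x1 matrix containing sigma2
names(mom$dmu) ## one entry per mean coefficient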
-
-## * conditionalMoment.gls
-#' @rdname conditionalMoment
-#' @export
-conditionalMoment.gls <- function(object, data, formula, 
-                                  param, attr.param, ref.group,
-                                  first.order, second.order,
-                                  index.Omega, vec.OmegaMat, cluster, n.cluster,
-                                  name.endogenous, n.endogenous,
-                                  ...){
-
-    if(first.order == FALSE && second.order == TRUE){
-        stop("Cannot pre-compute quantities for the second order derivatives ",
-             "without those for the first order derivatives \n")
-    }
-### ** prepare
-
-    ## *** coefficients
-    name.varcoef <- attr.param$var.coef
-    name.corcoef <- attr.param$cor.coef
-    n.varcoef <- length(name.varcoef)
-    n.corcoef <- length(name.corcoef)
-    var.coef <- param[name.varcoef]
-    cor.coef <- param[name.corcoef]
-        
-    class.var <- class(object$modelStruct$varStruct)
-    class.cor <- class(object$modelStruct$corStruct)
-
-    
-    ## *** design matrix    
-    X <- stats::model.matrix(formula, data)
-    X <- X[,attr.param$mean.coef,drop=FALSE] ## drop unused columns (e.g. factor level with 0 occurrences)
-    attr(X,"assign") <- NULL
-    attr(X,"contrasts") <- NULL
-    
-    ## *** variance terms
-    if("NULL" %in% class.var == FALSE){
-        name.otherVar <- setdiff(names(var.coef),"sigma2")
-        factor.varcoef <- setNames(c(1,var.coef[name.otherVar]),
-                                   attr(object$modelStruct$varStruct,"groupNames"))
-        sigma2.base0 <- factor.varcoef[ref.group]        
-    }else{
-        name.otherVar <- NULL
-        sigma2.base0 <- setNames(rep(1, n.endogenous), name.endogenous)
-    }
-    sigma2.base <- sigma2.base0 * var.coef["sigma2"]
-
-    ## *** correlation terms
-    if("NULL" %in% class.cor == FALSE){
-        M.corcoef <- matrix("", n.endogenous, n.endogenous,
-                            dimnames = list(name.endogenous,name.endogenous))
-        M.corcoef[which(lower.tri(M.corcoef))] <- name.corcoef
-        M.corcoef <- symmetrize(M.corcoef)
-
-        index.lower.tri <- which(lower.tri(M.corcoef))
-        indexArr.lower.tri <- which(lower.tri(M.corcoef), arr.ind = TRUE)
-
-        Msigma2.base0 <- matrix(0, n.endogenous, n.endogenous,
-                                dimnames = list(name.endogenous, name.endogenous))
-        Msigma2.base0[index.lower.tri] <- apply(indexArr.lower.tri, 1, function(x){sqrt(prod(sigma2.base0[x]))})
-        Msigma2.base0 <- symmetrize(Msigma2.base0)
-    }else{
-        M.corcoef <- NULL
-        Msigma2.base0 <- NULL
-        index.lower.tri <- NULL
-        indexArr.lower.tri <- NULL
-    }
-
-    ## *** export
-    out <- list(param = param,
-                name.3deriv = c(name.varcoef,name.corcoef),
-                skeleton = list(class.cor = class.cor,
-                                class.var = class.var,
-                                sigma2.base0 = sigma2.base0,
-                                Msigma2.base0 = Msigma2.base0,
-                                ref.group = ref.group,
-                                n.endogenous = n.endogenous,
-                                name.endogenous = name.endogenous,
-                                M.corcoef = M.corcoef,
-                                index.lower.tri = index.lower.tri,
-                                indexArr.lower.tri = indexArr.lower.tri,
-                                var.coef = var.coef,
-                                name.varcoef = name.varcoef,
-                                n.varcoef = n.varcoef,
-                                name.otherVar = name.otherVar,
-                                cor.coef = cor.coef,
-                                name.corcoef = name.corcoef,
-                                n.corcoef = n.corcoef,
-                                cluster  = cluster,
-                                n.cluster = n.cluster))
-    
-### ** Reconstruct conditional mean
-    ## fill via vec.OmegaMat (not row by row) because of the way vec.OmegaMat was computed
-    out$mu <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
-                     dimnames = list(NULL, name.endogenous))
-    out$mu[vec.OmegaMat] <- X %*% param[colnames(X)]
-    
-    
-### ** Reconstruct conditional variance covariance matrix
-    out$Omega <- .getVarCov2(object,
-                             param = param,
-                             attr.param = attr.param,
-                             name.endogenous = name.endogenous,
-                             n.endogenous = n.endogenous,
-                             ref.group = ref.group)
-    
-### ** first order
-    if(first.order){
-        outD1 <- skeletonDtheta(object,
-                                class.cor = class.cor,
-                                class.var = class.var,
-                                X = X,  
-                                sigma2.base0 = sigma2.base0,
-                                Msigma2.base0 = Msigma2.base0,
-                                M.corcoef = M.corcoef,
-                                ref.group = ref.group,
-                                name.endogenous = name.endogenous,
-                                n.endogenous = n.endogenous,
-                                index.lower.tri = index.lower.tri,
-                                indexArr.lower.tri = indexArr.lower.tri,
-                                cluster = cluster,
-                                n.cluster = n.cluster,
-                                var.coef = var.coef,
-                                name.varcoef = name.varcoef,
-                                name.otherVar = name.otherVar,
-                                n.varcoef = n.varcoef,
-                                cor.coef = cor.coef,
-                                name.corcoef = name.corcoef,
-                                n.corcoef = n.corcoef,
-                                index.Omega = index.Omega,
-                                update.mean = TRUE, update.variance = TRUE,
-                                ...) ## ... to pass coef.rancoef
-        out$dmu <- outD1$dmu
-        out$dOmega <- outD1$dOmega
-    }
-    
-### ** second order
-    if(second.order){
-        out$d2mu <- NULL
-        out$d2Omega <- skeletonDtheta2(object,
-                                       dOmega = out$dOmega,
-                                       class.cor = class.cor,
-                                       class.var = class.var,
-                                       M.corcoef = M.corcoef,
-                                       n.endogenous = n.endogenous,
-                                       index.lower.tri = index.lower.tri,
-                                       indexArr.lower.tri = indexArr.lower.tri,
-                                       var.coef = var.coef,
-                                       name.varcoef = name.varcoef,
-                                       name.otherVar = name.otherVar,
-                                       n.varcoef = n.varcoef,
-                                       cor.coef = cor.coef,
-                                       name.corcoef = name.corcoef)
-    }
-    
-### ** export
-    return(out)
-    
-}
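
The variance and correlation bookkeeping above reconstructs the marginal covariance as sqrt(sigma2_i * sigma2_j) times the correlation coefficient. A minimal numeric sketch with hypothetical values for three repeated measurements (all names and numbers are toy choices):

sigma2       <- 2                              ## common variance parameter
sigma2.base0 <- c(t1 = 1, t2 = 1.4, t3 = 1.9)  ## relative variance factors
sigma2.base  <- sigma2.base0 * sigma2          ## marginal variances
rho <- matrix(c(  1, 0.5, 0.3,
                0.5,   1, 0.4,
                0.3, 0.4,   1), nrow = 3,
              dimnames = list(names(sigma2.base0), names(sigma2.base0)))
Omega <- sqrt(tcrossprod(sigma2.base)) * rho   ## marginal covariance matrix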
-
-## * conditionalMoment.lme
-#' @rdname conditionalMoment
-#' @export
-conditionalMoment.lme <- function(object, attr.param, ...){
-
-    name.rancoef <- attr.param$ran.coef
-    out <- conditionalMoment.gls(object, attr.param = attr.param,
-                                 name.rancoef = name.rancoef, ...)
-
-    ##  the derivative regarding the random effect is added by skeletonDtheta.lme
-    out$name.3deriv <- c(out$name.3deriv, name.rancoef)
-    out$skeleton$class.ran <- class(object$modelStruct$reStruct)
-
-    return(out)
-}
-## * conditionalMoment.lvm
-#' @rdname conditionalMoment
-#' @export
-conditionalMoment.lvm <- function(object, data,
-                                  first.order, second.order,
-                                  name.endogenous, name.latent,
-                                  ...){
-
-    if(first.order == FALSE && second.order == TRUE){
-        stop("Cannot pre-compute quantities for the second order derivatives ",
-             "without those for the first order derivatives \n")
-    }
-
-### ** Initialize conditional moments   
-    Moment <- skeleton(object,
-                       name.endogenous = name.endogenous, 
-                       name.latent = name.latent, 
-                       as.lava = TRUE)
-
-### ** Initialize partial derivatives of the conditional moments
-    if(first.order){
-        dMoment <- skeletonDtheta(object, data = data,
-                                  df.param.all = Moment$df.param,
-                                  param2originalLink = Moment$param2originalLink,
-                                  name.endogenous = name.endogenous, 
-                                  name.latent = name.latent)
-    }else{
-        dMoment <- NULL
-    }
-    
-### ** Initialize second order partial derivatives of the conditional moments
-    if(second.order){
-        d2Moment <- skeletonDtheta2(object, data = data,
-                                    df.param.all = Moment$df.param,
-                                    param2originalLink = Moment$param2originalLink,
-                                    name.latent = name.latent)
-    }else{
-        d2Moment <- NULL
-    }
-
-### ** Export
-    return(c(Moment, list(dMoment.init = dMoment, d2Moment.init = d2Moment)))
-}
-    
-    
-## * conditionalMoment.lvmfit
-#' @rdname conditionalMoment
-#' @export
-conditionalMoment.lvmfit <- function(object, data, param, 
-                                     first.order, second.order, usefit,
-                                     ...){
-
-### ** normalize arguments
-    name.endogenous <- endogenous(object)
-    n.endogenous <- length(name.endogenous)
-    name.latent <- latent(object)
-    n.latent <- length(name.latent)
-
-    data <- as.matrix(data[,lava::manifest(object),drop=FALSE])
-
-### ** initialize
-    if(is.null(object$conditionalMoment)){       
-        object$conditionalMoment <- conditionalMoment(lava::Model(object),
-                                                      data = data,
-                                                      first.order = first.order,
-                                                      second.order = second.order,
-                                                      name.endogenous = name.endogenous,
-                                                      name.latent = name.latent)
-
-        ##  param with non-zero third derivative
-        type.3deriv <- c("alpha","Gamma","Lambda","B","Psi_var","Sigma_var","Psi_cov","Sigma_cov")
-        index.keep <- intersect(which(!is.na(object$conditionalMoment$df.param$lava)),
-                                which(object$conditionalMoment$df.param$detail %in% type.3deriv)
-                                )    
-        object$conditionalMoment$name.3deriv <- object$conditionalMoment$df.param[index.keep, "originalLink"]
-    }
-
-### ** update according to the value of the model coefficients
-    if(usefit){
-
-        ## *** conditional moments
-        object$conditionalMoment$value <- skeleton(object, data = data, param = param,
-                                                   name.endogenous = name.endogenous, 
-                                                   name.latent = name.latent)
-
-        if(object$conditionalMoment$skeleton$toUpdate["param"]){
-            object$conditionalMoment$param <- coef(object)
-        }
-        if(object$conditionalMoment$skeleton$toUpdate["mu"]){            
-            if(n.latent==0){
-                object$conditionalMoment$mu <- object$conditionalMoment$value$nu.XK
-            }else{
-                object$conditionalMoment$mu <- object$conditionalMoment$value$nu.XK + object$conditionalMoment$value$alpha.XGamma.iIB %*% object$conditionalMoment$value$Lambda
-            }            
-        }
-        if(object$conditionalMoment$skeleton$toUpdate["Omega"]){
-            object$conditionalMoment$Omega <- getVarCov2(object)
-        }
-        
-        ## *** first order derivatives
-        if(first.order){            
-            out <- skeletonDtheta(object,
-                                  name.endogenous = name.endogenous, 
-                                  name.latent = name.latent)
-            object$conditionalMoment$dmu <- out$dmu
-            object$conditionalMoment$dOmega <- out$dOmega            
-        }
-
-        ## *** second order derivatives
-        if(second.order){
-            out2 <- skeletonDtheta2(object)
-            object$conditionalMoment$d2mu <- out2$d2mu
-            object$conditionalMoment$d2Omega <- out2$d2Omega
-        }
-       
-    }
-     
-### ** Export
-    return(object$conditionalMoment)
-}
-
diff --git a/R/createContrast.R b/R/createContrast.R
index 506968c..e2b1a5b 100644
--- a/R/createContrast.R
+++ b/R/createContrast.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: jan 31 2018 (12:05) 
 ## Version: 
-## Last-Updated: mar  4 2019 (11:14) 
+## Last-Updated: jan 18 2022 (10:38) 
 ##           By: Brice Ozenne
-##     Update #: 264
+##     Update #: 491
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -21,23 +21,21 @@
 #' The contrast matrix will contains the hypotheses in rows and the model coefficients in columns.
 #' @name createContrast
 #' 
-#' @param object a \code{ls.lvmfit} object.
-#' @param par [vector of characters] expression defining the linear hypotheses to be tested. See the examples section. 
-#' @param add.variance [logical] should the variance coefficients be considered as model coefficients?
-#' Required for lm, gls, and lme models.
-#' @param var.test [character] a regular expression that is used to identify the coefficients to be tested using \code{grep}. Each coefficient will be tested in a separate hypothesis. When this argument is used, the argument \code{par} is disregarded.
-#' @param diff.first [logical] should the contrasts between the first and any of the other coefficients define the null hypotheses.
-#' @param name.param [internal] the names of all the model coefficients.
-#' @param add.rowname [internal] should a name be defined for each hypothesis.
-#' @param rowname.rhs should the right hand side of the null hypothesis be added to the name.
-#' @param ... [internal] Only used by the generic method.
+#' @param object a \code{lvmfit} object or a list of \code{lvmfit} objects.
+#' @param linfct [vector of characters] expression defining the linear hypotheses to be tested.
+#' Can also be a regular expression (of length 1) that is used to identify the coefficients to be tested using \code{grep}.
+#' See the examples section.
+#' @param ... Argument to be passed to \code{.createContrast}:
+#' \itemize{
+#' \item diff.first [logical] should the contrasts between the first and any of the other coefficients define the null hypotheses.
+#' \item add.rowname [logical] add rownames to the contrast matrix and names to the right-hand side.
+#' \item rowname.rhs [logical] when naming the hypotheses, add the right-hand side (i.e. "X1-X2=0" instead of "X1-X2").
+#' \item sep [character vector of length 2] characters surrounding the left-hand side of the row names.
+#' }
 #'
 #' @details
-#' One can initialize an empty contrast matrix setting the argument\code{par} to \code{character(0)}. \cr \cr
+#' One can initialize an empty contrast matrix by setting the argument \code{linfct} to \code{character(0)}. \cr \cr
 #'
-#' When using \code{multcomp::glht} one should set the argument \code{add.variance} to \code{FALSE}. \cr
-#' When using \code{lavaSearch2::glht2} one should set the argument \code{add.variance} to \code{TRUE}.
-#' 
 #' @return A list containing
 #' \itemize{
 #' \item{contrast} [matrix] a contrast matrix corresponding to the left hand side of the linear hypotheses.
@@ -66,22 +64,23 @@
 #'                  data = df.data)
 #'
 #' ## Contrast matrix for a given model
-#' createContrast(lmX, par = "X~Age")
-#' createContrast(lmX, par = c("X~Age=0","X~Age+5*X~TreatmentSSRI=0"))
-#' createContrast(lmX, par = character(0))
+#' createContrast(lmX, linfct = "X~Age")
+#' createContrast(lmX, linfct = c("X~Age=0","X~Age+5*X~TreatmentSSRI=0"))
+#' createContrast(lmX, linfct = c("X~Age=0","X~Age+5*X~TreatmentSSRI=0"), sep = NULL)
+#' createContrast(lmX, linfct = character(0))
 #'
 #' ## Contrast matrix for the join model
 #' ls.lvm <- list(X = lmX, Y = lmY, Z = lvmZ)
-#' createContrast(ls.lvm, var.test = "Treatment", add.variance = FALSE)
-#' createContrast(ls.lvm, par = character(0), add.variance = FALSE)
+#' createContrast(ls.lvm, linfct = "TreatmentSSRI=0")
+#' createContrast(ls.lvm, linfct = "TreatmentSSRI=0", rowname.rhs = FALSE)
+#' createContrast(ls.lvm, linfct = character(0))
 #'
 #' ## Contrast for multigroup models
 #' m <- lava::lvm(Y~Age+Treatment)
 #' e <- lava::estimate(list(m,m), data = split(df.data, df.data$Gender))
 #' print(coef(e))
-#' createContrast(e, par = "Y~TreatmentSSRI@1 - Y~TreatmentSSRI@2 = 0")
-#' createContrast(e, par = "Y~TreatmentSSRI@2 - Y~TreatmentSSRI@1 = 0")
-#' @concept small sample inference
+#' createContrast(e, linfct = "Y~TreatmentSSRI@1 - Y~TreatmentSSRI@2 = 0")
+#' createContrast(e, linfct = "Y~TreatmentSSRI@2 - Y~TreatmentSSRI@1 = 0")
 #' 
 #' @export
 `createContrast` <-
@@ -90,154 +89,62 @@
 ## * createContrast.character
 #' @rdname createContrast
 #' @export
-createContrast.character <- function(object, name.param, diff.first = FALSE,
-                                     add.rowname = TRUE, rowname.rhs = TRUE,
-                                     ...){
-
-    n.param <- length(name.param)
-    dots <- list(...)
-    dots[["add.variance"]] <- NULL
-    if(length(dots)>0){
-        txt.args <- paste(names(dots), collapse = "\" \"")
-        txt.s <- if(length(dots)>1){"s"}else{""}
-        warning("Extra argument",txt.s," \"",txt.args,"\" are ignored. \n")
-    }
-
-    if(diff.first){
-        object <- paste0(object[-1]," - ",object[1])
-    }
-    
-    n.hypo <- length(object)
-    if(any(nchar(object)==0)){
-        stop("Argument contains empty character string(s) instead of an expression involving the model mean coefficients \n")
-    }
-    null <- rep(NA, n.hypo)
-    contrast <- matrix(0, nrow = n.hypo, ncol = n.param,
-                       dimnames = list(NULL,name.param))
-
-    if(n.hypo>0){
-        for(iH in 1:n.hypo){ # iH <- 1
-            iTempo.eq <- strsplit(object[iH], split = "=", fixed = TRUE)[[1]]
-            if(length(iTempo.eq)==1){ ## set null to 0 when second side of the equation is missing
-                iTempo.eq <- c(iTempo.eq,"0")
-            }
-
-            null[iH] <- as.numeric(trim(iTempo.eq[2]))
-            iRh.plus <- strsplit(iTempo.eq[[1]], split = "+", fixed = TRUE)[[1]]
-            iRh <- trim(unlist(sapply(iRh.plus, strsplit, split = "-", fixed = TRUE)))
-            iRh <- iRh[iRh!="",drop=FALSE]
-                            
-            ls.iRh <- lapply(strsplit(iRh, split = "*", fixed = TRUE), trim)
-                    
-            iN.tempo <- length(ls.iRh)
-                    
-            for(iCoef in 1:iN.tempo){ # iCoef <- 1
+createContrast.character <- function(object, ...){
 
-                if(length(ls.iRh[[iCoef]])==1){
-                    iFactor <- 1
-                    iName <- ls.iRh[[iCoef]][1]                
-                }else{
-                    iFactor <- as.numeric(ls.iRh[[iCoef]][1])
-                    iName <- ls.iRh[[iCoef]][2]
-                }
-            
-                if(iName %in% name.param == FALSE){
-                    txt.message <- paste0("unknown coefficient ",iName," in hypothesis ",iH,"\n")
-                    possibleMatch <- pmatch(iName, table = name.param)
-                    if(all(is.na(possibleMatch))){
-                        possibleMatch <- grep(iName, name.param, fixed = TRUE, value = TRUE)
-                    }
-                    if(length(possibleMatch)==0){
-                        possibleMatch <- agrep(iName, name.param, ignore.case = TRUE,value = TRUE)
-                    }
-                    if(length(possibleMatch)>0){
-                        txt.message <- c(txt.message,
-                                         paste0("candidates: \"",paste(possibleMatch, collapse = "\" \""),"\"\n"))
-                    }
-                    stop(txt.message)                    
-                }
+    object.lhs <- strsplit(object,split = "=")[[1]] ## rm rhs
+    object.term.lhs <- base::trimws(strsplit(object.lhs[[1]],split = "\\+|\\-")[[1]], which = "both") ## split with +/-
+    object.coefname <- sapply(object.term.lhs, function(iE){tail(strsplit(iE,split="*",fixed=TRUE)[[1]],1)}) ## remove *
 
-                ## identify if it is a minus sign
-                iBeforeCoef <- strsplit(iTempo.eq[[1]], split = ls.iRh[iCoef])[[1]][1]
-                if(iCoef > 1){
-                    iBeforeCoef <- strsplit(iBeforeCoef, split = ls.iRh[iCoef-1])[[1]][2]
-                }
-                test.sign <- length(grep("-",iBeforeCoef))>0
-                contrast[iH,iName] <- c(1,-1)[test.sign+1] * iFactor
-            }
-        }
-    
-        if(add.rowname){
-            name.hypo <- .contrast2name(contrast, null = if(rowname.rhs){null}else{NULL})
-            rownames(contrast) <- name.hypo
-            null <- setNames(null, name.hypo)
-        }
-    }
-    
-    return(list(contrast = contrast,
-                null = null,
-                Q = n.hypo))
+    return(.createContrast(object, object.coefname, ...))
+        
 }
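
Unlike in previous versions, the character method now infers the coefficient names from the expression itself before delegating to .createContrast() (defined further below). A minimal sketch with hypothetical coefficient names, assuming the package is loaded:

res <- createContrast("Y~X1 + 2*Y~X2 = 0")
res$contrast ## 1 x 2 matrix with columns Y~X1, Y~X2 and entries 1, 2
res$null     ## 0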
 
-## * createContrast.lm
+## * createContrast.lvmfit
 #' @rdname createContrast
 #' @export
-createContrast.lm <- function(object, par, add.variance, ...){
-
-    if(!identical(class(par),"character")){
-        stop("Argument \'par\' must be a character \n")
-    }    
-    name.coef <- names(coef(object))
-    if(is.null(add.variance)){
-        stop("Argument \'add.variance\' must be specified for lm objects \n")
-    }
-    if(add.variance){
-        if(any("sigma2" %in% name.coef)){
-            stop("createContrast does not work when one of the coefficients is named \"sigma2\" \n")
-        }
-        name.coef <- c(name.coef,"sigma2")
+createContrast.lvmfit <- function(object, linfct, ...){
+    name.param <- names(coef(object))
+    if(!identical(class(linfct),"character")){
+        stop("Argument \'linfct\' must be of type character (or vector of character) \n")
     }
-
-    out <- createContrast(par, name.param = name.coef, ...)
+    ## if(length(linfct)==1 && all(linfct %in% name.param == FALSE) & all(sapply(linfct, function(iL){any(grepl(iL, name.param, fixed = TRUE))}))){
+    ##     linfct <- grep(linfct, name.param,  value = TRUE)
+    ## }
+    out <- .createContrast(linfct = linfct, name.param = name.param, ...)
     return(out)
     
 }
 
-## * createContrast.gls
+## * createContrast.lvmfit2
 #' @rdname createContrast
 #' @export
-createContrast.gls <- function(object, par, add.variance, ...){
-
-    if(!identical(class(par),"character")){
-        stop("Argument \'par\' must be a character \n")
+createContrast.lvmfit2 <- function(object, linfct, ...){
+    if(!identical(class(linfct),"character")){
+        stop("Argument \'linfct\' must be of type character (or vector of character) \n")
     }
-    if(add.variance){
-        name.coef <- names(.coef2(object))
+    ## if(length(linfct)==1){
+        ## name.param <- names(coef(object, as.lava = TRUE))
+        ## name.param2 <- names(coef(object, as.lava = FALSE))
+
+    ##     if(all(name.param != linfct) & all(sapply(linfct, function(iL){any(grepl(iL, name.param, fixed = TRUE))}))){
+    ##         linfct <- grep(linfct, name.param,  value = TRUE)
+    ##     }
+    ## }else{
+    ##     name.param <- names(coef(object))
+    ## }
+    name.param <- names(coef(object, as.lava = TRUE))
+    name.param2 <- names(coef(object, as.lava = FALSE))
+    if(any(name.param!=name.param2)){
+        if(any(sapply(setdiff(name.param2,name.param), function(iCoef){any(grepl(iCoef, linfct, fixed = TRUE))}))){
+            out <- .createContrast(linfct = linfct, name.param = name.param2, ...)
+        }else{
+            out <- .createContrast(linfct = linfct, name.param = name.param, ...)
+        }
+        
     }else{
-        name.coef <- names(coef(object))
+        out <- .createContrast(linfct = linfct, name.param = name.param, ...)
     }
-    out <- createContrast(par, name.param = name.coef, ...)
-    return(out)
     
-}
-
-## * createContrast.lme
-#' @rdname createContrast
-#' @export
-createContrast.lme <- createContrast.gls
-
-## * createContrast.lvmfit
-#' @rdname createContrast
-#' @export
-createContrast.lvmfit <- function(object, par = NULL, var.test = NULL, ...){
-
-    if(is.null(par) && !is.null(var.test)){
-       par <- grep(var.test, names(coef(object)),  value = TRUE)
-    }
-    if(!identical(class(par),"character")){
-        stop("Argument \'par\' must be a character \n")
-    }
-    out <- createContrast(par, name.param = names(coef(object)), ...)
     return(out)
     
 }
@@ -245,19 +152,23 @@ createContrast.lvmfit <- function(object, par = NULL, var.test = NULL, ...){
 ## * createContrast.list
 #' @rdname createContrast
 #' @export
-createContrast.list <- function(object, par = NULL, add.variance = NULL, var.test = NULL, 
-                                ...){
+createContrast.list <- function(object, linfct = NULL, ...){
+
+    if(!identical(class(linfct),"character")){
+        stop("Argument \'linfct\' must be of type character (or vector of character) \n")
+    }
 
     ## ** find the names of the coefficients
     name.model <- names(object)
+    n.model <- length(name.model)
     if(is.null(name.model)){
-        stop("Each element of the argument \'object\' must be named \n")
+        stop("Incorrect argument  \'object\' \n",
+             "Each element of the list must be named \n")
     }
-    
+
     ls.coefname <- lapply(name.model, function(iModel){ ## list by model
         iResC <- createContrast(object[[iModel]],
-                                par = character(0),
-                                add.variance = add.variance)
+                                linfct = character(0))
         return(colnames(iResC$contrast))
     })
     names(ls.coefname) <- name.model
@@ -269,24 +180,59 @@ createContrast.list <- function(object, par = NULL, add.variance = NULL, var.tes
     
     object.coefname <- unname(unlist(ls.object.coefname)) ## vector
     n.coef <- length(object.coefname)
-    
-    ## ** normalize arguments
-    if(!is.null(var.test)){
-        if(!is.null(par)){
-            stop("Argument \'var.test\' cannot be specified when argument \'par\' is specified \n")
-        }else{
-            if(length(var.test)!=1){
-                stop("Argument \'var.test\' must have length 1 \n")
+        
+    ## ** create full contrast matrix
+    if(length(linfct)==1){
+
+        ## isolate left hand side of the linfct and remove multiplicative factor (e.g. 2*Age=0 -> Age)
+        linfct.contrast <- createContrast(linfct, ...)
+        linfct.coefname <- colnames(linfct.contrast$contrast)
+        linfct.rhs <- as.double(linfct.contrast$null)
+        linfct.factor <- as.double(linfct.contrast$contrast)
+        
+        ## name associated with each hypothesis
+        name.linfct <- names(linfct)
+        if(is.null(name.linfct)){
+            name.linfct <- rownames(linfct.contrast$contrast)
+        }
+        ## generate contrast matrix with only 0
+        out <- list(contrast = matrix(0, nrow = 0, ncol = n.coef,
+                                      dimnames = list(NULL, object.coefname)),
+                    null = numeric(0),
+                    Q = 0)
+        
+        ## regressor corresponding to each coefficient
+        tableCoef <- data.frame(name = unlist(lapply(object, function(iO){rownames(coef(iO, type = 9, labels = 2))})),
+                                model = unlist(lapply(1:n.model, function(iO){rep(name.model[iO], NROW(coef(object[[iO]], type = 9, labels = 2)))})),
+                                type = unlist(lapply(object, function(iO){attr(coef(iO, type = 9, labels = 2),"type")})),
+                                to = unlist(lapply(object, function(iO){attr(coef(iO, type = 9, labels = 2),"var")})),
+                                from = unlist(lapply(object, function(iO){attr(coef(iO, type = 9, labels = 2),"from")})))
+        tableCoef <- tableCoef[tableCoef$type=="regression",]
+        tableCoef$group <- interaction(tableCoef$model,tableCoef$to,drop=TRUE)
+        tableCoef$group <- as.numeric(factor(tableCoef$group,unique(tableCoef$group)))
+
+        ## fill contrast matrix
+        for(iG in 1:max(tableCoef$group)){ ## iG <- 3
+            if(all(linfct.coefname %in% tableCoef[tableCoef$group==iG,"from"])){
+
+                iTemplate <- .createContrast(linfct, tableCoef[tableCoef$group==iG,"from"], ...)
+
+                iRow <- matrix(0, nrow = 1, ncol = n.coef, dimnames = list(NULL,object.coefname))
+                if(!is.null(rownames(iTemplate$contrast))){
+                    rownames(iRow) <- paste0(unique(tableCoef[tableCoef$group==iG,"model"]),": ",name.linfct)
+                }
+                iRow[,paste0(tableCoef[tableCoef$group==iG,"model"],": ",tableCoef[tableCoef$group==iG,"name"])] <- unname(iTemplate$contrast)
+
+                out$contrast <- rbind(out$contrast,iRow)
+                out$null <- c(out$null, unname(iTemplate$null))
+                out$Q <- out$Q+1
+
+                tableCoef[tableCoef$group==iG,"contrast"] <- as.double(iTemplate$contrast)
             }
-            object.coefname.red <- unlist(lapply(object.coefname, function(iName){strsplit(iName, split = ": ", fixed = TRUE)[[1]][2]}))
-            par <- object.coefname[grep(var.test, object.coefname.red, value = FALSE)]
         }
-    }
 
-    ## ** create full contrast matrix
-    out <- createContrast(par, name.param = object.coefname)
-    if(any(out$null!=0)){
-        warning("glht ignores the \'rhs\' argument when dealing with a multiple models \n")
+    }else{
+        out <- .createContrast(linfct, name.param = object.coefname, ...)
     }
 
     ## ** create contrast matrix relative to each model
@@ -309,18 +255,21 @@ createContrast.list <- function(object, par = NULL, add.variance = NULL, var.tes
     class(out$mlf) <- "mlf"
 
     ## remove right hand side from the names (like in multicomp)
-    if(length(par)>0){
-        rownames(out$contrast) <- .contrast2name(out$contrast, null = NULL)
+    if(!is.null(list(...)$rowname.rhs) && list(...)$rowname.rhs==FALSE){
         out$mlf <- lapply(out$mlf, function(x){ ## x <- name.model[1]
             if(NROW(x)>0){
-                rownames(x) <- .contrast2name(x, null = NULL)
+                if("sep" %in% names(list(...))){
+                    rownames(x) <- .contrast2name(x, null = NULL, sep = list(...)$sep)
+                }else{
+                    rownames(x) <- .contrast2name(x, null = NULL)
+                }
             }
             return(x)
         })
             
         class(out$mlf) <- "mlf"
-        names(out$null) <- rownames(out$contrast)
     }
+    names(out$null) <- rownames(out$contrast)
    
     ## ** export
     return(out)    
@@ -331,6 +280,102 @@ createContrast.list <- function(object, par = NULL, add.variance = NULL, var.tes
 #' @export
 createContrast.mmm <- createContrast.list
 
+## * .createContrast
+.createContrast <- function(linfct, name.param, diff.first = FALSE, add.rowname = TRUE, rowname.rhs = TRUE, sep = c("[","]"), ...){
+
+    n.param <- length(name.param)
+    dots <- list(...)
+    if(length(dots)>0){
+        txt.args <- paste(names(dots), collapse = "\" \"")
+        txt.s <- if(length(dots)>1){"s"}else{""}
+        txt.verb <- if(length(dots)>1){"are"}else{"is"}
+        warning("Extra argument",txt.s," \"",txt.args,"\" ",txt.verb," ignored. \n")
+    }
+
+    if(diff.first){
+        linfct <- paste0(linfct[-1]," - ",linfct[1])
+    }
+
+    n.hypo <- length(linfct)
+    name.hypo <- names(linfct)
+    if(any(nchar(linfct)==0)){
+        stop("Argument contains empty character string(s) instead of an expression involving the model mean coefficients \n")
+    }
+    null <- rep(NA, n.hypo)
+    contrast <- matrix(0, nrow = n.hypo, ncol = n.param,
+                       dimnames = list(NULL,name.param))
+
+    if(n.hypo>0){
+        for(iH in 1:n.hypo){ # iH <- 1
+            iTempo.eq <- strsplit(linfct[iH], split = "=", fixed = TRUE)[[1]]
+            if(length(iTempo.eq)==1){ ## set null to 0 when second side of the equation is missing
+                iTempo.eq <- c(iTempo.eq,"0")
+            }
+
+            null[iH] <- as.numeric(trim(iTempo.eq[2]))
+            iRh.plus <- strsplit(iTempo.eq[[1]], split = "+", fixed = TRUE)[[1]]
+            iRh <- trim(unlist(sapply(iRh.plus, strsplit, split = "-", fixed = TRUE)))
+            iRh <- iRh[iRh!="",drop=FALSE]
+                            
+            ls.iRh <- lapply(strsplit(iRh, split = "*", fixed = TRUE), trim)
+                    
+            iN.tempo <- length(ls.iRh)
+
+            for(iCoef in 1:iN.tempo){ # iCoef <- 1
+
+                if(length(ls.iRh[[iCoef]])==1){
+                    iFactor <- 1
+                    iName <- ls.iRh[[iCoef]][1]                
+                }else{
+                    iFactor <- as.numeric(ls.iRh[[iCoef]][1])
+                    iName <- ls.iRh[[iCoef]][2]
+                }
+            
+                if(iName %in% name.param == FALSE){
+                    txt.message <- paste0("unknown coefficient ",iName," in hypothesis ",iH,"\n")
+                    possibleMatch <- grep(iName, name.param, fixed = TRUE, value = TRUE)
+                    if(all(is.na(possibleMatch)) || length(possibleMatch)==0){
+                        possibleMatch <- pmatch(iName, table = name.param)
+                    }else if(length(linfct) == 1 && length(possibleMatch)>1){
+                        message("Guessing the contrast based on the string \"",linfct,"\" (",length(possibleMatch)," coefficients found). \n")
+                        return(.createContrast(linfct = possibleMatch, name.param = name.param, diff.first = diff.first, add.rowname = add.rowname, rowname.rhs = rowname.rhs, sep = sep, ...))
+                    }
+                    
+                    if(all(is.na(possibleMatch)) || length(possibleMatch)==0){
+                        possibleMatch <- agrep(iName, name.param, ignore.case = TRUE,value = TRUE)
+                    }
+                    if(all(!is.na(possibleMatch)) && length(possibleMatch)>0){
+                        txt.message <- c(txt.message,
+                                         paste0("candidates: \"",paste(possibleMatch, collapse = "\" \""),"\"\n"))
+                    }
+                    stop(txt.message)
+                }
+
+                ## identify if it is a minus sign
+                iBeforeCoef <- strsplit(iTempo.eq[[1]], split = ls.iRh[iCoef])[[1]][1]
+                if(iCoef > 1){
+                    iBeforeCoef <- strsplit(iBeforeCoef, split = ls.iRh[iCoef-1])[[1]][2]
+                }
+                test.sign <- length(grep("-",iBeforeCoef))>0
+                contrast[iH,iName] <- c(1,-1)[test.sign+1] * iFactor
+            }
+        }
+        if(add.rowname){
+            if(is.null(name.hypo)){
+                name.hypo <- .contrast2name(contrast, null = if(rowname.rhs){null}else{NULL}, sep = sep)
+            }
+            rownames(contrast) <- name.hypo
+            null <- stats::setNames(null, name.hypo)
+        }
+    }
+    return(list(contrast = contrast,
+                null = null,
+                Q = n.hypo))
+}
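
A minimal sketch of the parser with hypothetical coefficient names; trim() is a package-internal helper, stubbed here with base::trimws so the snippet is self-contained:

trim <- function(x){trimws(x)} ## stand-in for the package-internal trim()

.createContrast(linfct = "X~Age + 5*X~TreatmentSSRI = 1",
                name.param = c("X~Age", "X~TreatmentSSRI", "X~~X"),
                add.rowname = FALSE)
## $contrast: 1 x 3 matrix with entries 1, 5, 0; $null: 1; $Q: 1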
+
 ## * .contrast2name
 #' @title Create Rownames for a Contrast Matrix
 #' @description Create rownames for a contrast matrix using the coefficients and the names of the coefficients. The rownames will be [value * name] = null, e.g. [beta + 4*alpha] = 0.
@@ -338,13 +383,14 @@ createContrast.mmm <- createContrast.list
 #'
 #' @param contrast [matrix] a contrast matrix defining the left hand side of the linear hypotheses to be tested.
 #' @param null [vector, optional] the right hand side of the linear hypotheses to be tested.
+#' @param sep [character of length 2, optional] character used in rownames to wrap the left hand side of the equation.
 #'
 #' @details When argument \code{null} is \code{NULL}, the rownames are not put into brackets and the right-hand side is not added to the name.
 #'
 #' @return a character vector.
 #' 
 #' @keywords internal
-.contrast2name <- function(contrast, null = NULL){
+.contrast2name <- function(contrast, null = NULL, sep = c("[","]")){
     contrast.names <- colnames(contrast)
     
     df.index <- as.data.frame(which(contrast != 0, arr.ind = TRUE))
@@ -382,8 +428,10 @@ createContrast.mmm <- createContrast.list
 
     ## add right hand side
     if(!is.null(null)){
-        out <- paste0("[",out,"] = ",null)
-        
+        ## out <- paste0("[",out,"] = ",null)
+        out <- paste0(sep[1],out,sep[2]," = ",null)
+    }else if(!is.null(sep)){
+        out <- paste0(sep[1],out,sep[2])
     }
 
     return(as.character(out))
diff --git a/R/createGrid.R b/R/createGrid.R
index cdec1a1..7e38e57 100644
--- a/R/createGrid.R
+++ b/R/createGrid.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: aug 31 2017 (16:40) 
 ## Version: 
-## last-updated: feb  5 2018 (15:51) 
+## last-updated: Jan 11 2022 (17:08) 
 ##           By: Brice Ozenne
-##     Update #: 87
+##     Update #: 88
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
diff --git a/R/effects2.R b/R/effects2.R
deleted file mode 100644
index 7b7a8b9..0000000
--- a/R/effects2.R
+++ /dev/null
@@ -1,162 +0,0 @@
-### effects2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar  4 2019 (10:28) 
-## Version: 
-## Last-Updated: mar 12 2019 (16:24) 
-##           By: Brice Ozenne
-##     Update #: 75
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * effects2 (documentation)
-#' @title Effects from a fitted model
-#' @description Test whether a path in the latent variable model corresponds to a null effect.
-#' Similar to \code{lava::effects} but with a small-sample correction.
-#' So far it only works for paths composed of two edges.
-#' @name effects2
-#'
-#' @param object an object that inherits from lvmfit.
-#' @param link [character vector] The path for which the effect should be assessed (e.g. \code{"A~B"}),
-#' i.e. the effect of the right variable (B) on the left variable (A). 
-#' @param df [logical] should the degrees of freedom of the Wald statistic be computed using the Satterthwaite correction?
-#' Otherwise the degrees of freedom are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.
-#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias? Argument passed to \code{sCorrect}.
-#' @param ...  [internal] only used by the generic method.
-#' 
-#' @concept small sample inference
-#' @export
-`effects2` <-
-  function(object, link, ...) UseMethod("effects2")
-
-## * effects2 (examples)
-## TODO
-
-## * effects2.lvmfit
-#' @rdname effects2
-#' @export
-effects2.lvmfit <- function(object, link, df = TRUE, bias.correct = TRUE, ...){
-    sCorrect(object, df = df) <- bias.correct    
-    return(effects2.lvmfit2(object, link = link))
-}
-
-## * effects2 (code)
-##' @rdname effects2
-##' @export
-effects2.lvmfit2 <- function(object, link, ...){
-    n.link <- length(link)
-
-    name.coef <- names(coef(object))
-    link.direct <- link[link %in% name.coef]
-    link.other <- setdiff(link, link.direct)
-
-    object.summary2 <- summary2(object)$coef 
-    
-    out <- NULL
-    if(length(link.direct)>0){        
-        out <- rbind(out,
-                     object.summary2[link.direct,])
-    }
-
-    if(length(link.other)>0){
-        allCoef <- coefType(object, as.lava = FALSE)
-        mu <- setNames(object.summary2[,"Estimate"],rownames(object.summary2))
-        mu.se <- setNames(object.summary2[,"Std. Error"],rownames(object.summary2))
-        mu.df <- setNames(object.summary2[,"df"],rownames(object.summary2))
-        Sigma <- vcov2(object)
-        dSigma <- object$sCorrect$dVcov.param
-        
-        for(iL in 1:length(link.other)){ ## iL <- 1
-            iLink <- link.other[iL]
-            iPath <- lava::path(object, to = as.formula(iLink))
-            if(length(iPath$path[[1]])==0){
-                stop("No path found \n")
-            }else{
-                iNode <- iPath$path[[1]]
-                iN.node <- length(iNode)
-                iLink <- paste0(iNode[-1], lava.options()$symbols[1], iNode[-iN.node])
-
-                if(any(iLink %in% allCoef$originalLink == FALSE)){
-                    stop("Part of the path could not be identified \n")
-                }
-                if(any(allCoef$type[allCoef$originalLink %in% iLink] != "regression")){
-                    stop("Part of the path does not correspond to a regression link \n")
-                }
-                if(length(iLink)!=2){
-                    stop("Only implemented for path of length 2 \n")
-                }                
-
-                test.noconstrain <- is.na(allCoef$value[allCoef$originalLink %in% iLink])
-                if(all(test.noconstrain)){
-                    out <- rbind(out,
-                                 .deltaMethod_product(mu = mu, Sigma = Sigma, dSigma = dSigma, link = iLink)
-                                 )
-                }else{
-                    iLink.NA <- iLink[test.noconstrain==FALSE]
-                    iLink.NNA <- iLink[test.noconstrain==TRUE]
-                    iEffect <- prod(mu[iLink])
-                    iEffect.se <- mu.se[iLink.NNA] * mu[iLink.NA]
-                    iEffect.df <- mu.df[iLink.NNA]
-                    
-                    iRow <- c("Estimate" = iEffect,
-                              "Std. Error" = iEffect.se,
-                              "t-value" = iEffect/iEffect.se,
-                              "P-value" = 2*(1-pt(abs(iEffect/iEffect.se), df = iEffect.df)),
-                              "df" = iEffect.df
-                              )
-                    out <- rbind(out,
-                                 iRow
-                                 )                    
-                }
-                
-            }
-
-        }            
-        
-    }
-    rownames(out) <- c(link.direct,link.other)
-
-    return(out)
-    
-}
-
-.deltaMethod_product <- function(mu,Sigma,dSigma,link){
-    link1 <- link[1]
-    link2 <- link[2]
-
-    effect <- as.double(prod(mu[link]))
-    effect.var <- as.double(Sigma[link1,link1] * mu[link2]^2 + Sigma[link2,link2] * mu[link1]^2 + 2 * Sigma[link1,link2] * mu[link1] * mu[link2])
-    effect.se <- sqrt(effect.var)
-    effect.Wald <- effect/effect.se
-
-    if(!is.null(dSigma)){
-        keep.param <- dimnames(dSigma)[[3]]
-        Ilink1 <- as.numeric(keep.param %in% link1)
-        Ilink2 <- as.numeric(keep.param %in% link2)
-    
-        dvar1 <- dSigma[link1,link1,] * mu[link2]^2 + Sigma[link1,link1] * 2 * Ilink2 * mu[link2]
-        dvar2 <- dSigma[link2,link2,] * mu[link1]^2 + Sigma[link2,link2] * 2 * Ilink1 * mu[link1]
-        dvar12 <- 2 * dSigma[link1,link2,] * mu[link1] * mu[link2] + 2 * Sigma[link1,link2] * (Ilink1 * mu[link2] + mu[link1] * Ilink2)
-        dvar <- dvar1 + dvar2 + dvar12
-
-        effect.df <- 2 * effect.var^2 / (t(dvar) %*% Sigma[keep.param,keep.param,drop=FALSE] %*% dvar)[1,1]
-    }else{
-        effect.df <- Inf
-    }
-    
-    return(c("Estimate" = effect,
-             "Std. Error" = effect.se,
-             "t-value" = effect.Wald,
-             "P-value" = 2*(1-pt(abs(effect.Wald), df = effect.df)),
-             "df" = effect.df
-             ))
-}
-
-######################################################################
-### effects2.R ends here
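The deleted .deltaMethod_product above implements the first-order delta method for a product of two coefficients: Var(ab) ~ b^2 Var(a) + a^2 Var(b) + 2ab Cov(a,b). A minimal standalone R sketch of that formula, using made-up numbers (nothing below comes from the package):

## first-order delta method for the product of two estimates (hypothetical values)
a <- 0.8; b <- 1.5               # two regression coefficients
var.a <- 0.04; var.b <- 0.09     # their sampling variances
cov.ab <- 0.01                   # their covariance
effect     <- a * b
effect.var <- var.a * b^2 + var.b * a^2 + 2 * cov.ab * a * b
effect.se  <- sqrt(effect.var)
wald       <- effect / effect.se
2 * (1 - pnorm(abs(wald)))       # p-value under a normal approximation (df = Inf)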
diff --git a/R/estimate2.R b/R/estimate2.R
deleted file mode 100644
index c9c4592..0000000
--- a/R/estimate2.R
+++ /dev/null
@@ -1,549 +0,0 @@
-### estimate2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: feb 16 2018 (16:38) 
-## Version: 
-## Last-Updated: feb 15 2019 (14:08) 
-##           By: Brice Ozenne
-##     Update #: 864
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * estimate2
-#' @title Compute Bias Corrected Quantities.
-#' @description Compute bias corrected residuals variance covariance matrix
-#' and information matrix.
-#' Also provides the leverage values and the corrected sample size when \code{adjust.n} is set to \code{TRUE}.
-#' @name estimate2
-#' 
-#' @keywords internal
-.estimate2 <- function(object, epsilon, n.cluster,
-                       name.param, name.endogenous, name.meanparam, name.varparam,
-                       index.Omega,
-                       adjust.Omega, adjust.n, tol, n.iter, trace){
-
-    ## ** Prepare
-    Omega <- object$conditionalMoment$Omega
-    dmu <- object$conditionalMoment$dmu
-    dOmega <- object$conditionalMoment$dOmega
-    
-    name.hybridparam <- intersect(name.meanparam, name.varparam)
-
-    n.param <- length(name.param)
-    n.meanparam <- length(name.meanparam)
-    n.varparam <- length(name.varparam)
-    n.hybridparam <- length(name.hybridparam)
-
-    n.endogenous <- length(name.endogenous)
-    grid.meanparam <- .combination(name.meanparam, name.meanparam)    
-    n.grid.meanparam <- NROW(grid.meanparam)
-    grid.varparam <- .combination(name.varparam, name.varparam)
-    n.grid.varparam <- NROW(grid.varparam)
-
-    ## check low diagonal
-    name2num <- setNames(1:n.param,name.param)
-    if(!all(name2num[grid.meanparam[,1]]-name2num[grid.meanparam[,2]]>=0)){
-        stop("Incorrect allocation of the computation of the information matrix (mean parameter) \n")
-    }
-    name2num <- setNames(1:n.param,name.param)
-    if(!all(name2num[grid.varparam[,1]]-name2num[grid.varparam[,2]]>=0)){
-        stop("Incorrect allocation of the computation of the information matrix (variance parameter) \n")
-    }
-    ##
-    
-    leverage <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
-                       dimnames = list(NULL, name.endogenous))
-    ls.dmu <- vector(mode = "list", length = n.cluster)
-    for(iC in 1:n.cluster){ # iC <- 1
-        if(is.null(index.Omega)){            
-            leverage[iC,] <- 0
-            ls.dmu[[iC]] <- matrix(0, nrow = n.param, ncol = n.endogenous,
-                                   dimnames = list(name.param, name.endogenous))
-            ls.dmu[[iC]][name.meanparam,] <- do.call(rbind, lapply(dmu[name.meanparam],function(x){x[iC,]}))
-        }else{
-            leverage[iC,index.Omega[[iC]]] <- 0
-            ls.dmu[[iC]] <- matrix(0, nrow = n.param, ncol = length(index.Omega[[iC]]),
-                                   dimnames = list(name.param, name.endogenous[index.Omega[[iC]]]))
-            ls.dmu[[iC]][name.meanparam,] <- do.call(rbind, lapply(dmu[name.meanparam],function(x){x[iC,index.Omega[[iC]]]}))
-        }        
-    }
-    
-    ## ** Initialisation (i.e. first iteration without correction)
-    if(any(eigen(Omega)$values<=0)){
-        stop("the residual variance-covariance matrix is not positive definite \n")
-    }
-
-    if(is.null(index.Omega)){
-        n.corrected <- rep(n.cluster, n.endogenous)
-    }else{
-        n.corrected <- NULL
-    }
-    ls.Psi <- vector(mode = "list", length = n.cluster)
-
-    Omega.adj <- Omega
-    if(!adjust.n){
-       epsilon.adj <- epsilon
-    }
-
-    if(trace>0){
-        cat("* Reconstruct estimated information matrix ")
-    }
-
-    iInfo <- .information2(dmu = dmu,
-                           dOmega = dOmega,
-                           Omega = Omega,
-                           n.corrected = n.corrected,
-                           leverage = leverage, index.Omega = index.Omega, n.cluster = n.cluster,
-                           grid.meanparam = grid.meanparam,
-                           n.grid.meanparam = n.grid.meanparam,
-                           grid.varparam = grid.varparam,
-                           n.grid.varparam = n.grid.varparam,
-                           name.param = name.param,
-                           n.param = n.param)
-    iVcov.param <- try(chol2inv(chol(iInfo)), silent = TRUE)
-    if(inherits(iVcov.param, "try-error")){
-        iVcov.param <- solve(iInfo)
-    }
-    if(trace>0){
-        cat("- done \n")
-    }
-    
-    ## ** Loop    
-    if(adjust.Omega || adjust.n){
-        if(trace>0){
-            cat("* iterative small sample correction: ")
-        }
-        iIter <- 0
-        iTol <- Inf
-        Omega_save <- Omega
-        iOmega.adj <- Omega.adj
-    }else{
-        iIter <- Inf
-        iTol <- -Inf        
-    }
-    
-    while(iIter < n.iter & iTol > tol){
-        if(trace>0){
-            cat("*")
-        }
-
-        ## *** Step (i-ii): compute individual bias, expected bias
-        Psi <- matrix(0, nrow = n.endogenous, ncol = n.endogenous,
-                      dimnames = list(name.endogenous, name.endogenous))
-        M.countCluster <- matrix(0, nrow = n.endogenous, ncol = n.endogenous,
-                                 dimnames = list(name.endogenous, name.endogenous))
-        for(iC in 1:n.cluster){
-            ## individual bias
-            ls.Psi[[iC]] <- t(ls.dmu[[iC]])  %*% iVcov.param %*% ls.dmu[[iC]]
-            ## cumulated bias            
-            if(is.null(index.Omega)){
-                Psi <- Psi + ls.Psi[[iC]]
-                M.countCluster <- M.countCluster + 1
-            }else{
-                Psi[index.Omega[[iC]],index.Omega[[iC]]] <- Psi[index.Omega[[iC]],index.Omega[[iC]]] + ls.Psi[[iC]]
-                M.countCluster[index.Omega[[iC]],index.Omega[[iC]]] <- M.countCluster[index.Omega[[iC]],index.Omega[[iC]]] + 1
-            }
-        }
-
-        ## update
-        for(iPsi in 1:length(Psi)){
-            if(M.countCluster[iPsi]>0){
-                Psi[iPsi] <- Psi[iPsi]/M.countCluster[iPsi]
-            }
-        }
-        
-        ## *** Step (iii): compute leverage
-        if(adjust.n){
-            epsilon.adj <- .adjustResiduals(Omega = Omega.adj,
-                                            Psi = Psi,
-                                            epsilon = epsilon,
-                                            index.Omega = index.Omega,
-                                            name.endogenous = name.endogenous,
-                                            n.endogenous = n.endogenous,
-                                            n.cluster = n.cluster)
-
-            leverage <- .adjustLeverage(Omega = Omega.adj,
-                                        epsilon = epsilon.adj,
-                                        ls.dmu = ls.dmu,
-                                        dOmega = dOmega,
-                                        vcov.param = iVcov.param,
-                                        index.Omega = index.Omega,
-                                        name.endogenous = name.endogenous,
-                                        n.endogenous = n.endogenous,
-                                        name.varparam = name.varparam,
-                                        n.varparam = n.varparam,
-                                        n.cluster = n.cluster)
-
-            n.corrected <- rep(n.cluster, n.endogenous) - colSums(leverage, na.rm = TRUE)
-        }
-        
-        ## *** Step (v): correct residual covariance matrix, estimates, and derivatives
-        if(adjust.Omega){
-            ## corrected residual covariance variance
-            Omega.adj <- Omega + Psi
-            
-            ## correct estimates
-            object$conditionalMoment <- .adjustMoment(object, Omega = Omega.adj)
-            dOmega <- object$conditionalMoment$dOmega
-            ## conditionalMoment.adj$param - coef(object)
-           
-        }
-
-        ## *** Step (vii): expected information matrix
-        iInfo <- .information2(dmu = dmu,
-                               dOmega = dOmega,
-                               Omega = Omega.adj,
-                               n.corrected = n.corrected,
-                               leverage = leverage,
-                               index.Omega = index.Omega,
-                               n.cluster = n.cluster,
-                               grid.meanparam = grid.meanparam,
-                               n.grid.meanparam = n.grid.meanparam,
-                               grid.varparam = grid.varparam,
-                               n.grid.varparam = n.grid.varparam,
-                               name.param = name.param,
-                               n.param = n.param)
-        iVcov.param <- try(chol2inv(chol(iInfo)), silent = TRUE)
-        if(inherits(iVcov.param, "try-error")){
-            iVcov.param <- solve(iInfo)
-        }
-        
-        ## *** Update cv
-        iIter <- iIter + 1
-        iTol <- norm(Omega.adj-Omega_save, type = "F")
-        Omega_save <- Omega.adj
-        ## cat("Omega.adj: ",Omega.adj," | n:",n.corrected," | iTol:",iTol,"\n")
-    }
-    
-    ## ** Post processing
-    if(!is.infinite(iIter)){
-
-        if(iTol > tol){
-            warning("small sample correction did not reach convergence after ",iIter," iterations \n")
-
-            if(trace>0){
-                cat(" - incomplete \n")
-            }
-        }else{
-            if(trace>0){
-                cat(" - done \n")
-            }
-        }
-        
-    }
-
-    vcov.param <- try(chol2inv(chol(iInfo)), silent = TRUE)
-    if("try-error" %in% class(vcov.param)){
-        errorMessage <- vcov.param
-        vcov.param <- solve(iInfo)
-        attr(vcov.param, "warning") <- errorMessage
-    }
-    dimnames(vcov.param) <- dimnames(iInfo)
-
-    ## update object
-    object$conditionalMoment$Omega <- Omega.adj
-    object$dVcov <- list(param = object$conditionalMoment$param,
-                         score = NULL,
-                         vcov.param = vcov.param,
-                         dVcov.param = NULL,
-                         Omega = Omega.adj,
-                         residuals = epsilon.adj,
-                         leverage = leverage,
-                         n.corrected = rep(n.cluster, n.endogenous) - colSums(leverage, na.rm = TRUE),
-                         opt = list(objective = iTol, iterations = iIter, convergence = (iTol <= tol), grid.meanparam = grid.meanparam, grid.varparam = grid.varparam))
-
-    ## ** Export
-    return(object)
-}
-
-## * .adjustResiduals
-.adjustResiduals <- function(Omega, Psi, epsilon,
-                             index.Omega,
-                             name.endogenous, n.endogenous, n.cluster){
-
-    if(is.null(index.Omega)){ ## no missing values
-        
-        Omega.chol <- matrixPower(Omega, symmetric = TRUE, power = 1/2)
-        H <- Omega %*% Omega - Omega.chol %*% Psi %*% Omega.chol
-        HM1 <- tryCatch(matrixPower(H, symmetric = TRUE, power = -1/2), warning = function(w){w})
-        if(inherits(HM1,"warning")){
-            stop("Cannot compute the adjusted residuals \n",
-                 "Estimated bias too large compared to the estimated variance-covariance matrix \n",
-                 "Consider setting argument \'adjust.n\' to FALSE when calling sCorrect \n")
-        }
-        epsilon.adj <- epsilon %*% Omega.chol %*% HM1 %*% Omega.chol
-        
-    }else{ ## missing values
-        
-        epsilon.adj <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
-                              dimnames = list(NULL, name.endogenous))
-
-        for(iC in 1:n.cluster){
-            iIndex <- index.Omega[[iC]]
-            iOmega <- Omega[iIndex,iIndex,drop=FALSE]
-            iOmega.chol <- matrixPower(iOmega, symmetric = TRUE, power = 1/2)
-            iH <- iOmega %*% iOmega - iOmega.chol %*% Psi[iIndex,iIndex,drop=FALSE] %*% iOmega.chol
-            iHM1 <- tryCatch(matrixPower(iH, symmetric = TRUE, power = -1/2), warning = function(w){w})
-            if(inherits(iHM1,"warning")){
-                stop("Cannot compute the adjusted residuals \n",
-                     "Estimated bias too large compared to the estimated variance-covariance matrix \n",
-                     "Consider setting argument \'adjust.n\' to FALSE when calling sCorrect \n")
-            }
-            epsilon.adj[iC,iIndex] <- epsilon[iC,iIndex] %*% iOmega.chol %*% iHM1 %*% iOmega.chol
-        }
-        
-    }
-    dimnames(epsilon.adj) <- list(NULL,name.endogenous)
-    return(epsilon.adj)
-}
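.adjustResiduals above relies on matrixPower to take symmetric square roots of covariance matrices. Assuming that helper works through an eigendecomposition (an assumption about the package internals, not verified here), an equivalent base-R sketch on a toy matrix:

## sketch of what matrixPower(., symmetric = TRUE, power = .) is assumed to do
matrixPower.sketch <- function(M, power) {
    e <- eigen(M, symmetric = TRUE)
    e$vectors %*% diag(e$values^power, nrow = length(e$values)) %*% t(e$vectors)
}
Omega <- matrix(c(2, 0.5, 0.5, 1), 2, 2)     # toy residual covariance
Omega.half <- matrixPower.sketch(Omega, 1/2)
range(Omega.half %*% Omega.half - Omega)     # ~0: the square root is recovered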
-
-## * .adjustLeverage
-.adjustLeverage <- function(Omega, epsilon, ls.dmu, dOmega, vcov.param,
-                            index.Omega,
-                            name.endogenous, n.endogenous, name.varparam, n.varparam, n.cluster){
-
-    ## ** prepare
-    leverage <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
-                       dimnames = list(NULL, name.endogenous))
-
-    if(is.null(index.Omega)){
-        iIndex <- 1:n.endogenous
-        iOmegaM1 <- chol2inv(chol(Omega)) ## solve(Omega)
-        iOmegaM1.dOmega.OmegaM1 <- lapply(dOmega, function(x){iOmegaM1 %*% x %*% iOmegaM1})
-    }
-
-    ## ** compute
-    for(iC in 1:n.cluster){                 # iC <- 1
-        if(!is.null(index.Omega)){
-            iIndex <- index.Omega[[iC]]
-            iOmegaM1 <- chol2inv(chol(Omega[iIndex,iIndex,drop=FALSE]))
-            iOmegaM1.dOmega.OmegaM1 <- lapply(dOmega, function(x){iOmegaM1 %*% x[iIndex,iIndex] %*% iOmegaM1})
-        }
-        ## derivative of the score regarding Y
-        scoreY <- ls.dmu[[iC]] %*% iOmegaM1
-
-        for(iP in 1:n.varparam){ ## iP <- 1
-            scoreY[name.varparam[iP],] <- scoreY[name.varparam[iP],] + 2 * epsilon[iC,iIndex] %*% iOmegaM1.dOmega.OmegaM1[[name.varparam[iP]]]
-        }
-        ## leverage
-        leverage[iC,iIndex] <- colSums(vcov.param %*% ls.dmu[[iC]] * scoreY) ## NOTE: dimensions of ls.dmu and scoreY matches even when there are missing values
-                                        # same as
-                                        # diag(t(ls.dmu[[iC]])  %*% iVcov.param %*% scoreY)
-    }
-
-    return(leverage)            
-}
-
-## * .adjustMoment
-`.adjustMoment` <-
-    function(object, ...) UseMethod(".adjustMoment")
-
-## * .adjustMoment.lm
-.adjustMoment.lm <- function(object, Omega){
-
-    object$conditionalMoment$param["sigma2"] <- as.double(Omega)
-    return(object$conditionalMoment)
-    
-}
-
-## * .adjustMoment.gls
-.adjustMoment.gls <- function(object, Omega, ...){
-
-    ## ** extract information
-    class.cor <- object$conditionalMoment$skeleton$class.cor
-    class.var <- object$conditionalMoment$skeleton$class.var
-    name.corcoef <- object$conditionalMoment$skeleton$name.corcoef
-    name.otherVar <- object$conditionalMoment$skeleton$name.otherVar
-    name.varcoef <- object$conditionalMoment$skeleton$name.varcoef
-    ref.group <- object$conditionalMoment$skeleton$ref.group
-    M.corcoef <- object$conditionalMoment$skeleton$M.corcoef
-    name.endogenous <- object$conditionalMoment$skeleton$name.endogenous
-    n.endogenous <- object$conditionalMoment$skeleton$n.endogenous
-    
-    ## ** identify parameters
-
-    if(identical(class.var, "NULL")){
-        object$conditionalMoment$param["sigma2"] <- mean(diag(Omega))
-    }else{            
-        index.Sigma2 <- which(ref.group %in% name.otherVar == FALSE)
-        object$conditionalMoment$param["sigma2"] <- mean(diag(Omega)[index.Sigma2])
-
-        vec.k <- tapply(diag(Omega)/Omega[index.Sigma2,index.Sigma2], ref.group, mean)            
-        object$conditionalMoment$param[name.otherVar] <- vec.k[name.otherVar]
-    }
-
-    if(identical(class.cor, "NULL")){
-        ## do nothing
-    }else if("corCompSymm" %in% class.cor){
-        object$conditionalMoment$param[name.corcoef] <- mean(stats::cov2cor(Omega)[lower.tri(Omega)])
-    }else if("corSymm" %in% class.cor){
-        vec.cor <- tapply(stats::cov2cor(Omega)[lower.tri(Omega)],
-                          M.corcoef[lower.tri(Omega)],
-                          mean)            
-        object$conditionalMoment$param[name.corcoef] <- vec.cor[name.corcoef]
-    } 
-
-    ## ** update conditional moments
-    object$conditionalMoment$Omega <- .getVarCov2(object,
-                                                  param = object$conditionalMoment$param,
-                                                  attr.param = attributes(object$conditionalMoment$param),
-                                                  name.endogenous = name.endogenous,
-                                                  n.endogenous = n.endogenous,
-                                                  ref.group = ref.group)
-    
-    ## ** update first derivative of the conditional variance
-    object$conditionalMoment$dOmega <- skeletonDtheta(object, class.cor = class.cor, class.var = class.var, 
-                                                      sigma2.base0 = object$conditionalMoment$skeleton$sigma2.base0,
-                                                      Msigma2.base0 = object$conditionalMoment$skeleton$Msigma2.base0,
-                                                      M.corcoef = M.corcoef, ref.group = ref.group,
-                                                      index.lower.tri = object$conditionalMoment$skeleton$index.lower.tri,
-                                                      indexArr.lower.tri = object$conditionalMoment$skeleton$indexArr.lower.tri,
-                                                      name.endogenous =  name.endogenous, n.endogenous = n.endogenous,
-                                                      cluster = object$conditionalMoment$skeleton$cluster,
-                                                      n.cluster = object$conditionalMoment$skeleton$n.cluster,
-                                                      var.coef = object$conditionalMoment$param[name.varcoef],
-                                                      name.varcoef = name.varcoef, name.otherVar = name.otherVar,
-                                                      n.varcoef = object$conditionalMoment$skeleton$n.varcoef,
-                                                      cor.coef = object$conditionalMoment$param[name.corcoef],
-                                                      name.corcoef = name.corcoef,
-                                                      n.corcoef = object$conditionalMoment$skeleton$n.corcoef,
-                                                      update.mean = FALSE, update.variance = TRUE, ...)$dOmega
-
-    ## ** export
-    ## names(object$conditionalMoment)
-    ## object$conditionalMoment$param["sigma2"] <- as.double(Omega)
-    return(object$conditionalMoment)
-    
-}
-
-## * .adjustMoment.lme
-.adjustMoment.lme <- function(object, Omega){
-
-    name.rancoef <- attr(object$conditionalMoment$param,"ran.coef")
-    
-    ## ** Identify random effect
-    if(!identical(object$conditionalMoment$skeleton$class.cor,"NULL")){
-        stop("Does not know how to identify the correlation coefficients when corStruct is not NULL \n")
-    }
-    object$conditionalMoment$param[name.rancoef] <- mean(Omega[lower.tri(Omega)])
-
-    ## ** save derivative regarding random effect
-    save <- object$conditionalMoment$dOmega$ranCoef1
-
-    ## ** compute moments 
-    conditionalMoment <- .adjustMoment.gls(object, Omega = Omega - object$conditionalMoment$param["ranCoef1"],
-                                           name.rancoef = name.rancoef)
-
-    ## ** restore the derivative regarding the random effect
-    conditionalMoment$dOmega$ranCoef1 <- save
-    return(conditionalMoment)
-}
-
-## * .adjustMoment.lvmfit
-.adjustMoment.lvmfit <- function(object, Omega){
-
-    ## ** extract info
-    n.endogenous <- NROW(Omega)
-    df.param <- object$conditionalMoment$df.param
-    
-    index.matrix <- object$conditionalMoment$adjustMoment$index.matrix
-    index.Psi <- object$conditionalMoment$adjustMoment$index.Psi
-    A <- object$conditionalMoment$adjustMoment$A
-    name.var <- object$conditionalMoment$adjustMoment$name.var
-    n.rhs <- object$conditionalMoment$adjustMoment$n.rhs
-    index.LambdaB <- object$conditionalMoment$adjustMoment$index.LambdaB
-    name.endogenous <- object$conditionalMoment$adjustMoment$name.endogenous
-    name.latent <- object$conditionalMoment$adjustMoment$name.latent
-    
-    skeleton <- object$conditionalMoment$skeleton
-
-    param <- object$conditionalMoment$param
-    Lambda <- object$conditionalMoment$value$Lambda
-    iIB <- object$conditionalMoment$value$iIB
-    iIB.Lambda <- object$conditionalMoment$value$iIB.Lambda
-    dLambda <- object$conditionalMoment$dMoment.init$dLambda
-    dB <- object$conditionalMoment$dMoment.init$dB
-
-    ## ** right hand side of the equation
-    eq.rhs <- Omega[index.matrix$index]
-
-    ## ** left hand side of the equation
-    if(NROW(index.Psi)>0){
-        n.index.Psi <- NROW(index.Psi)
-        n.latent <- NROW(skeleton$Psi)        
-        Z <- iIB %*% Lambda
-
-        ## A = t(Z) Psi Z + Sigma
-        ## (t(Z) Psi Z)_{ij} = \sum_{k,l} Z_{k,i} Psi_{k,l} Z_{l,j}
-        for(iIndex in 1:n.rhs){ # iIndex <- 1
-            iRow <- index.matrix[iIndex,"row"]
-            iCol <- index.matrix[iIndex,"col"]
-            for(iPsi in 1:n.index.Psi){
-                iRowPsi <- index.Psi[iPsi,"row"]
-                iColPsi <- index.Psi[iPsi,"col"]
-                A[iIndex,skeleton$Psi[iRowPsi,iColPsi]] <- A[iIndex,skeleton$Psi[iRowPsi,iColPsi]] + Z[iRowPsi,iRow]*Z[iColPsi,iCol]
-            }
-        }
-    }
-
-    ## ** solve equation
-    ## microbenchmark::microbenchmark(svd = {asvd <- svd(A) ; asvd$v %*% diag(1/asvd$d) %*% t(asvd$u) %*% eq.rhs;},
-    ## qr = qr.coef(qr(A), eq.rhs),
-    ## Rcpp = OLS_cpp(A, eq.rhs),
-    ## RcppTry = try(OLS_cpp(A, eq.rhs)[,1], silent = TRUE),
-    ## Rcpp2 = OLS2_cpp(A, eq.rhs),
-    ## OLS1 = solve(crossprod(A), crossprod(A, eq.rhs)),
-    ## OLS2 = solve(t(A) %*% A) %*% t(A) %*% eq.rhs,
-    ## OLS_stats = stats::lsfit(x = A, y = eq.rhs),
-    ## OLS_LINPACK = .Call(stats:::C_Cdqrls, x = A, y = eq.rhs, tolerance = 1e-7, FALSE)$coefficients, times = 500)
-    if(lava.options()$method.estimate2=="svd"){
-        asvd <- svd(A)
-        iSolution <- try((asvd$v %*% diag(1/asvd$d) %*% t(asvd$u) %*% eq.rhs)[,1], silent = TRUE)
-    }else if(lava.options()$method.estimate2=="ols"){
-        iSolution <- try(OLS_cpp(A, eq.rhs)[,1], silent = TRUE)
-    }else{
-        stop("unknown OLS methods \n")
-    }
-    
-    if(inherits(iSolution, "try-error")){
-        if(abs(det(t(A) %*% A)) <  1e-10){            
-            stop("Singular matrix: cannot update the estimates \n")
-        }else{
-            stop(iSolution)
-        }
-    }
-
-    ## ** update parameters in conditional moments
-    object$conditionalMoment$param[name.var] <- setNames(iSolution, name.var)
-
-    ## ** update conditional moments
-    object$conditionalMoment$skeleton$toUpdate <- object$conditionalMoment$adjustMoment$toUpdate
-    object$conditionalMoment$value <- skeleton.lvmfit(object,
-                                                      param = param,
-                                                      data = NULL,
-                                                      name.endogenous = name.endogenous,
-                                                      name.latent = name.latent)
-    object$conditionalMoment$Omega <- Omega
-    
-
-    ## ** update first derivative of the conditional variance
-    if(length(index.LambdaB)>0){
-        object$conditionalMoment$dMoment.init$toUpdate[] <- FALSE
-        object$conditionalMoment$dMoment.init$toUpdate[index.LambdaB] <- TRUE
-
-        object$conditionalMoment$dOmega <- skeletonDtheta.lvmfit(object,
-                                                                name.endogenous = name.endogenous,
-                                                                name.latent = name.latent)$dOmega
-    }
-    ## ** export
-    return(object$conditionalMoment)
-}
-
-##----------------------------------------------------------------------
-### estimate2.R ends here
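The "svd" branch of the deleted .adjustMoment.lvmfit solves the least-squares system for the variance parameters via a pseudo-inverse. A self-contained sketch of that computation on toy inputs (OLS_cpp, the package's compiled alternative, is not reproduced here):

## least-squares solve of A x = b via the SVD pseudo-inverse, as in the "svd" branch
set.seed(10)
A <- matrix(rnorm(12), nrow = 6, ncol = 2)    # toy design matrix
b <- rnorm(6)                                 # toy right-hand side
asvd <- svd(A)
x.svd <- asvd$v %*% diag(1/asvd$d, nrow = length(asvd$d)) %*% t(asvd$u) %*% b
range(x.svd - qr.coef(qr(A), b))              # ~0: matches the QR solution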
diff --git a/R/package-butils-evalInParentEnv.R b/R/evalInParentEnv.R
similarity index 100%
rename from R/package-butils-evalInParentEnv.R
rename to R/evalInParentEnv.R
diff --git a/R/formula.R b/R/formula.R
new file mode 100644
index 0000000..768e3c5
--- /dev/null
+++ b/R/formula.R
@@ -0,0 +1,35 @@
+### formula.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 25 2019 (09:39) 
+## Version: 
+## Last-Updated: Jan 11 2022 (16:43) 
+##           By: Brice Ozenne
+##     Update #: 14
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+formula.varStruct <- function(x, ...){
+    return(attr(x, "formula"))
+}
+formula.corStruct <- function(x, ...){
+    return(attr(x, "formula"))
+}
+formula.reStruct <- function(x, ...){
+    n.random <- length(x)
+    group.random <- names(x)
+    ls.formula <- lapply(1:n.random, function(iN){ ## iN <- 1
+        stats::as.formula(paste0(deparse(attr(x[[iN]],"formula")),"|", group.random[iN]))
+    })
+    return(ls.formula)
+}
+
+
+######################################################################
+### formula.R ends here
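These new methods surface the formula that nlme stores as an attribute on its variance and correlation structures. A small usage sketch, assuming the attribute layout of current nlme (the methods themselves are internal, so the attribute is read directly):

library(nlme)
vs <- varIdent(form = ~ 1 | time)      # variance structure
cs <- corCompSymm(form = ~ 1 | Id)     # correlation structure
attr(vs, "formula")                    # ~1 | time, what formula.varStruct() returns
attr(cs, "formula")                    # ~1 | Id,   what formula.corStruct() returns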
diff --git a/R/getVarCov2.R b/R/getVarCov2.R
deleted file mode 100644
index 013ca07..0000000
--- a/R/getVarCov2.R
+++ /dev/null
@@ -1,194 +0,0 @@
-### getVarCov2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar 27 2018 (09:55) 
-## Version: 
-## Last-Updated: jul 25 2019 (10:13) 
-##           By: Brice Ozenne
-##     Update #: 33
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * getVarCov2
-
-#' @title Reconstruct the Conditional Variance Covariance Matrix
-#' @description Reconstruct the conditional variance-covariance matrix from an nlme or lvm model.
-#' Only compatible with specific correlation and variance structures.
-#' @name getVarCov2
-#'
-#' @param object a \code{gls} or \code{lme} object
-#' @param param [numeric vector] values for the model parameters.
-#' @param data [data.frame] the data set.
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' @param ... [internal] only used by the generic method.
-#' 
-#' @details The compound symmetry variance-covariance matrix in a gls model is of the form:
-#' \tabular{cccc}{
-#' \eqn{\Sigma =} \tab \eqn{\sigma^2} \tab \eqn{\sigma^2 \rho} \tab \eqn{\sigma^2 \rho} \cr
-#' \tab . \tab \eqn{\sigma^2} \tab \eqn{\sigma^2 \rho} \cr
-#' \tab . \tab . \tab \eqn{\sigma^2}
-#' }
-#'
-#' The unstructured variance-covariance matrix in a gls model is of the form:
-#'  \tabular{cccc}{
-#' \eqn{\Sigma =} \tab \eqn{\sigma^2} \tab \eqn{\sigma^2 \sigma_2 \rho_{1,2}} \tab \eqn{\sigma^2 \sigma_3 \rho_{1,3}} \cr
-#' \tab . \tab \eqn{\sigma^2 \sigma_2^2} \tab \eqn{\sigma^2 \sigma_2 \sigma_3 \rho_{2,3}} \cr
-#' \tab . \tab . \tab \eqn{\sigma^2 \sigma_3^2}
-#' }
-#' @return A list containing the residual variance-covariance matrix in the element Omega.
-#' 
-#' @examples
-#' 
-#' ## simulate data 
-#' library(nlme)
-#' n <- 5e1
-#' mSim <- lvm(c(Y1~1*eta,Y2~1*eta,Y3~1*eta,eta~G))
-#' latent(mSim) <- ~eta
-#' transform(mSim,Id~Y1) <- function(x){1:NROW(x)}
-#' set.seed(10)
-#' dW <- lava::sim(mSim,n,latent = FALSE)
-#' dW <- dW[order(dW$Id),,drop=FALSE]
-#' dL <- reshape2::melt(dW,id.vars = c("G","Id"), variable.name = "time")
-#' dL <- dL[order(dL$Id),,drop=FALSE]
-#' dL$Z1 <- rnorm(NROW(dL))
-#' dL$time.num <- as.numeric(as.factor(dL$time))
-#' 
-#' #### iid model #### 
-#' e1.gls <- nlme::gls(Y1 ~ G, data = dW, method = "ML")
-#' getVarCov2(e1.gls, cluster = 1:n)$Omega
-#' 
-#' #### heteroscedasticity ####
-#' dW$group <- rbinom(n, size = 1, prob = 1/2)
-#' dW$repetition <- as.numeric(as.factor(dW$group))
-#' e2a.gls <- nlme::gls(Y1 ~ G, data = dW, method = "ML",
-#'                     weights = varIdent(form =~ repetition|group))
-#' getVarCov2(e2a.gls, cluster = 1:n)$Omega
-#'
-#' 
-#' e2b.gls <- nlme::gls(value ~ 0+time + time:G,
-#'                   weights = varIdent(form = ~ time.num|time),
-#'                    data = dL, method = "ML")
-#' getVarCov2(e2b.gls, cluster = "Id")$Omega
-#'
-#' #### compound symmetry ####
-#' e3.gls <- nlme::gls(value ~ time + G,
-#'                    correlation = corCompSymm(form = ~1| Id),
-#'                    data = dL, method = "ML")
-#' getVarCov2(e3.gls)$Omega
-#' 
-#' #### unstructured ####
-#' e4.gls <- nlme::gls(value ~ time,
-#'                     correlation = corSymm(form = ~time.num| Id),
-#'                    weights = varIdent(form = ~ 1|time),
-#'                     data = dL, method = "ML")
-#' getVarCov2(e4.gls)$Omega
-#'
-#' #### lvm model ####
-#' m <- lvm(c(Y1~1*eta,Y2~1*eta,Y3~1*eta,eta~G))
-#' latent(m) <- ~eta
-#' e <- estimate(m, dW)
-#' getVarCov2(e)
-#' 
-#' @concept extractor
-#' 
-#' @export
-`getVarCov2` <-
-    function(object, ...) UseMethod("getVarCov2")
-
-## * getVarCov2.gls
-#' @rdname getVarCov2
-#' @export
-getVarCov2.gls <- function(object, data = NULL, cluster, ...){
-
-    ## ** data
-    if(is.null(data)){
-        data <- extractData(object, design.matrix = FALSE, as.data.frame = TRUE,
-                            envir = parent.env(environment()))
-    }
-
-    ## ** endogenous variable
-    formula.object <- .getFormula2(object)
-    name.Y <- all.vars(stats::update(formula.object, ".~1"))
-
-    ## ** extractors   
-    res.cluster <- .getCluster2(object, data = data, cluster = cluster)
-    res.param <- .coef2(object)
-    res.index <- .getIndexOmega2(object,
-                                 param = res.param,
-                                 attr.param = attributes(res.param),
-                                 name.Y = name.Y,
-                                 cluster = res.cluster$cluster,
-                                 levels.cluster = res.cluster$levels.cluster,
-                                 data = data)
-    res.Omega <- .getVarCov2(object,
-                             param = res.param,
-                             attr.param = attributes(res.param),
-                             name.endogenous = res.index$name.endogenous,
-                             n.endogenous = res.index$n.endogenous,
-                             ref.group = res.index$ref.group)
-
-    ## ** export
-    return(c(res.cluster,
-             list(param = res.param),
-             res.index,
-             list(Omega = res.Omega))
-           )
-}
-
-## * getVarCov2.lme
-#' @rdname getVarCov2
-#' @export
-getVarCov2.lme <- getVarCov2.gls
-
-## * getVarCov2.lvmfit
-#' @rdname getVarCov2
-#' @export
-getVarCov2.lvmfit <- function(object, data = NULL, param = NULL, ...){
-
-    if(inherits(object, "lvmfit2")){
-        return(object$sCorrect$Omega)
-    }else{
-        name.latent <- latent(object)
-        n.latent <- length(name.latent)
-
-        ## ** Prepare
-        if(is.null(object$conditionalMoment)){
-            name.endogenous <- endogenous(object)
-            if(is.null(param)){
-                param <- coef(object)
-            }
-            if(is.null(data)){
-                data <- as.data.frame(object$data$model.frame)
-            }
-
-            object$conditionalMoment <- conditionalMoment(object,
-                                                          data = data,
-                                                          param = param,
-                                                          name.endogenous = name.endogenous,
-                                                          name.latent = name.latent,
-                                                          first.order = FALSE,
-                                                          second.order = FALSE,
-                                                          usefit = TRUE)
-        }
-
-        ## ** Compute Omega
-        if(n.latent>0){
-            Omega <- object$conditionalMoment$value$tLambda.tiIB.Psi.iIB %*% object$conditionalMoment$value$Lambda + object$conditionalMoment$value$Sigma
-        }else{
-            Omega <- object$conditionalMoment$value$Sigma
-        }
-
-        return(Omega)
-    }
-}
-
-
-
-##----------------------------------------------------------------------
-### getVarCov2.R ends here
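The compound-symmetry and unstructured layouts documented in the deleted roxygen details are easy to materialise directly; a toy construction with hypothetical parameter values (sigma2, rho, k and R below are all made up):

## compound symmetry: sigma^2 on the diagonal, sigma^2 * rho off the diagonal
sigma2 <- 2; rho <- 0.5; p <- 3
Omega.cs <- sigma2 * ((1 - rho) * diag(p) + rho * matrix(1, p, p))
## unstructured: sigma^2 k_i k_j rho_ij with k = (1, sigma_2, sigma_3)
k <- c(1, 1.2, 0.8)
R <- matrix(c(1, 0.3, 0.2,
              0.3, 1, 0.4,
              0.2, 0.4, 1), 3, 3)
Omega.un <- sigma2 * diag(k) %*% R %*% diag(k)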
diff --git a/R/iid2.R b/R/iid2.R
deleted file mode 100644
index 6e373ea..0000000
--- a/R/iid2.R
+++ /dev/null
@@ -1 +0,0 @@
-### iid2.R --- 
#----------------------------------------------------------------------
## author: Brice Ozenne
## created: okt 12 2017 (13:16) 
## Version: 
## last-updated: jul 31 2020 (09:57) 
##           By: Brice Ozenne
##     Update #: 483
#----------------------------------------------------------------------
## 
### Commentary: 
## 
### Change Log:
#----------------------------------------------------------------------
## 
### Code:

## * Documentation - iid2
#' @title  Extract corrected i.i.d. decomposition
#' @description  Extract corrected i.i.d. decomposition from a Gaussian linear model.
#' @name iid2
#'
#' @param object a linear model or a latent variable model
#' @param param [named numeric vector] the fitted parameters.
#' @param data [data.frame] the data set.
#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias? Only relevant if the \code{sCorrect} function has not yet been applied to the object.
#' @param robust [logical] if \code{FALSE}, the i.i.d. decomposition is rescaled so that it reproduces the model-based standard error (instead of the robust standard error).
#' @param ... arguments to be passed to \code{sCorrect}.
#'
#' @details If argument \code{param} or \code{data} is not null, then the small sample correction is recomputed to correct the influence function.
#'
#' @seealso \code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
#'
#' @return A matrix containing the 1st order influence function relative to each sample (in rows)
#' and each model coefficient (in columns).
#' 
#' @examples
#' n <- 5e1
#' p <- 3
#' X.name <- paste0("X",1:p)
#' link.lvm <- paste0("Y~",X.name)
#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
#'
#' m <- lvm(formula.lvm)
#' distribution(m,~Id) <- Sequence.lvm(0)
#' set.seed(10)
#' d <- sim(m,n)
#'
#' ## linear model
#' e.lm <- lm(formula.lvm,data=d)
#' iid.tempo <- iid2(e.lm, bias.correct = FALSE)
#' range(iid.tempo[,1:4]-iid(e.lm))
#' 
#' ## latent variable model
#' e.lvm <- estimate(lvm(formula.lvm),data=d)
#' iid.tempo <- iid2(e.lvm, bias.correct = FALSE)
#' range(iid.tempo-iid(e.lvm))
#' ## difference due to the use of the observed info matrix vs. the expected one.
#'
#' ## rescale i.i.d using model-based standard error
#' iid.tempo <- iid2(e.lvm, robust = FALSE, bias.correct = FALSE)
#' diag(crossprod(iid.tempo))-diag(vcov(e.lvm))
#'
#' @concept small sample inference
#' @concept iid decomposition
#' @export
`iid2` <-
  function(object, ...) UseMethod("iid2")

## * iid2.lm
#' @rdname iid2
#' @export
iid2.lm <- function(object,
                    param = NULL, data = NULL, bias.correct = TRUE, ...){

    sCorrect(object, param = param, data = data, df = FALSE) <- bias.correct

    iid.tempo <- iid2(object, ...)

    ## ** keep track of the NA
    if(!is.null(object$na.action)){
        n.NA <- length(object$na.action)
        iid.withNA <- matrix(NA, nrow = NROW(iid.tempo) + n.NA, ncol = NCOL(iid.tempo),
                             dimnames = list(NULL, colnames(iid.tempo)))
        iid.withNA[-object$na.action,] <- iid.tempo
    }else{
        iid.withNA <- iid.tempo
    }
    
    ## ** export
    return(iid.withNA)
}

## * iid2.gls
#' @rdname iid2
#' @export
iid2.gls <- function(object, cluster = NULL,
                     param = NULL, data = NULL, bias.correct = TRUE, ...){

    sCorrect(object, cluster = cluster, param = param, data = data, df = FALSE) <- bias.correct

    if(!is.null(cluster)){
        cluster <- object$sCorrect$args$cluster
    }

### ** export
    return(iid2(object, ...))
}

## * iid2.lme
#' @rdname iid2
#' @export
iid2.lme <- iid2.lm

## * iid2.lvmfit
#' @rdname iid2
#' @export
iid2.lvmfit <- iid2.lm


## * iid2.lm2
#' @rdname iid2
#' @export
iid2.lm2 <- function(object, cluster = NULL,
                     param = NULL, data = NULL, robust = TRUE, ...){

    ## ** compute the score
    if(!is.null(param) || !is.null(data)){
        args <- object$sCorrect$args
        args$df <- FALSE
        object$sCorrect <- do.call(sCorrect,
                                   args = c(list(object, param = param, data = data),
                                            args))
    }else if(is.null(object$sCorrect$score)){
        stop("set argument \'score\' to TRUE when calling sCorrect \n")
    }

    ## ** compute iid
    if(is.null(cluster)){
        scoreCluster <- score2(object)
    }else{
        n.obs <- stats::nobs(object)

        if(length(cluster) != n.obs){
            stop("Argument \'cluster\' does not have the correct length (n=",n.obs,") \n")
        }
        scoreIndiv <- score2(object)
        scoreCluster <- do.call("rbind",tapply(1:n.obs,cluster, function(iIndex){
            colSums(scoreIndiv[iIndex,,drop=FALSE])
        }))
    }
    res <- scoreCluster %*% object$sCorrect$vcov.param
    if(robust == FALSE){
        vec.sigma <- sqrt(diag(object$sCorrect$vcov.param))
        vec.sigma.robust <- sqrt(apply(res^2,2,sum))
        res <- sweep(res, MARGIN = 2, FUN = "*", STATS = vec.sigma/vec.sigma.robust)
    }
    
    ## ** export
    return(res)

}

## * iid2.gls2
#' @rdname iid2
#' @export
iid2.gls2 <- iid2.lm2

## * iid2.lme2
#' @rdname iid2
#' @export
iid2.lme2 <- iid2.lm2

## * iid2.lvmfit2
#' @rdname iid2
#' @export
iid2.lvmfit2 <- function(object, cluster = NULL, data = NULL, ...){

    ## try to find cluster in data
    if(!is.null(object$cluster)){
        if(!is.null(cluster)){
            stop("Argument \'cluster\' must be NULL when the object already contain a cluster \n")
        }
        cluster <- object$cluster
    }else if(!is.null(cluster) && length(cluster) == 1){
        if(!is.null(data)){
            if(cluster %in%  names(data) == FALSE){
                stop("Could not find variable ",cluster," (argument \'cluster\') in argument \'data\' \n")
            }else{
                cluster <- data[[cluster]]
            }            
        }else{
            if(cluster %in% names(object$data$model.frame) == FALSE){
                stop("Could not find variable ",cluster," (argument \'cluster\') in argument object$data \n")
            }else{
                cluster <- object$data$model.frame[[cluster]]
            }            
        }
    }

    return(iid2.lm2(object = object, data = data, cluster = cluster, ...))
}



##----------------------------------------------------------------------
### iid2.R ends here
\ No newline at end of file
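For the robust = FALSE branch of iid2.lm2 above: the influence function is rescaled column-wise so that its squared column sums reproduce the model-based variances. A self-contained sketch with stand-in matrices (both inputs are hypothetical, not taken from a fitted model):

set.seed(1)
score      <- matrix(rnorm(50 * 2), 50, 2)   # stand-in for score2(object)
vcov.param <- diag(c(0.04, 0.09))            # stand-in for object$sCorrect$vcov.param
res <- score %*% vcov.param                  # influence function (one row per cluster)
vec.sigma        <- sqrt(diag(vcov.param))   # model-based standard errors
vec.sigma.robust <- sqrt(colSums(res^2))     # robust standard errors
res <- sweep(res, MARGIN = 2, FUN = "*", STATS = vec.sigma/vec.sigma.robust)
colSums(res^2)                               # now equals diag(vcov.param)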
diff --git a/R/iidJack.R b/R/iidJack.R
index 7da4781..11d0245 100644
--- a/R/iidJack.R
+++ b/R/iidJack.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: jun 23 2017 (09:15) 
 ## Version: 
-## last-updated: okt 23 2018 (13:57) 
+## last-updated: Jan 11 2022 (16:08) 
 ##           By: Brice Ozenne
-##     Update #: 331
+##     Update #: 333
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -37,53 +37,15 @@
 #' 
 #' @examples
 #' n <- 20
-#'
-#' #### glm ####
-#' set.seed(10)
-#' m <- lvm(y~x+z)
-#' distribution(m, ~y+z) <- binomial.lvm("logit")
-#' d <- lava::sim(m,n)
-#' g <- glm(y~x+z,data=d,family="binomial")
-#' iid1 <- iidJack(g, cpus = 1)
-#' iid2 <- lava::iid(g)
-#' quantile(iid1-iid2)
-#' vcov(g)
-#' colSums(iid2^2)
-#' colSums(iid1^2)
-#' 
-#' #### Cox model ####
-#' \dontrun{
-#' library(survival)
-#' data(Melanoma, package = "riskRegression")
-#' m <- coxph(Surv(time,status==1)~ici+age, data = Melanoma, x = TRUE, y = TRUE)
-#'
-#' ## require riskRegression > 1.4.3
-#' if(utils::packageVersion("riskRegression") > "1.4.3"){
-#' library(riskRegression)
-#' iid1 <- iidJack(m)
-#' iid2 <- iidCox(m)$IFbeta
-#'
-#' apply(iid1,2,sd)
-#'
-#' print(iid2)
-#' 
-#' apply(iid2,2,sd)
-#'   }
-#' }
-#' 
-#' #### LVM ####
-#' \dontrun{
 #' set.seed(10)
-#'
 #' mSim <- lvm(c(Y1,Y2,Y3,Y4,Y5) ~ 1*eta)
 #' latent(mSim) <- ~eta
 #' categorical(mSim, K=2) <- ~G
 #' transform(mSim, Id ~ eta) <- function(x){1:NROW(x)}
 #' dW <- lava::sim(mSim, n, latent = FALSE)
-#' dL <- reshape2::melt(dW, id.vars = c("G","Id"),
-#'                      variable.name = "time", value.name = "Y")
-#' dL$time <- gsub("Y","",dL$time)
 #'
+#' #### LVM ####
+#' \dontrun{
 #' m1 <- lvm(c(Y1,Y2,Y3,Y4,Y5) ~ 1*eta)
 #' latent(m1) <- ~eta
 #' regression(m1) <- eta ~ G
@@ -98,15 +60,6 @@
 #' quantile(iid2 - iid1)
 #' }
 #'
-#' #### lme ####
-#' \dontrun{
-#' library(nlme)
-#' e2 <- lme(Y~G+time, random = ~1|Id, weights = varIdent(form =~ 1|Id), data = dL)
-#' e2 <- lme(Y~G, random = ~1|Id, data = dL)
-#'
-#' iid3 <- iidJack(e2)
-#' apply(iid3,2,sd)
-#' }
 #'
 #' @concept iid decomposition
 #' @export
@@ -164,11 +117,7 @@ iidJack.default <- function(object, data = NULL, grouping = NULL, cpus = 1,
 
     ## ** define the grouping level for the data
     if(is.null(grouping)){
-        if(any(class(object)%in%c("lme","gls","nlme"))){
-            myData$XXXgroupingXXX <- as.vector(apply(object$groups,2,interaction))
-        }else{
-            myData$XXXgroupingXXX <- 1:n.obs
-        }
+        myData$XXXgroupingXXX <- 1:n.obs
         grouping <- "XXXgroupingXXX"        
     }else{
         if(length(grouping)>1){
diff --git a/R/information2.R b/R/information2.R
deleted file mode 100644
index 11d698d..0000000
--- a/R/information2.R
+++ /dev/null
@@ -1,535 +0,0 @@
-### information2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: feb 19 2018 (14:17) 
-## Version: 
-## Last-Updated: jul 31 2020 (10:44) 
-##           By: Brice Ozenne
-##     Update #: 276
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * Documentation - information2
-#' @title  Extract the Full Information Matrix
-#' @description  Extract the full information matrix from a Gaussian linear model.
-#' @name information2
-#'
-#' @param object a linear model or a latent variable model
-#' @param ... arguments to be passed to \code{vcov2}.
-#'
-#' @seealso \code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
-#'
-#' @return A matrix.
-#' 
-#' @examples
-#' n <- 5e1
-#' p <- 3
-#' X.name <- paste0("X",1:p)
-#' link.lvm <- paste0("Y~",X.name)
-#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
-#'
-#' m <- lvm(formula.lvm)
-#' distribution(m,~Id) <- Sequence.lvm(0)
-#' set.seed(10)
-#' d <- lava::sim(m,n)
-#'
-#' ## linear model
-#' e.lm <- lm(formula.lvm,data=d)
-#' info.tempo <- vcov2(e.lm, bias.correct = TRUE)
-#' info.tempo[names(coef(e.lm)),names(coef(e.lm))] - vcov(e.lm)
-#'
-#' ## latent variable model
-#' e.lvm <- estimate(lvm(formula.lvm),data=d)
-#' vcov.tempo <- vcov2(e.lvm, bias.correct = FALSE)
-#' round(vcov.tempo %*% information(e.lvm), 5)
-#'
-#' @concept small sample inference
-#' @export
-`information2` <-
-  function(object, ...) UseMethod("information2")
-
-## * information2.lm
-#' @rdname information2
-#' @export
-information2.lm <- function(object, ...){
-    return(solve(vcov2(object, ...)))
-}
-
-## * information2.gls
-#' @rdname information2
-#' @export
-information2.gls <- information2.lm
-
-## * information2.lme
-#' @rdname information2
-#' @export
-information2.lme <- information2.lm
-
-## * information2.lvmfit
-#' @rdname information2
-#' @export
-information2.lvmfit <- information2.lm
-
-## * information2.lm2
-#' @rdname information2
-#' @export
-information2.lm2 <- function(object, ...){
-    return(solve(vcov2(object, ...)))
-}
-
-## * information2.gls2
-#' @rdname information2
-#' @export
-information2.gls2 <- information2.lm2
-
-## * information2.lme2
-#' @rdname information2
-#' @export
-information2.lme2 <- information2.lm2
-
-## * information2.lvmfit
-#' @rdname information2
-#' @export
-information2.lvmfit2 <- information2.lm2
-
-## * .information2
-#' @title Compute the Expected Information Matrix From the Conditional Moments
-#' @description Compute the expected information matrix from the conditional moments.
-#' @name information2-internal
-#' 
-#' @details \code{.information2} will perform the computation individually when the
-#' argument \code{index.Omega} is not null.
-#' 
-#' @keywords internal
-.information2 <- function(dmu, dOmega,
-                          Omega, n.corrected,
-                          index.Omega, leverage, n.cluster,
-                          grid.meanparam, n.grid.meanparam,
-                          grid.varparam, n.grid.varparam,
-                          name.param, n.param){
-
-### ** Prepare
-    test.global <- is.null(index.Omega)
-    if(!test.global){
-        OmegaM1 <- lapply(1:n.cluster, function(iC){
-            return(chol2inv(chol(Omega[index.Omega[[iC]],index.Omega[[iC]]])))
-        })
-    }else{
-        OmegaM1 <- chol2inv(chol(Omega))
-    }
-    
-    Info <- matrix(0, nrow = n.param, ncol = n.param,
-                   dimnames = list(name.param,name.param))
-    if(length(dmu)>0){
-        index.meanparam <- 1:n.grid.meanparam
-    }else{
-        index.meanparam <- NULL
-    }
-    if(length(dOmega)>0){
-        index.varparam <- 1:n.grid.varparam
-    }else{
-        index.varparam <- NULL
-    } 
-
-### ** Global    
-    if(test.global){
-        ## *** Information relative to the mean parameters
-        for(iG in index.meanparam){ # iG <- 1
-            iP1 <- grid.meanparam[iG,1]
-            iP2 <- grid.meanparam[iG,2]
-
-            Info[iP1,iP2] <- Info[iP1,iP2] + sum(dmu[[iP1]] %*% OmegaM1 * dmu[[iP2]])
-        }
-
-        ## *** Information relative to the variance parameters
-        for(iG in index.varparam){ # iG <- 1
-            iP1 <- grid.varparam[iG,1]
-            iP2 <- grid.varparam[iG,2]
-
-            iDiag <- diag(OmegaM1 %*% dOmega[[iP1]] %*% OmegaM1 %*% dOmega[[iP2]])
-            Info[iP1,iP2] <- Info[iP1,iP2] + 1/2*sum(iDiag*n.corrected)
-        }
-    }
-
-### ** Individual specific (missing data)
-    if(!test.global){
-        ## *** Information relative to the mean parameters
-        for(iC in 1:n.cluster){ # iC <- 1
-            iIndex <- index.Omega[[iC]]
-
-            for(iG in index.meanparam){ # iG <- 1
-                iP1 <- grid.meanparam[iG,1]
-                iP2 <- grid.meanparam[iG,2]
-
-                Info[iP1,iP2] <- Info[iP1,iP2] + sum(dmu[[iP1]][iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]] * dmu[[iP2]][iC,iIndex,drop=FALSE])            
-            }
-
-            ## *** Information relative to the variance parameters
-            for(iG in index.varparam){ # iG <- 1
-                iP1 <- grid.varparam[iG,1]
-                iP2 <- grid.varparam[iG,2]
-                iDiag <- diag(OmegaM1[[iC]] %*% dOmega[[iP1]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% dOmega[[iP2]][iIndex,iIndex,drop=FALSE])
-                Info[iP1,iP2] <- Info[iP1,iP2] + 1/2 * sum(iDiag * (1 - leverage[iC,iIndex]))
-            }
-        }        
-    }
-
-    ## ** Make Info a symmetric matrix
-    Info <- symmetrize(Info, update.upper = NULL)
-        
-    ## ** export
-    return(Info)
-}
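For a univariate Gaussian N(mu, sigma2) with n i.i.d. observations, the two blocks assembled by .information2 above reduce to the textbook values n/sigma2 and n/(2*sigma2^2). A toy check of that reduction (all numbers hypothetical, leverage terms omitted):

n <- 50; sigma2 <- 2
OmegaM1 <- solve(matrix(sigma2, 1, 1))
dmu     <- matrix(1, nrow = n, ncol = 1)  # d mu_i / d mu = 1 for every observation
dOmega  <- matrix(1, 1, 1)                # d Omega / d sigma2 = 1
info.mean <- sum(dmu %*% OmegaM1 * dmu)                                      # mean block
info.var  <- 1/2 * sum(diag(OmegaM1 %*% dOmega %*% OmegaM1 %*% dOmega)) * n  # variance block
c(info.mean, n/sigma2)            # both 25
c(info.var, n/(2 * sigma2^2))     # both 6.25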
-
-## * .hessian2
-#' @title Compute the Hessian Matrix From the Conditional Moments
-#' @description Compute the Hessian matrix from the conditional moments.
-#' @name information2-internal
-#' 
-#' @details \code{.hessian2} will perform the computation individually when the
-#' argument \code{index.Omega} is not null.
-#' 
-#' @keywords internal
-.hessian2 <- function(dmu, d2mu, dOmega, d2Omega, 
-                      Omega, n.corrected,
-                      index.Omega, leverage, n.cluster,
-                      grid.meanparam, n.grid.meanparam,
-                      grid.varparam, n.grid.varparam,
-                      name.param, n.param, residuals){
-
-    ## ** Prepare
-    test.global <- is.null(index.Omega)
-    if(!test.global){
-        OmegaM1 <- lapply(1:n.cluster, function(iC){
-            return(chol2inv(chol(Omega[index.Omega[[iC]],index.Omega[[iC]]])))
-        })
-    }else{
-        OmegaM1 <- chol2inv(chol(Omega))
-    }
-    
-    hessian <- array(0, dim = c(n.param, n.param, n.cluster),
-                     dimnames = list(name.param,name.param,NULL))
-    if(length(dmu)>0){
-        index.meanparam <- 1:n.grid.meanparam
-    }else{
-        index.meanparam <- NULL
-    }
-    if(length(dOmega)>0){
-        index.varparam <- 1:n.grid.varparam
-    }else{
-        index.varparam <- NULL
-    }
-
-    if((n.grid.meanparam>0) && (n.grid.varparam>0)){
-        name.meanparam <- unique(unlist(grid.meanparam[,c("Var1","Var2")]))
-        name.varparam <- unique(unlist(grid.varparam[,c("Var1","Var2")]))
-
-        grid.hybridparam <- .combination(name.meanparam,name.varparam)
-        n.hybridparam <- NROW(grid.hybridparam)
-        index.hybridparam <- 1:n.hybridparam
-    }else{
-        grid.hybridparam <- NULL
-        n.hybridparam <- 0
-        index.hybridparam <- NULL
-    }
-
-    ## ** Global    
-    if(test.global){
-        ## *** second derivative relative to the mean parameters
-        for(iG in index.meanparam){ # iG <- 1
-            iP1 <- grid.meanparam[iG,1]
-            iP2 <- grid.meanparam[iG,2]
-
-            if(grid.meanparam[iG,"deriv12"]){
-                term1 <- rowSums((d2mu[[iP1]][[iP2]] %*% OmegaM1) * residuals)
-            }else if(grid.meanparam[iG,"deriv21"]){
-                term1 <- rowSums((d2mu[[iP2]][[iP1]] %*% OmegaM1) * residuals)
-            }else{
-                term1 <- 0
-            }
-            term2 <- -rowSums((dmu[[iP1]] %*% OmegaM1) * dmu[[iP2]])
-            hessian[iP1,iP2,] <- hessian[iP1,iP2,] + term1 + term2
-            hessian[iP2,iP1,] <- hessian[iP1,iP2,]
-        }
-
-        ## *** second derivative relative to the variance parameters
-        for(iG in index.varparam){ # iG <- 1
-            iP1 <- grid.varparam[iG,1]
-            iP2 <- grid.varparam[iG,2]
-
-            term1a <- - diag(OmegaM1 %*% dOmega[[iP1]] %*% OmegaM1 %*% dOmega[[iP2]])
-            term2 <- - rowSums((residuals %*% OmegaM1 %*% dOmega[[iP2]] %*% OmegaM1 %*% dOmega[[iP1]] %*% OmegaM1) * residuals)
-            if(grid.varparam[iG,"deriv12"]){
-                term1b <- diag(OmegaM1 %*% d2Omega[[iP1]][[iP2]])
-                term3 <- 1/2 * rowSums((residuals %*% OmegaM1 %*% d2Omega[[iP1]][[iP2]] %*% OmegaM1) * residuals)
-            }else if(grid.varparam[iG,"deriv21"]){
-                term1b <- diag(OmegaM1 %*% d2Omega[[iP2]][[iP1]])
-                term3 <- 1/2 * rowSums((residuals %*% OmegaM1 %*% d2Omega[[iP2]][[iP1]] %*% OmegaM1) * residuals)
-            }else{
-                term1b <- 0
-                term3 <- 0
-            }
-            hessian[iP1,iP2,] <- hessian[iP1,iP2,] - 1/2 * rowSums( sweep(1-leverage, FUN = "*", STATS = term1a + term1b, MARGIN = 2) ) + term2 + term3
-            hessian[iP2,iP1,] <- hessian[iP1,iP2,]
-        }
-        ## *** second derivative relative to the mean and variance parameters
-        for(iG in index.hybridparam){ # iG <- 1
-            iP1 <- grid.hybridparam[iG,1]
-            iP2 <- grid.hybridparam[iG,2]
-
-            term1 <- - rowSums((dmu[[iP1]] %*% OmegaM1 %*% dOmega[[iP2]] %*% OmegaM1) * residuals)
-            if(!is.null(dmu[[iP2]]) && !is.null(dOmega[[iP1]])){
-                term2 <- - rowSums((dmu[[iP2]] %*% OmegaM1 %*% dOmega[[iP1]] %*% OmegaM1) * residuals)
-            }else{
-                term2 <- 0
-            }
-            
-            hessian[iP1,iP2,] <- hessian[iP1,iP2,] + term1 + term2
-            hessian[iP2,iP1,] <- hessian[iP1,iP2,]
-        }
-    }
-
-    ## ** Individual specific (missing data)
-    if(!test.global){
-
-        ## *** Information relative to the mean parameters
-        for(iC in 1:n.cluster){ # iC <- 1
-            iIndex <- index.Omega[[iC]]
-            
-            ## *** second derivative relative to the mean parameters
-            for(iG in index.meanparam){ # iG <- 1
-                iP1 <- grid.meanparam[iG,1]
-                iP2 <- grid.meanparam[iG,2]
-
-                if(grid.meanparam[iG,"deriv12"]){
-                    term1 <- sum((d2mu[[iP1]][[iP2]][iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                }else if(grid.meanparam[iG,"deriv21"]){
-                    term1 <- sum((d2mu[[iP2]][[iP1]][iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                }else{
-                    term1 <- 0
-                }
-                term2 <- -sum((dmu[[iP1]][iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * dmu[[iP2]][iC,iIndex,drop=FALSE])
-                hessian[iP1,iP2,iC] <- hessian[iP1,iP2,iC] + term1 + term2
-                hessian[iP2,iP1,iC] <- hessian[iP1,iP2,iC]
-            }
-
-            ## *** second derivative relative to the variance parameters
-            for(iG in index.varparam){ # iG <- 1
-                iP1 <- grid.varparam[iG,1]
-                iP2 <- grid.varparam[iG,2]
-
-                term1a <- - diag(OmegaM1[[iC]] %*% dOmega[[iP1]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% dOmega[[iP2]][iIndex,iIndex,drop=FALSE])
-                term2 <- - sum((residuals[iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% dOmega[[iP2]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% dOmega[[iP1]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                if(grid.varparam[iG,"deriv12"]){
-                    term1b <- diag(OmegaM1[[iC]] %*% d2Omega[[iP1]][[iP2]][iIndex,iIndex,drop=FALSE])
-                    term3 <- 1/2 * sum((residuals[iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% d2Omega[[iP1]][[iP2]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                }else if(grid.varparam[iG,"deriv21"]){
-                    term1b <- diag(OmegaM1[[iC]] %*% d2Omega[[iP2]][[iP1]][iIndex,iIndex,drop=FALSE])
-                    term3 <- 1/2 * sum((residuals[iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% d2Omega[[iP2]][[iP1]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                }else{
-                    term1b <- 0
-                    term3 <- 0
-                }
-                hessian[iP1,iP2,iC] <- hessian[iP1,iP2,iC] - 1/2 * sum( (1-leverage[iC,iIndex,drop=FALSE]) * (term1a + term1b) ) + term2 + term3
-                hessian[iP2,iP1,iC] <- hessian[iP1,iP2,iC]
-            }
-
-            ## *** second derivative relative to the mean and variance parameters
-            for(iG in index.hybridparam){ # iG <- 1
-                iP1 <- grid.hybridparam[iG,1]
-                iP2 <- grid.hybridparam[iG,2]
-
-                term1 <- - sum((dmu[[iP1]][iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% dOmega[[iP2]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                if(!is.null(dmu[[iP2]]) && !is.null(dOmega[[iP1]])){
-                    term2 <- - sum((dmu[[iP2]][iC,iIndex,drop=FALSE] %*% OmegaM1[[iC]] %*% dOmega[[iP1]][iIndex,iIndex,drop=FALSE] %*% OmegaM1[[iC]]) * residuals[iC,iIndex,drop=FALSE])
-                }else{
-                    term2 <- 0
-                }
-
-                hessian[iP1,iP2,iC] <- hessian[iP1,iP2,iC] + term1 + term2
-                hessian[iP2,iP1,iC] <- hessian[iP1,iP2,iC]
-            }
-        }        
-    }
-
-
-    ## ** export
-    return(hessian)
-}
-
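Editor's note: the Hessian assembled above combines three blocks of second derivatives of the Gaussian log-likelihood: mean, variance, and hybrid (mean-variance). As a sanity check of what those blocks compute, here is a minimal numerical sketch for a univariate Gaussian sample; it is an illustration, not package code, and assumes numDeriv (listed in Suggests):

    ## log-likelihood of an iid Gaussian sample in theta = (mu, sigma2)
    y <- c(0.2, -0.5, 1.1, 0.7)
    logLik.fun <- function(theta){
        sum(dnorm(y, mean = theta[1], sd = sqrt(theta[2]), log = TRUE))
    }
    numDeriv::hessian(logLik.fun, x = c(0, 1))  # numerical Hessian at (mu, sigma2) = (0, 1)
    ## analytic counterparts of the three blocks:
    ##   mean:     d2l/dmu2        = -n/sigma2
    ##   hybrid:   d2l/dmu.dsigma2 = -sum(y - mu)/sigma2^2
    ##   variance: d2l/dsigma2^2   = n/(2 sigma2^2) - sum((y - mu)^2)/sigma2^3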
-## * .dInformation2
-#' @title Compute the First Derivative of the Expected Information Matrix
-#' @description Compute the first derivative of the expected information matrix.
-#' @name dInformation2-internal
-#' 
-#' @details \code{.dInformation2} will perform the computation individually when the
-#' argument \code{index.Omega} is not null.
-#' 
-#' @keywords internal
-.dInformation2 <- function(dmu, d2mu, dOmega, d2Omega,
-                           Omega, OmegaM1, n.corrected,
-                           index.Omega, leverage, n.cluster,
-                           name.param, name.3deriv){
-    n.param <- length(name.param)
-    index.deriv <- match(name.3deriv, name.param)
-
-### ** prepare
-    test.global <- is.null(index.Omega)
-    if(!test.global){
-        M.template <- Omega
-        M.template[] <- 0
-    }
-    
-    dInfo <-  array(0,
-                    dim = c(n.param, n.param, length(name.3deriv)),
-                    dimnames = list(name.param, name.param, name.3deriv))
-    
-### ** loop
-    for(iDeriv in index.deriv){ # iDeriv <- 4
-        for(iP1 in 1:n.param){ # iP1 <- 1
-            for(iP2 in iP1:n.param){ # iP2 <- 1
-                
-                iNameD <- name.param[iDeriv]
-                iName1 <- name.param[iP1]
-                iName2 <- name.param[iP2]
-                 ## cat(iNameD," ",iName1,"",iName2,"\n")
-                
-                ## *** identify relevant terms
-                test.Omega1 <- !is.null(dOmega[[iNameD]]) && !is.null(dOmega[[iName1]]) && !is.null(dOmega[[iName2]])
-                test.Omega2a <- !is.null(d2Omega[[iNameD]][[iName1]]) && !is.null(dOmega[[iName2]])
-                test.Omega2b <- !is.null(d2Omega[[iName1]][[iNameD]]) && !is.null(dOmega[[iName2]])
-                test.Omega3a <- !is.null(d2Omega[[iNameD]][[iName2]]) && !is.null(dOmega[[iName1]])
-                test.Omega3b <- !is.null(d2Omega[[iName2]][[iNameD]]) && !is.null(dOmega[[iName1]])
-                
-                test.mu1a <- !is.null(d2mu[[iNameD]][[iName1]]) && !is.null(dmu[[iName2]])
-                test.mu1b <- !is.null(d2mu[[iName1]][[iNameD]]) && !is.null(dmu[[iName2]])
-                test.mu2a <- !is.null(d2mu[[iNameD]][[iName2]]) && !is.null(dmu[[iName1]])
-                test.mu2b <- !is.null(d2mu[[iName2]][[iNameD]]) && !is.null(dmu[[iName1]])
-                test.mu3 <- !is.null(dOmega[[iNameD]]) && !is.null(dmu[[iName1]]) && !is.null(dmu[[iName2]])
-
-                if((test.Omega1 + test.Omega2a + test.Omega2b + test.Omega3a + test.Omega3b + test.mu1a + test.mu1b + test.mu2a + test.mu2b + test.mu3) == 0){
-                    next
-                }
-
-                ## *** extract quantities for computations 
-                if(test.mu1a){
-                    d2mu.D1 <- d2mu[[iNameD]][[iName1]]
-                }else if(test.mu1b){
-                    d2mu.D1 <- d2mu[[iName1]][[iNameD]]
-                }
-                if(test.mu2a){
-                    d2mu.D2 <- d2mu[[iNameD]][[iName2]]
-                }else if(test.mu2b){
-                    d2mu.D2 <- d2mu[[iName2]][[iNameD]]
-                }
-                if(test.Omega2a){
-                    d2Omega.D1 <- d2Omega[[iNameD]][[iName1]]
-                }else if(test.Omega2b){
-                    d2Omega.D1 <- d2Omega[[iName1]][[iNameD]]
-                }
-                if(test.Omega3a){
-                    d2Omega.D2 <- d2Omega[[iNameD]][[iName2]]
-                }else if(test.Omega3b){
-                    d2Omega.D2 <- d2Omega[[iName2]][[iNameD]]
-                }
-                
-                if(test.global){
-                    ## *** Global: extract quantities for computations
-                    if(!is.null(dOmega[[iNameD]])){
-                        OmegaM1.dOmega.D <- OmegaM1 %*% dOmega[[iNameD]]
-                    }
-                    if(!is.null(dOmega[[iName1]])){
-                        OmegaM1.dOmega.1 <- OmegaM1 %*% dOmega[[iName1]]
-                    }
-                    if(!is.null(dOmega[[iName2]])){
-                        OmegaM1.dOmega.2 <- OmegaM1 %*% dOmega[[iName2]]
-                    }                    
-
-                    ## *** Global: compute
-                    if(test.Omega1){
-                        iDiag1 <- diag(OmegaM1.dOmega.D %*% OmegaM1.dOmega.1 %*% OmegaM1.dOmega.2)
-                        iDiag2 <- diag(OmegaM1.dOmega.1 %*% OmegaM1.dOmega.D %*% OmegaM1.dOmega.2)
-                        dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] - 1/2 * sum(iDiag1 * n.corrected + iDiag2 * n.corrected)
-                    }
-
-                    if(test.Omega2a || test.Omega2b){
-                        iDiag <- diag(OmegaM1 %*% d2Omega.D1 %*% OmegaM1.dOmega.2)
-                        dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + 1/2 * sum(iDiag * n.corrected)
-                    }
-
-                    if(test.Omega3a || test.Omega3b){
-                        iDiag <- diag(OmegaM1.dOmega.1 %*% OmegaM1 %*% d2Omega.D2)
-                        dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + 1/2 * sum(iDiag * n.corrected)
-                    }
-
-                    if(test.mu1a || test.mu1b){
-                        dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + sum(d2mu.D1 %*% OmegaM1 * dmu[[iName2]])
-                    }
-
-                    if(test.mu2a || test.mu2b){
-                        dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + sum(dmu[[iName1]] %*% OmegaM1 * d2mu.D2)
-                    }
-                    if(test.mu3){
-                        dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] - sum(dmu[[iName1]] %*% OmegaM1.dOmega.D %*% OmegaM1 * dmu[[iName2]])
-                    }
-                    
-                }else{
-                    for(iC in 1:n.cluster){ # iC <- 1
-                        iIndex <- index.Omega[[iC]]
-                        
-                        if(!is.null(dOmega[[iNameD]])){
-                            OmegaM1.dOmega.D <- OmegaM1[[iC]] %*% dOmega[[iNameD]][iIndex,iIndex]
-                        }
-                        if(!is.null(dOmega[[iName1]])){
-                            OmegaM1.dOmega.1 <- OmegaM1[[iC]] %*% dOmega[[iName1]][iIndex,iIndex]
-                        }
-                        if(!is.null(dOmega[[iName2]])){
-                            OmegaM1.dOmega.2 <- OmegaM1[[iC]] %*% dOmega[[iName2]][iIndex,iIndex]
-                        }
-
-                        if(test.Omega1){
-                            iDiag1 <- diag(OmegaM1.dOmega.D %*% OmegaM1.dOmega.1 %*% OmegaM1.dOmega.2)
-                            iDiag2 <- diag(OmegaM1.dOmega.1 %*% OmegaM1.dOmega.D %*% OmegaM1.dOmega.2)
-                            dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] - 1/2 * sum((iDiag1+iDiag2) * (1 - leverage[iC,iIndex]))
-                        }
-                        if(test.Omega2a || test.Omega2b){
-                            iDiag <- diag(OmegaM1[[iC]] %*% d2Omega.D1[iIndex,iIndex] %*% OmegaM1.dOmega.2)
-                            dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + 1/2 * sum(iDiag * (1 - leverage[iC,iIndex]))
-                        }
-
-                        if(test.Omega3a || test.Omega3b){
-                            iDiag <- diag(OmegaM1.dOmega.1 %*% OmegaM1[[iC]] %*% d2Omega.D2[iIndex,iIndex])
-                            dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + 1/2 * sum(iDiag * (1 - leverage[iC,iIndex]))
-                        }
-
-                        if(test.mu1a || test.mu1b){
-                            dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + sum(d2mu.D1[iC,iIndex] %*% OmegaM1[[iC]] * dmu[[iName2]][iC,iIndex])
-                        }
-                        
-                        if(test.mu2a || test.mu2b){
-                            dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + sum(dmu[[iName1]][iC,iIndex] %*% OmegaM1[[iC]] * d2mu.D2[iC,iIndex])
-                        }
-                        
-                        if(test.mu3){                            
-                            dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] - sum(dmu[[iName1]][iC,iIndex] %*% OmegaM1.dOmega.D %*% OmegaM1[[iC]] * dmu[[iName2]][iC,iIndex])
-                        }
-                    }
-                }
-            }
-            
-        }
-        ## *** Symmetrize
-        dInfo[,,iNameD] <- symmetrize(dInfo[,,iNameD], update.upper = NULL)
-    }
-
-    ### ** export
-    return(dInfo)
-}
-
-
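Editor's note: for intuition about the quantity .dInformation2 returns, consider the univariate Gaussian case (a hand-rolled illustration, not package code): the expected information in theta = (mu, sigma2) is I(sigma2) = n * diag(1/sigma2, 1/(2*sigma2^2)), so its first derivative with respect to sigma2 is n * diag(-1/sigma2^2, -1/sigma2^3). A numerical check, again assuming numDeriv:

    n <- 20; sigma2 <- 1.5
    info <- function(s2){ n * diag(c(1/s2, 1/(2*s2^2))) }      # expected information
    dInfo.analytic <- n * diag(c(-1/sigma2^2, -1/sigma2^3))    # its derivative in sigma2
    dInfo.numeric <- matrix(numDeriv::jacobian(function(s2) as.vector(info(s2)), sigma2), 2, 2)
    max(abs(dInfo.numeric - dInfo.analytic))                   # ~ 0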
diff --git a/R/lavaSearch2-package.R b/R/lavaSearch2-package.R
index 63d91af..4fceddd 100644
--- a/R/lavaSearch2-package.R
+++ b/R/lavaSearch2-package.R
@@ -65,7 +65,7 @@
 #' @import Rcpp
 #' @importFrom reshape2 melt
 #' @importFrom sandwich estfun
-#' @importFrom stats anova as.formula coef cov df.residual dist formula hclust logLik median model.frame model.matrix na.omit optim p.adjust pf pnorm predict qqnorm quantile pt residuals rnorm sd setNames sigma update vcov
+#' @importFrom stats anova coef confint cov effects formula logLik model.frame model.matrix model.tables predict qqnorm quantile residuals update vcov
 #' @importFrom utils methods packageVersion setTxtProgressBar tail txtProgressBar
 #' 
 NULL
diff --git a/R/leverage.R b/R/leverage.R
deleted file mode 100644
index f034dd7..0000000
--- a/R/leverage.R
+++ /dev/null
@@ -1,126 +0,0 @@
-### leverage.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: feb 19 2018 (17:58) 
-## Version: 
-## Last-Updated: feb 11 2019 (13:26) 
-##           By: Brice Ozenne
-##     Update #: 36
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * documentation - leverage2
-#' @title Extract Leverage Values
-#' @description Extract leverage values from a Gaussian linear model. 
-#' @name leverage2
-#' 
-#' @param object a \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} object.
-#' @param param [optional] the fitted parameters.
-#' @param data [optional] the data set.
-#' @param ... arguments to be passed to \code{sCorrect}.
-#'
-#' @details The leverages are defined as the partial derivatives of the fitted values with respect to the observations.
-#' \deqn{
-#' leverage_i = \frac{\partial \hat{Y}_i}{\partial Y_i}
-#' }
-#' See Wei et al. (1998). \cr \cr
-#' 
-#' If argument \code{param} or \code{data} is not null, then the small sample size correction is recomputed to correct the residuals.
-#'
-#' @seealso \code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
-#' 
-#' @return a matrix containing the leverage values for each sample (in rows)
-#' and each endogenous variable (in columns).
-#'
-#' @references Bo-Cheng Wei et al., Generalized Leverage and its applications (1998), Scandinavian Journal of Statistics 25:1:25-37.
-#' 
-#' @examples
-#' ## simulate data
-#' set.seed(10)
-#' m <- lvm(Y1~eta,Y2~eta,Y3~eta)
-#' latent(m) <- ~eta
-#' d <- lava::sim(m,20, latent = FALSE)
-#'
-#' ## standard linear model
-#' e.lm <- lm(Y1~Y2, data = d)
-#'
-#' sCorrect(e.lm) <- TRUE
-#' range(as.double(leverage2(e.lm)) - influence(e.lm)$hat)
-#'
-#' ## latent variable model
-#' e.lvm <- estimate(m, data = d)
-#' sCorrect(e.lvm) <- TRUE
-#' leverage2(e.lvm)
-#' 
-#' @concept small sample inference
-#' @export
-`leverage2` <-
-    function(object, ...) UseMethod("leverage2")
-
-## * leverage2.lm
-#' @rdname leverage2
-#' @export
-leverage2.lm <- function(object, param = NULL, data = NULL, ...){
-
-    sCorrect(object, param = param, data = data, df = FALSE, ...) <- TRUE
-
-    ### ** export
-    return(object$sCorrect$leverage)
-}
-
-## * leverage2.gls
-#' @rdname leverage2
-#' @export
-leverage2.gls <- leverage2.lm
-
-## * leverage2.lme
-#' @rdname leverage2
-#' @export
-leverage2.lme <- leverage2.lm
-
-## * leverage2.lvmfit
-#' @rdname leverage2
-#' @export
-leverage2.lvmfit <- leverage2.lm
-
-## * leverage2.lm2
-#' @rdname leverage2
-#' @export
-leverage2.lm2 <- function(object, param = NULL, data = NULL, ...){
-
-    if(!is.null(param) || !is.null(data)){
-        args <- object$sCorrect$args
-        args$df <- FALSE
-        object$sCorrect <- do.call(sCorrect,
-                                   args = c(list(object, param = param, data = data),
-                                            args))
-    }
-    
-    return(object$sCorrect$leverage)   
-}
-
-## * leverage2.gls2
-#' @rdname leverage2
-#' @export
-leverage2.gls2 <- leverage2.lm2
-
-## * leverage2.lme2
-#' @rdname leverage2
-#' @export
-leverage2.lme2 <- leverage2.lm2
-
-## * leverage2.lvmfit2
-#' @rdname leverage2
-#' @export
-leverage2.lvmfit2 <- leverage2.lm2
-
-
-
-##----------------------------------------------------------------------
-### leverage.R ends here
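Editor's note: the removed leverage2 methods implement the generalized leverage of Wei et al. (1998). For an ordinary linear model this reduces to the usual hat values, which the following standalone sketch (an illustration, not package code) verifies:

    set.seed(10)
    d <- data.frame(Y = rnorm(20), X = rnorm(20))
    e.lm <- lm(Y ~ X, data = d)
    ## hat matrix H = X (X'X)^{-1} X'; its diagonal is d(fitted Y_i)/d(Y_i)
    X <- model.matrix(e.lm)
    H <- X %*% solve(crossprod(X)) %*% t(X)
    range(diag(H) - hatvalues(e.lm))   # ~ 0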
diff --git a/R/matrixPower.R b/R/matrixPower.R
index f3f1def..f947832 100644
--- a/R/matrixPower.R
+++ b/R/matrixPower.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: okt 23 2017 (16:52) 
 ## Version: 
-## last-updated: nov  2 2018 (15:00) 
+## last-updated: Jan 11 2022 (16:00) 
 ##           By: Brice Ozenne
-##     Update #: 59
+##     Update #: 60
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -57,7 +57,6 @@
 ##' 
 
 ## * matrixPower (code)
-##' @rdname matrixPower
 ##' @export
 matrixPower <- function(object, power, symmetric, tol = 1e-12, print.warning = TRUE){
     object.eigen <- eigen(object, symmetric = symmetric)
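Editor's note: matrixPower computes (fractional) matrix powers from the eigendecomposition shown above: for a symmetric matrix with eigenvalues d and eigenvectors V, M^power = V diag(d^power) V'. A minimal illustration of this identity (not package code):

    M <- matrix(c(2, 1, 1, 2), 2, 2)
    eig <- eigen(M, symmetric = TRUE)
    Mroot <- eig$vectors %*% diag(eig$values^0.5) %*% t(eig$vectors)
    range(Mroot %*% Mroot - M)   # ~ 0: Mroot is the symmetric square root of M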
diff --git a/R/calcDistMax.R b/R/modelsearch2-calcDistMax.R
similarity index 100%
rename from R/calcDistMax.R
rename to R/modelsearch2-calcDistMax.R
diff --git a/R/methods-modelsearch2.R b/R/modelsearch2-methods.R
similarity index 100%
rename from R/methods-modelsearch2.R
rename to R/modelsearch2-methods.R
diff --git a/R/print.modelsearch2.R b/R/modelsearch2-print.R
similarity index 100%
rename from R/print.modelsearch2.R
rename to R/modelsearch2-print.R
diff --git a/R/summary.modelsearch2.R b/R/modelsearch2-summary.R
similarity index 96%
rename from R/summary.modelsearch2.R
rename to R/modelsearch2-summary.R
index 4eaae92..e4d09cb 100644
--- a/R/summary.modelsearch2.R
+++ b/R/modelsearch2-summary.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: aug 30 2017 (10:46) 
 ## Version: 
-## last-updated: jun 27 2019 (14:21) 
+## last-updated: Jan 11 2022 (16:44) 
 ##           By: Brice Ozenne
-##     Update #: 115
+##     Update #: 116
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -66,7 +66,7 @@ summary.modelsearch2 <- function(object, print = TRUE, ...){
         cat(out$message.pre,sep="")
         print(out$table)
         cat(out$message.post,sep="")
-        if(any(na.omit(out$table[,"dp.Info"])<1)){
+        if(any(stats::na.omit(out$table[,"dp.Info"])<1)){
             cat("WARNING: some of the score tests could not be correctly computed, probably because extended models are not all identifiable\n",
                 "        consider using the argument \'link\' to specify only identifiable models \n")
         }
diff --git a/R/modelsearch2.R b/R/modelsearch2.R
index 07d452b..46fee6a 100644
--- a/R/modelsearch2.R
+++ b/R/modelsearch2.R
@@ -115,7 +115,6 @@
 #' 
 
 ## * modelsearch2.lvmfit (code)
-#' @rdname modelsearch2
 #' @export
 modelsearch2.lvmfit <- function(object, link = NULL, data = NULL, 
                                 method.p.adjust = "fastmax", method.maxdist = "approximate", n.sample = 1e5, na.omit = TRUE, 
@@ -544,7 +543,7 @@ modelsearch2.lvmfit <- function(object, link = NULL, data = NULL,
         newModel <- lava::fixsome(newModel, measurement.fix=TRUE, S=suffStat$S, mu=suffStat$mu, n = suffStat$n, debug=FALSE)
 
         ## *** define constrained coefficients
-        coef0.new <- setNames(rep(0, ncoef.object+1), coef(newModel))
+        coef0.new <- stats::setNames(rep(0, ncoef.object+1), coef(newModel))
         coef0.new[namecoef.object] <- coef.object
 
         ## *** compute the iid decomposition and statistic
diff --git a/R/multcomp.R b/R/multcomp.R
deleted file mode 100644
index b608aa8..0000000
--- a/R/multcomp.R
+++ /dev/null
@@ -1,487 +0,0 @@
-
-### mlf2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: nov 29 2017 (12:56) 
-## Version: 
-## Last-Updated: jun 14 2019 (13:44) 
-##           By: Brice Ozenne
-##     Update #: 556
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## User 
-### Code:
-
-
-## * estfun
-#' @title Extract Empirical Estimating Functions (lvmfit Object)
-#' @description Extract the empirical estimating functions of an \code{lvmfit} object.
-#' This function is for internal use but needs to be public to enable its use by \code{multcomp::glht}.
-#' @name estfun
-#' 
-#' @param x an \code{lvmfit} object.
-#' @param ... arguments passed to methods.
-#'
-#' @details This function makes it possible to use the \code{glht} function with \code{lvmfit} objects.
-#' Otherwise, when \code{multcomp:::vcov.mmm} calls \code{sandwich::sandwich} and then \code{sandwich::meat}, \code{sandwich::meat} complains that \code{estfun} is not defined for \code{lvmfit} objects.
-#'
-#' @examples
-#' library(multcomp)
-#' 
-#' #### generative model ####
-#' mSim <- lvm(X ~ Age + 0.5*Treatment,
-#'             Y ~ Gender + 0.25*Treatment,
-#'             c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
-#'             Age[40:5]~1)
-#' latent(mSim) <- ~eta
-#' categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
-#' categorical(mSim, labels = c("male","female")) <- ~Gender
-#'
-#' #### simulate data ####
-#' n <- 5e1
-#' set.seed(10)
-#' df.data <- lava::sim(mSim, n = n, latent = FALSE)
-#'
-#' #### fit separate models ####
-#' lmX <- lm(X ~ Age + Treatment, data = df.data)
-#' lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
-#' lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ eta, eta ~ Treatment), 
-#'                  data = df.data)
-#'
-#' #### create mmm object #### 
-#' e.mmm <- mmm(X = lmX, Y = lvmY, Z = lvmZ)
-#'
-#' #### create contrast matrix ####
-#' resC <- createContrast(e.mmm, var.test = "Treatment", add.variance = FALSE)
-#'
-#' #### adjust for multiple comparisons ####
-#' e.glht <- glht(e.mmm, linfct = resC$mlf)
-#' summary(e.glht)
-#' @concept multiple comparison
-
-## * estfun.lvmfit
-#' @rdname estfun
-#' @method estfun lvmfit
-#' @export
-estfun.lvmfit <- function(x, ...){
-    U <- lava::score(x, indiv = TRUE)
-    return(U)
-}
-
-## * estfun.gls
-#' @rdname estfun
-#' @method estfun gls
-#' @export
-estfun.gls <- function(x, ...){
-    if(inherits(x,"gls2")){
-        U <- score2(x)
-    }else{
-        U <- score2(x, bias.correct = FALSE)
-    }
-    return(U)
-}
-
-## * estfun.lme
-#' @rdname estfun
-#' @method estfun lme
-#' @export
-estfun.lme <- function(x, ...){
-    if(inherits(x,"lme2")){
-        U <- score2(x)
-    }else{
-        U <- score2(x, bias.correct = FALSE)
-    }
-    return(U)
-}
-
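Editor's note: the point of these estfun methods is that sandwich builds its "meat" matrix from the per-observation estimating functions. A minimal sketch of the mechanism (an illustration assuming lava is attached; the division by n mirrors the usual meat formula):

    library(lava)
    set.seed(1)
    m <- lvm(Y ~ X)
    e <- estimate(m, data = lava::sim(m, 50))
    U <- lava::score(e, indiv = TRUE)   # n x p matrix of estimating functions
    crossprod(U) / NROW(U)              # essentially what sandwich::meat computes
                                        # once estfun is available for dispatch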
-## * Documentation - glht2
-#' @title General Linear Hypothesis
-#' @description Test general linear hypotheses, possibly across several latent variable models, with small sample corrections.
-#' @name glht2
-#' 
-#' @param model a \code{lvmfit} or \code{mmm} object.
-#' The \code{mmm} object can only contain lm/gls/lme/lvmfit objects.
-#' @param linfct [matrix or vector of character] the linear hypotheses to be tested. Same as the argument \code{par} of \code{\link{createContrast}}.
-#' @param rhs [vector] the right hand side of the linear hypotheses to be tested.
-#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias?
-#' @param df [logical] should the degrees of freedom of the Wald statistic be computed using the Satterthwaite correction?
-#' @param robust [logical] should robust standard errors be used?
-#' Otherwise the influence function is rescaled with the standard errors obtained from the information matrix.
-#' @param cluster  [integer vector] the grouping variable relative to which the observations are iid.
-#'
-#' @details
-#' Whenever the argument linfct is not a matrix, it is passed to the function \code{createContrast} to generate the contrast matrix and, if not specified, rhs. \cr \cr
-#'
-#' Since only one degree of freedom can be specified in a \code{glht} object, and it must be an integer, the denominator degrees of freedom of an F-test simultaneously testing all hypotheses are retained, after rounding. \cr \cr
-#'
-#' Arguments \code{rhs} and \code{null} are equivalent.
-#' This redundancy enables compatibility between \code{lava::compare}, \code{compare2}, \code{multcomp::glht}, and \code{glht2}.
-#' @return A \code{glht} object.
-#' 
-#' @seealso
-#' \code{\link{createContrast}} to create contrast matrices. \cr
-#' \code{\link{sCorrect}} to pre-compute quantities for the small sample correction.
-#' 
-#' @concept multiple comparisons
-#'
-#' @examples
-#' library(multcomp)
-#' 
-#' ## Simulate data
-#' mSim <- lvm(c(Y1,Y2,Y3)~ beta * eta, Z1 ~ E, Z2 ~ E, Age[40:5]~1)
-#' latent(mSim) <- "eta"
-#' set.seed(10)
-#' n <- 1e2
-#'
-#' df.data <- lava::sim(mSim, n, latent = FALSE, p = c(beta = 1))
-#'
-#' #### Inference on a single model ####
-#' e.lvm <- estimate(lvm(Y1~E), data = df.data)
-#' summary(glht2(e.lvm, linfct = c("Y1~E + Y1","Y1")))
-#' 
-#' #### Inference on separate models ####
-#' ## fit separate models
-#' lmX <- lm(Z1 ~ E, data = df.data)
-#' lvmY <- estimate(lvm(Z2 ~ E + Age), data = df.data)
-#' lvmZ <- estimate(lvm(c(Y1,Y2,Y3) ~ eta, eta ~ E), 
-#'                  data = df.data)
-#'
-#' #### create mmm object #### 
-#' e.mmm <- mmm(X = lmX, Y = lvmY, Z = lvmZ)
-#'
-#' #### create contrast matrix ####
-#' resC <- createContrast(e.mmm, var.test = "E", add.variance = TRUE)
-#'
-#' #### adjust for multiple comparisons ####
-#' e.glht2 <- glht2(e.mmm, linfct = resC$contrast, df = FALSE)
-#' summary(e.glht2)
-#'
-#' @concept multiple comparison
-#' @export
-`glht2` <-
-    function(model, linfct, rhs,
-             bias.correct, df, robust, cluster) UseMethod("glht2")
-
-
-## * glht2.lvmfit
-#' @rdname glht2
-#' @export
-glht2.lvmfit <- function(model, linfct, rhs = 0,
-                         bias.correct = TRUE, df = TRUE, robust = FALSE, cluster = NULL){
-
-    if(robust==FALSE && !is.null(cluster)){
-        stop("Argument \'cluster\' must be NULL when argument \'robust\' is FALSE \n")
-    }
-    
-    ### ** define contrast matrix
-    if(!is.matrix(linfct)){
-        resC <- createContrast(model, par = linfct)
-        linfct <- resC$contrast
-        if("rhs" %in% names(match.call()) == FALSE){
-            rhs <- resC$null
-        }
-    }
-
-### ** pre-compute quantities for the small sample correction
-    if(!inherits(model,"lvmfit2")){
-        sCorrect(model, df = df) <- bias.correct
-    }
-
-### ** Wald test with small sample correction
-    name.param <- colnames(linfct)
-    n.param <- NCOL(linfct)
-    n.hypo <- NROW(linfct)
-
-    resWald <- compare2(model, contrast = linfct, null = rhs, as.lava = FALSE)
-    ## update name according to multcomp, i.e. without second member
-    rownames(linfct) <- .contrast2name(linfct, null = NULL) 
-
-### ** Global degree of freedom
-    if(df){
-        df.global <- round(resWald["global","df"], digits = 0)
-    }else{
-        df.global <- 0
-    }
-    
-### ** compute variance-covariance matrix
-    if(robust){
-        vcov.model <- crossprod(iid2(model, cluster = cluster))
-    }else{
-        vcov.model <- vcov2(model)
-    }
-
-### ** convert to the appropriate format
-    out <- list(model = model,
-                linfct = linfct,
-                rhs = unname(rhs),
-                coef = coef(model),
-                vcov = vcov.model,
-                df = df.global,
-                alternative = "two.sided",
-                type = NULL,
-                robust = robust,
-                bias.correct = bias.correct)
-    class(out) <- c("glht2","glht")
-        
-    ### ** export
-    return(out)
-}
-
-
-## * glht2.mmm
-#' @rdname glht2
-#' @export
-glht2.mmm <- function (model, linfct, rhs = 0,
-                       bias.correct = TRUE, df = TRUE, robust = FALSE, cluster = NULL){
-
-
-    if(robust==FALSE && !is.null(cluster)){
-        stop("Argument \'cluster\' must be NULL when argument \'robust\' is FALSE \n")
-    }
-    
-    ### ** check the class of each model
-    n.model <- length(model)
-    name.model <- names(model)    
-    if(is.null(name.model)){
-        stop("Argument \'model\' must be named list. \n")
-    }
-    
-    test.lm <- sapply(model, inherits, what = "lm")
-    test.gls <- sapply(model, inherits, what = "gls")
-    test.lme <- sapply(model, inherits, what = "lme")
-    test.lvmfit <- sapply(model, inherits, what = "lvmfit")
-    if(any(test.lm + test.gls + test.lme + test.lvmfit == 0)){
-        index.wrong <- which(test.lm + test.gls + test.lme + test.lvmfit == 0)
-        stop("Argument \'model\' must be a list of objects that inherits from lm/gls/lme/lvmfit. \n",
-             "Incorrect element(s): ",paste(index.wrong, collapse = " "),".\n")
-    }
-
-
-    ## ** define the contrast matrix
-    out <- list()
-    if (is.character(linfct)){
-        resC <- createContrast(model, par = linfct, add.variance = TRUE)
-        contrast <- resC$contrast
-        ls.contrast <- resC$ls.contrast
-        if("rhs" %in% names(match.call()) == FALSE){
-            rhs <- resC$null
-        }
-    }else if(is.matrix(linfct)){
-
-        ls.contrast <- lapply(name.model, function(x){ ## x <- name.model[2]
-            iColnames <- grep(paste0("^",x,": "), colnames(linfct), value = FALSE, fixed = FALSE)
-            iRownames <- rowSums(linfct[,iColnames,drop=FALSE]!=0)>0
-            linfct[iRownames, iColnames,drop=FALSE]            
-        })
-        names(ls.contrast) <- name.model
-        contrast <- linfct
-        if("rhs" %in% names(match.call()) == FALSE){ ## left rhs to default value
-            rhs <- rep(0, NROW(contrast))
-        }else if(length(rhs)!=NROW(contrast)){
-            stop("mismatch between the dimensions of argument \'rhs\' and argument \'contrast\' \n")
-        }
-    }else{
-        stop("Argument \'linfct\' must be a matrix or a vector of characters. \n",
-             "Consider using  out <- createContrast(...) and pass out$contrast to linfct. \n")
-    }
-
-    ## ** check whether it is possible to compute df
-    if(identical(df, TRUE)){
-           
-        ## does each model has the same df?
-        ## test.df <- try(lapply(model, df.residual), silent = TRUE)
-        ## if(inherits(test.df, "try-error")){
-        ##     stop("Cannot check the degrees of freedom for each model - no \'df.residual\' method available \n",
-        ##          "Consider setting the argument \'df\' to FALSE \n")
-        ## }
-        
-        ## if(any(sapply(test.df,is.null))){
-        ##     stop("Cannot compute residual degrees of freedom for all models \n",
-        ##          "Consider setting the argument \'df\' to FALSE \n")
-        ## }
-
-        ## if(length(unique(unlist(test.df)))>1){
-        ##     stop("Residual degrees of freedom differ across models \n",
-        ##          "Consider setting the argument \'df\' to FALSE \n")
-        ## }
-
-        ## are linear hypothesis model specific?
-        ls.testPerModel <- lapply(ls.contrast, function(iModel){
-            rowSums(contrast[,colnames(iModel),drop=FALSE]!=0)>0
-        })
-        
-        if(any(Reduce("+",ls.testPerModel)>1)){
-            stop("Cannot compute the degrees of freedom for tests performed across several models \n",
-                 "Consider setting the argument \'df\' to FALSE \n")
-        }    
-    }
-    
-    ## ** Extract influence functions from all models    
-    ls.res <- lapply(1:n.model, function(iM){ ## iM <- 1
-
-### *** Pre-compute quantities
-        if(!inherits(model[[iM]],"lm2") && !inherits(model[[iM]],"gls2") && !inherits(model[[iM]],"lme2") && !inherits(model[[iM]],"lvmfit2")){
-            sCorrect(model[[iM]], df = df) <- bias.correct
-        }
-        out$param <- model[[iM]]$sCorrect$param
-        name.param <- names(out$param)
-        name.object.param <- paste0(name.model[iM],": ",name.param)
-        out$param <- setNames(out$param, name.object.param)
-        
-### *** Compute df for each test
-        if(df){
-            ## here null does not matter since we only extract the degrees of freedom
-            iContrast <- ls.contrast[[iM]]
-            colnames(iContrast) <- name.param
-        
-            iWald <- compare2(model[[iM]], contrast = iContrast, as.lava = FALSE)
-            out$df <- iWald[1:(NROW(iWald)-1),"df"]
-        }else{
-            out$df <- Inf
-        }
-### *** get iid decomposition
-        index.missing <- model[[iM]]$na.action
-        n.obs <- stats::nobs(model[[iM]]) + length(index.missing)
-
-        out$iid <- matrix(NA, nrow = n.obs, ncol = length(name.param),
-                          dimnames = list(NULL, name.param))
-        
-        ## dim(iid2(model[[iM]], robust = robust, cluster = cluster))
-        ## dim(out$iid)
-        ## names(model[[iM]])
-        out$iid[setdiff(1:n.obs,index.missing),] <- iid2(model[[iM]], robust = robust, cluster = cluster)
-        
-        colnames(out$iid) <- name.object.param
-
-        return(out)
-        
-    })
-    seq.df <- unlist(lapply(ls.res,"[[","df"))
-    seq.param <- unlist(lapply(ls.res,"[[","param"))
-
-    if(df){
-        if(length(unique(seq.df))>1){
-            warning("Unequal degrees of freedom for the Wald statistics \n",
-                    "The median of the degrees of freedom is used.")
-        }
-        df.global <- round(stats::median(seq.df), digits = 0)
-    }else{
-        df.global <- 0
-    }
-        
-    ls.iid <- lapply(ls.res,"[[","iid")
-    n.obs <- unique(unlist(lapply(ls.iid, NROW)))
-    if(length(n.obs)>1){
-        stop("Mismatch between the number of observations in the iid \n",
-                "Likely to be due to the presence of missing values \n")
-        
-    }
-    M.iid <- do.call(cbind,ls.iid)
-    if(any(is.na(M.iid))){
-        M.iid[is.na(M.iid)] <- 0
-    }
-    vcov.model <- crossprod(M.iid)
-    
-    
-### ** convert to the appropriate format
-    out <- list(model = model,
-                linfct = linfct,
-                rhs = unname(rhs),
-                coef = seq.param,
-                vcov = vcov.model,
-                df = df.global,
-                alternative = "two.sided",
-                type = NULL,
-                robust = robust,
-                bias.correct = bias.correct)
-    class(out) <- c("glht2","glht")
-        
-    ### ** export
-    return(out)    
-}
-
-
-## * .calcClosure
-.calcClosure <- function(name, estimate, covariance, type, df){
-
-    n.hypo <- length(name)
-    correlation <- stats::cov2cor(covariance)
-
-    ## ** create all possible hypotheses
-    ls.closure <- lapply(n.hypo:1, function(iNtest){ ## iNtest <- 1  
-        iList <- list(M = utils::combn(name, m = iNtest))
-        iList$vec <- apply(iList$M, 2, paste, collapse = ",")
-        return(iList)
-    })
-
-    ## ** compute all p.values
-    for(iLevel in 1:length(ls.closure)){ ## iLevel <- 1
-        ls.closure[[iLevel]]$test <- t(apply(ls.closure[[iLevel]]$M, 2, function(iHypo){
-            index <- which(name %in% iHypo)
-            if(type == "chisq"){
-                return(.ChisqTest(estimate[index], covariance = covariance[index,index,drop=FALSE], df = df))
-            }else if(type == "max"){
-                return(.tTest(estimate[index],
-                              covariance = covariance[index,index,drop=FALSE],
-                              correlation = correlation[index,index,drop=FALSE], df = df))
-            }
-        }))
-        rownames(ls.closure[[iLevel]]$test) <- ls.closure[[iLevel]]$vec
-    }
-    
-    ## ** find all hypotheses in the closure related to an individual hypothesis
-    ls.hypo <- vector(mode = "list", length = n.hypo)
-    for(iHypo in 1:n.hypo){ ## iHypo <- 1
-        ls.hypo[[iHypo]] <- do.call(rbind,lapply(ls.closure, function(iClosure){ ## iClosure <- 1
-            iIndex <- which(colSums(iClosure$M==name[iHypo])>0)
-            data.frame(hypothesis = iClosure$vec[iIndex],
-                       statistic = as.double(iClosure$test[iIndex,"statistic"]),
-                       p.value = as.double(iClosure$test[iIndex,"p.value"]))
-        }))
-    }
-    names(ls.hypo) <- name
-        
-    ## ** adjusted p.values
-    vec.p.value <- unlist(lapply(ls.hypo, function(x){max(x$p.value)}))
-    return(list(closure = ls.closure,
-                test = ls.hypo,
-                p.value = vec.p.value))
-    
-}
-
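Editor's note: .calcClosure implements a closed testing procedure: every non-empty intersection of the elementary hypotheses is enumerated, each intersection is tested, and each elementary hypothesis is adjusted by the largest p-value among the intersections containing it. A minimal sketch of the closure enumeration (not package code):

    name <- c("H1", "H2", "H3")
    closure <- unlist(lapply(length(name):1, function(k){
        apply(utils::combn(name, m = k), 2, paste, collapse = ",")
    }))
    closure
    ## "H1,H2,H3" "H1,H2" "H1,H3" "H2,H3" "H1" "H2" "H3"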
-## * .tTest
-.tTest <- function(estimate, covariance, correlation, df, ...){
-    df1 <- length(estimate)
-    statistic <- max(abs(estimate/sqrt(diag(covariance))))
-    if(is.null(df)){
-        distribution <-  "gaussian"
-    }else{
-        distribution <- "student"
-    }
-    p.value <- .calcPmaxIntegration(statistic, p = df1, Sigma = correlation, df = df,
-                                    distribution = distribution)
-    return(c("statistic" = statistic,
-             "p.value" = p.value))
-}
-
-## * .ChisqTest
-.ChisqTest <- function(estimate, covariance, df, ...){
-    df1 <- length(estimate)
-    ## q * statistic ~ chisq or fisher
-    statistic <- as.double(matrix(estimate, nrow = 1) %*% solve(covariance) %*% matrix(estimate, ncol = 1)) / df1
-    if(!is.null(df)){
-        return(c("statistic" = statistic,
-                 "p.value" = 1-stats::pf(statistic, df1 = df1, df2 = df)))
-    }else{
-        return(c("statistic" = statistic,
-                 "p.value" = 1-stats::pchisq(statistic, df = df1)))
-        
-    }
-}
-
-
- 
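Editor's note: .ChisqTest computes the classical Wald statistic W = b' V^{-1} b / q and refers it to an F(q, df) distribution, or to a chi-square(q) distribution when df is NULL. A standalone illustration with hypothetical numbers:

    b <- c(0.8, -0.5)                              # estimates under test
    V <- matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2)   # their covariance matrix
    q <- length(b)
    W <- as.double(t(b) %*% solve(V) %*% b) / q
    1 - stats::pf(W, df1 = q, df2 = 40)            # p-value with 40 denominator df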
diff --git a/R/nobs.R b/R/nobs.R
index 242d840..05196a2 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: maj  2 2018 (09:15) 
 ## Version: 
-## Last-Updated: jun 14 2019 (13:41) 
+## Last-Updated: nov 18 2019 (10:09) 
 ##           By: Brice Ozenne
-##     Update #: 3
+##     Update #: 4
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -20,10 +20,5 @@ nobs.lvmfit <- function(object){
     return(object$data$n)
 }
 
-nobs.gls2 <- function(object){
-    return(NROW(object$sCorrect$score))
-}
-nobs.lme2 <- nobs.gls2
-
 ######################################################################
 ### nobs.R ends here
diff --git a/R/p.adjust2.R b/R/p.adjust2.R
new file mode 100644
index 0000000..14db540
--- /dev/null
+++ b/R/p.adjust2.R
@@ -0,0 +1,69 @@
+### p.adjust2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: dec 19 2019 (11:28) 
+## Version: 
+## Last-Updated: Jan 11 2022 (17:38) 
+##           By: Brice Ozenne
+##     Update #: 24
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+
+p.adjust2 <- function(p, method, vcov.param = NULL){
+
+    traditional.method <- c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none")
+    new.method <- c("AB1","AB2")
+    all.method <- c(traditional.method,new.method)
+    
+    if(length(method)!=1){
+        stop("Argument \'method\' must have length 1 \n")
+    }
+    
+    if(method %in% traditional.method){
+        out <- stats::p.adjust(p = p, method = method)
+    }else{
+        if(method %in% new.method == FALSE){
+            stop("Argument \'method\' must be one of \"",paste(all.method,collapse = "\" \""),"\"\n")
+        }
+        
+        if(method %in% c("AB1","AB2") && is.null(vcov.param)){
+            stop("Argument \'vcov.param\' must not be NULL when argument \'method\' is \"AB1\" or \"AB2\" \n")
+        }
+        if(method %in% c("AB1","AB2")){
+            
+            if(is.null(names(p))){
+                stop("Argument \'p\' must be named \n")
+            }
+            if(!all(names(p) %in% colnames(vcov.param))){
+                stop("The column names of argument \'vcov.param\' must match the names of argument \'p\' \n")
+            }
+
+            ## compute average correlation
+            M.rho <- stats::cov2cor(vcov.param)
+            diag(M.rho) <- NA
+            r <- colMeans(abs(M.rho[,names(p),drop=FALSE]), na.rm = TRUE)
+            k <- length(p)
+            out <- switch(method,
+                          "AB1" = pmin(1, p*(k-(k-1)*sqrt(abs(r)))),
+                          "AB2" = pmin(1, p*k^(1-sqrt(abs(r))))
+                          )
+            attr(out,"r") <- r
+                   
+            
+        }
+    }
+
+    names(out) <- names(p)
+
+    return(out)
+}
+
+######################################################################
+### p.adjust2.R ends here
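Editor's note: the new "AB1" and "AB2" methods shrink the Bonferroni factor k according to the average absolute correlation r between each parameter and the others: perfectly correlated tests (r = 1) receive no penalty, uncorrelated tests (r = 0) the full Bonferroni penalty. A hand computation with hypothetical inputs:

    p <- c(b1 = 0.01, b2 = 0.04)   # raw p-values
    r <- c(b1 = 0.6, b2 = 0.6)     # average absolute correlation with the other tests
    k <- length(p)
    pmin(1, p * (k - (k - 1) * sqrt(abs(r))))   # "AB1"
    pmin(1, p * k^(1 - sqrt(abs(r))))           # "AB2"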
diff --git a/R/package-butils-extractData.R b/R/package-butils-extractData.R
deleted file mode 100644
index 5fdefba..0000000
--- a/R/package-butils-extractData.R
+++ /dev/null
@@ -1,248 +0,0 @@
-## * Documentation
-#' @title Extract Data From a Model
-#' 
-#' @description Extract data from a model using \code{nlme::getData}, \code{riskRegression::coxDesign}, or \code{model.frame}.
-#' If these fail, it will try to extract the data by name according to \code{model$call$data}.
-#' 
-#' @param object the fitted model.
-#' @param design.matrix [logical] should the data be extracted after transformation (e.g. conversion of categorical variables to dummy variables)?
-#' Otherwise the original data will be returned.
-#' @param as.data.frame [logical] should the output be converted into a \code{data.frame} object?
-#' @param envir [environment] the environment from which to search the data.
-#'
-#' @return a dataset.
-#' 
-#' @examples
-#' set.seed(10)
-#' n <- 101
-#'
-#' #### linear regression ####
-#' Y1 <- rnorm(n, mean = 0)
-#' Y2 <- rnorm(n, mean = 0.3)
-#' Id <- findInterval(runif(n), seq(0.1,1,0.1))
-#' data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
-#'            data.frame(Y=Y2,G="2",Id = Id)
-#'            )
-#' m.lm <- lm(Y ~ G, data = data.df)
-#' a <- extractData(m.lm, design.matrix = TRUE)
-#' b <- extractData(m.lm, design.matrix = FALSE)
-#' 
-#' library(nlme)
-#' m.gls <- gls(Y ~ G, weights = varIdent(form = ~ 1|Id), data = data.df)
-#' c <- extractData(m.gls)
-#' m.lme <- lme(Y ~ G, random = ~ 1|Id, data = data.df)
-#' d <- extractData(m.lme)
-#' 
-#' library(lava)
-#' e.lvm <- estimate(lvm(Y ~ G), data = data.df)
-#' e <- extractData(e.lvm)
-#' e <- extractData(e.lvm, design.matrix = TRUE)
-#' 
-#' #### survival #### 
-#' library(survival)
-#'
-#' \dontrun{
-#'   library(riskRegression) ## needs version >=1.4.3
-#'   dt.surv <- sampleData(n, outcome = "survival")
-#'   m.cox <- coxph(Surv(time, event) ~ X1 + X2, data = dt.surv, x = TRUE, y = TRUE)
-#'   f <- extractData(m.cox, design.matrix = FALSE)
-#'   f <- extractData(m.cox, design.matrix = TRUE)
-#'   m.cox <- coxph(Surv(time, event) ~ strata(X1) + X2, data = dt.surv, x = TRUE, y = TRUE)
-#'   f <- extractData(m.cox, design.matrix = TRUE)
-#' }
-#' 
-#' #### nested functions ####
-#' fct1 <- function(m){
-#'    fct2(m)
-#' }
-#' fct2 <- function(m){ 
-#'    extractData(m)
-#' }
-#' g <- fct1(m.gls)
-#' @concept extractor
-#' @export
-`extractData` <-
-    function(object, design.matrix, as.data.frame, envir){
-        UseMethod("extractData", object)
-    }
-
-## * method extractData.lm
-#' @rdname extractData
-#' @export
-extractData.lm <- function(object, design.matrix = FALSE, as.data.frame = TRUE,
-                           envir = environment()){
-    ## ** check arguments
-    validLogical(design.matrix, valid.length = 1)
-    validLogical(as.data.frame, valid.length = 1)
-
-    ## ** extract data
-    if(design.matrix){
-        data <- model.matrix(object)
-    }else{
-        ## cannot use model.frame because it only returns the part of the dataset relevant for fitting the model
-        ## this is not enough for modelsearch2
-        ## data <- try(model.frame(object), silent = TRUE)
-        ## data <- object$model
-        data <- evalInParentEnv(object$call$data)
-        if("function" %in% class(data)){
-            stop("data has the same name as a function \n",
-                 "consider renaming data before generating object \n")
-        }
-        if(!inherits(data, "data.frame")){
-            stop("Could not extract the data from the model \n")
-        } 
-    }
-
-    ## ** normalize data
-    if(as.data.frame){
-        data <- as.data.frame(data)        
-    }
-
-    ## ** export
-    return(data)
-}
-
-## * method extractData.coxph
-#' @rdname extractData
-#' @export
-extractData.coxph <- function(object, design.matrix = FALSE, as.data.frame = TRUE,
-                              envir = environment()){
-    ## ** check arguments
-    validLogical(design.matrix, valid.length = 1)
-    validLogical(as.data.frame, valid.length = 1)
-
-    ## ** extract data
-    if(design.matrix){
-         tryPkg <- requireNamespace("riskRegression")
-         if(tryPkg == FALSE){
-            stop("Package \'riskRegression\' could not be loaded \n")
-        }else if(utils::packageVersion("riskRegression")<="1.4.3"){
-            stop("riskRegression version must be > 1.4.3 \n",
-                 "latest version available on Github at tagteam/riskRegression \n")
-        }else{
-            #### [:toUpdate]
-            ##  data <- try(riskRegression::coxDesign(object), silent = TRUE)
-            ##  strataVar <- riskRegression::coxVariableName(object)$stratavars.original
-
-            ## this is a temporary modification waiting for the update of riskRegression on CRAN
-            coxDesign.rr <- get("coxDesign", envir = asNamespace("riskRegression"), inherits = FALSE)
-            coxVariableName.rr <- get("coxVariableName", envir = asNamespace("riskRegression"), inherits = FALSE)
-            data <- try(coxDesign.rr(object), silent = TRUE)
-            strataVar <- coxVariableName.rr(object)$stratavars.original
-        } 
-      
-        if(length(strataVar)>0){         
-            data2 <- evalInParentEnv(object$call$data)
-            data <- cbind(as.data.frame(data),
-                          as.data.frame(data2)[,strataVar,drop=FALSE])        
-        }
-    }else{
-        data <- evalInParentEnv(object$call$data)
-        
-        if("function" %in% class(data)){
-            stop("data has the same name as a function \n",
-                 "consider renaming data before generating object \n")
-        }
-        if(!inherits(data, "data.frame")){
-            stop("Could not extract the data from the model \n")
-        } 
-    }
-
-    ## ** normalize data
-    if(as.data.frame){
-        data <- as.data.frame(data)        
-    }
-
-    ## ** export
-    return(data)
-    
-}
-
-## * method extractData.cph
-#' @rdname extractData
-#' @export
-extractData.cph <- extractData.coxph
-
-## * method extractData.lvmfit
-#' @rdname extractData
-#' @export
-extractData.lvmfit <- function(object, design.matrix = FALSE, as.data.frame = TRUE,
-                               envir = environment()){
-    ## ** check arguments
-    validLogical(design.matrix, valid.length = 1)
-    validLogical(as.data.frame, valid.length = 1)
-
-    ## ** extract data
-    if(design.matrix){
-        data <- object$data$model.frame
-        keep.cols <- intersect(c("(Intercept)",lava::vars(object)), names(data))
-        data <- data[,keep.cols,drop=FALSE]
-    }else{
-        data <- evalInParentEnv(object$call$data)
-        
-        if("function" %in% class(data)){
-            stop("data has the same name as a function \n",
-                 "consider renaming data before generating object \n")
-        }
-        
-        if(!inherits(data, "data.frame")){
-            data <- model.frame(object)
-        }
-    }
-
-    ## ** normalize data
-    if(as.data.frame){
-        data <- as.data.frame(data)        
-    }
-
-    ## ** export
-    return(data)
-    
-}
-
-## * method extractData.gls
-#' @rdname extractData
-#' @export
-extractData.gls <- function(object, design.matrix = FALSE, as.data.frame = TRUE,
-                            envir = environment()){
-    ## ** check arguments
-    validLogical(design.matrix, valid.length = 1)
-    validLogical(as.data.frame, valid.length = 1)
-
-    ## ** extract data
-    if(design.matrix){
-
-        # assign the dataset to the object if not in the current environment
-        name.data <- as.character(object$call$data)
-        if((length(name.data) == 1) && (name.data %in% ls() == FALSE)){
-            object$data <- evalInParentEnv(object$call$data)
-        }
-      
-        data <- try(nlme::getData(object), silent = TRUE)
-
-    }else{
-        data <- evalInParentEnv(object$call$data)
-        
-        if("function" %in% class(data)){
-            stop("data has the same name as a function \n",
-                 "consider renaming data before generating object \n")
-        }
-        if(!inherits(data, "data.frame")){
-            stop("Could not extract the data from the model \n")
-        } 
-    }
-
-    ## ** normalize data
-    if(as.data.frame){
-        data <- as.data.frame(data)        
-    }
-
-    ## ** export
-    return(data)
-    
-}
-
-## * method extractData.lme
-#' @rdname extractData
-#' @export
-extractData.lme <- extractData.gls
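Editor's note: when model.frame() does not carry enough columns, these methods fall back on re-evaluating the data by name from the stored call, which is what evalInParentEnv(object$call$data) does above. The idea in miniature (an illustration evaluating in the current environment):

    d <- data.frame(Y = rnorm(10), G = gl(2, 5))
    m.lm <- lm(Y ~ G, data = d)
    recovered <- eval(m.lm$call$data)   # evaluates the symbol `d` stored in the call
    identical(recovered, d)             # TRUE here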
diff --git a/R/package-butils-valid.R b/R/package-butils-valid.R
deleted file mode 100644
index 240a50a..0000000
--- a/R/package-butils-valid.R
+++ /dev/null
@@ -1,428 +0,0 @@
-## * Documentation
-#' @name validFCTs
-#' @aliases validClass
-#' @aliases validDimension
-#' @aliases validInteger
-#' @aliases validLogical
-#' @aliases validNames
-#' @aliases validNumeric
-#' @aliases validPath
-#' @title Check Arguments of a Function.
-#' 
-#' @description Check the validity of the arguments in functions.
-#' 
-#' @param value1 the value of the (first) argument to be checked
-#' @param value2 the second value of a second argument whose dimensions should be consistent with the first one
-#' @param name1 the name of the (first) argument.
-#' @param name2 the name of the second argument.
-#' @param validClass the acceptable classes(s) for the argument. 
-#' @param validDimension the acceptable dimension for the argument. If \code{NULL} then name2 is used as a reference.
-#' @param valid.length the acceptable length(s) for the argument. If \code{NULL} no test is performed.
-#' @param valid.values the acceptable value(s) for the argument. If \code{NULL} no test is performed. Can also be "character" or "character_or_logical".
-#' @param super.classes uses the \code{is} function instead of \code{class} to test the class of the object.
-#' @param refuse.NULL should an error be output if value is \code{NULL}.
-#' @param refuse.NA should an error be output if value contains \code{NA}.
-#' @param refuse.duplicates should an error be output if value contains duplicated values.
-#' @param refuse.values values that must not appear in the argument
-#' @param type For \code{validDimension}: the type of operator used to check the dimensions. For \code{validPath} either "dir" or "file" to check whether to path points to an existing directory or file.
-#' @param required.values values that must appear in the argument
-#' @param min the minimum acceptable value
-#' @param max the maximum acceptable value
-#' @param extension filter the files by the type of extension. 
-#' @param method the name of the function using the argument.
-#' @param check.fsep display a warning when the separator is not correctly specified in the path.
-#' @param addPP add ": " after the name of the function in the error message.
-#' 
-#' @return An invisible \code{TRUE} or an error message.
-#' 
-#' @concept check
-#' @keywords internal
-
-## * validCharacter
-#' @rdname validFCTs
-validCharacter <- function(value1, name1 = as.character(substitute(value1)), valid.length, 
-                           valid.values = "character", refuse.NULL = TRUE, refuse.duplicates = FALSE, 
-                           method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  if(is.null(value1)){
-    
-    if(refuse.NULL == TRUE){
-      stop(method, "\'", name1, "\' must not be NULL \n")
-    }
-    
-  }else{
-    
-    #### check size
-    n.value1 <- length(value1)
-    
-    if(!is.null(valid.length) && n.value1 %in% valid.length == FALSE){
-      stop(method, "\'", name1, "\' must have length ", paste(valid.length, collapse = " or "), "  \n", 
-           "length(", name1, ") : ", n.value1, "\n")
-    }
-    
-    #### check duplicates
-    if(refuse.duplicates == TRUE && any(duplicated(value1))){
-      stop(method, "\'", name1, "\' contains duplicated values: ", "\"",paste(unique(value1[duplicated(value1)]), collapse = "\" \""), "\" \n")
-    }
-    
-    #### check values
-    if(identical(valid.values,"character")){
-      
-      if(any(is.character(value1) == FALSE)){
-        stop(method, "\'", name1, "\' must be a ", if(n.value1 == 1){"character"}else{"vector of characters"}," \n", 
-             "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
-      }
-      
-    } else if(identical(valid.values,"character_or_logical")){
-      
-      if(any( (is.character(value1) == FALSE) * (is.logical(value1) == FALSE) > 0 )){
-        stop(method, "\'", name1, "\' must be a ", if(n.value1 == 1){"character or logical"}else{"vector of characters or logicals"}," \n", 
-             "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
-      }
-      
-    } else if(!is.null(valid.values) && any(value1 %in% valid.values == FALSE)){
-      
-      stop(method, "wrong specification of \'", name1, "\' \n", 
-           "valid values for \'", name1, "\' : ", if(refuse.NULL == FALSE){"NULL"}, " \"", paste(valid.values, collapse = "\" \""), "\" \n", 
-           "refused value",if(sum(value1 %in% valid.values == FALSE)>1){"s"}," for \'", name1, "\' : \"", paste(value1[value1 %in% valid.values == FALSE], collapse = "\" \""), "\"\n")
-      
-    }
-    
-  }
-  
-  return(invisible(TRUE))
-  
-}
-
-## * validClass
-#' @rdname validFCTs
-validClass <- function(value1, name1 = as.character(substitute(value1)), validClass, 
-                       super.classes = TRUE, method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  if(super.classes == TRUE){
-    
-    if( all(is(value1) %in% validClass == FALSE) ){
-      stop(method, "class of \'", name1, "\' must be one of the following \"", paste(validClass,collapse="\" \""), "\"  \n", 
-           "proposed superclass : \"", paste(is(value1),collapse="\" \""), "\" \n")
-    }  
-    
-  }else{
-    
-    if( class(value1) %in% validClass == FALSE){
-      stop(method, "class of \'", name1, "\' must be \"", paste(validClass,collapse="\" \""),"\"  \n", 
-           "proposed class : ", class(value1)[[1]], "\n")
-    }  
-    
-  }
-  
-  return(invisible(TRUE))
-  
-}
-
-## * validDimension
-#' @rdname validFCTs
-validDimension <- function(value1, value2 = NULL, name1 = as.character(substitute(value1)), name2 = as.character(substitute(value2)),
-                           validDimension = NULL,
-                           type = c("NROW","NCOL"), method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  n.type <- length(type)
-  
-  #### dimension 1
-  testDimension <- sapply(1:n.type, function(x){
-    do.call(type[x], list(value1))
-  })
-  
-  #### dimension 2
-  
-  
-  if(is.null(validDimension)){
-    
-    validDimension <- sapply(1:n.type, function(x){
-      do.call(type[x], list(value2))
-    })
-    test.validDimension <- TRUE
-    
-  }else if(is.null(name2)){
-    
-    test.validDimension <- FALSE
-    
-  }else{
-    
-    test.validDimension <- TRUE
-    
-  }
-  
-  #### main
-  for(iter_type in 1:n.type){
-    
-    if(testDimension[iter_type] != validDimension[iter_type]){
-      
-      if(test.validDimension){
-        stop(method, "dimension mismatch between argument \'", name1, "\' and argument \'", name2, "\' \n", 
-             type[iter_type],"(", name1, ") = ", testDimension[iter_type], " \n", 
-             type[iter_type],"(", name2, ") = ", validDimension[iter_type], " \n")  
-      }else{
-        stop(method, "dimension mismatch between argument \'", name1, "\' and argument \'", name2, "\' \n", 
-             type[iter_type],"(", name1, ") = ", testDimension[iter_type], " \n", 
-             type[iter_type],"(", name2, ") = ", validDimension[iter_type], " \n")
-        
-      }
-      
-    }
-    
-  }
-  
-  return(invisible(TRUE))
-}
-
-## * validInteger
-#' @rdname validFCTs
-validInteger <- function(value1, name1 = as.character(substitute(value1)), valid.length, 
-                         valid.values = NULL, min = NULL, max = NULL, 
-                         refuse.NA = TRUE, refuse.NULL = TRUE, refuse.duplicates = FALSE, 
-                         method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  validNumeric(value1 = value1, name1 = name1, valid.length = valid.length, valid.values = valid.values, min = min, max = max, 
-               refuse.NA = refuse.NA, refuse.NULL = refuse.NULL, refuse.duplicates = refuse.duplicates, method = method)
-  
-  #### check integer
-  if(!is.null(value1) && any(value1 %% 1 > 0, na.rm = TRUE)){
-    stop(method, "\'", name1, "\' must contain integers, not doubles \n",        
-         "invalid value(s) in ", name1, " : ", paste(stats::na.omit(value1[value1 %% 1 > 0]), collapse = " "), "\n")
-  }
-  
-  return(invisible(TRUE))
-}
-
-## * validLogical
-#' @rdname validFCTs
-validLogical <- function(value1, name1 = as.character(substitute(value1)), valid.length, 
-                         refuse.NULL = TRUE, refuse.NA = TRUE, 
-                         method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  if(is.null(value1)){
-    
-    #### NULL
-    if(refuse.NULL == TRUE){
-      stop(method, "\'", name1, "\' must be logical ",if(refuse.NA == FALSE){"or NA"}," and not NULL \n")
-    }
-    
-  }else{ 
-    
-    #### Size
-    if(!is.null(valid.length) && length(value1) %in% valid.length == FALSE){
-      stop(method, "\'", name1, "\' must have length ", paste(valid.length, collapse = " or "), "  \n", 
-           "length(", name1, ") : ", length(value1), "\n")
-    } 
-    
-    #### Type
-    if(any(is.logical(value1) == FALSE)){
-      stop(method, "\'", name1, "\' must be ", if(refuse.NULL == FALSE){"NULL or "}, if(refuse.NA == FALSE){"NA or "},"TRUE or FALSE \n",        
-           "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
-    }
-    
-    if(refuse.NA == TRUE && any(is.na(value1)) ){
-      stop(method, "\'", name1, "\' must be logical ",if(refuse.NULL == FALSE){"or NULL"}," and not NA \n")
-    }
-    
-  }
-  
-  return(invisible(TRUE))
-}
-
-## * validNames
-#' @rdname validFCTs
-validNames <- function(value1, name1 = as.character(substitute(value1)), refuse.NULL = TRUE,
-                       valid.length = NULL, valid.values = NULL, required.values = NULL, refuse.values = NULL,
-                       method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  ## type
-  if(is.matrix(value1)){
-    value1 <- colnames(value1)
-  }
-  
-  if(inherits(value1,"data.frame") || is.list(value1)){
-    value1 <- names(value1)
-  }
-  
-  ## tests
-  if(is.null(value1)){
-    
-    if(refuse.NULL == TRUE){
-      stop(method, "names of \'", name1, "\' must not be NULL \n")
-    }
-    
-  }else{
-    #### check size
-    n.value1 <- length(value1)
-    
-    if(!is.null(valid.length) && n.value1 %in% valid.length == FALSE){
-      stop(method, "\'", name1, "\' must have ", paste(valid.length, collapse = " or ")," names  \n", 
-           "length(names(", name1, ")) : ", n.value1, "\n")
-    }
-    
-    #### check content
-    
-    if(!is.null(required.values) && any(required.values %in% value1 == FALSE)){
-      
-      stop(method, "\'", name1, "\' must contains specific names \n",
-           "missing names : \"",paste(required.values[required.values %in% value1 == FALSE], collapse = "\" \""),"\" \n", 
-           "proposed names : \"", paste(value1, collapse = "\" \""), "\"\n")  
-      
-    }
-    
-    if(!is.null(valid.values) && any(value1 %in% valid.values == FALSE)){
-      
-      stop(method, "wrong specification of \'", name1, "\' \n", 
-           "valid names for \'", name1, "\' : \"",paste(valid.values, collapse = "\" \""),"\" \n", 
-           "refused names : \"", paste(value1[value1 %in% valid.values == FALSE], collapse = " "), "\"\n")  
-      
-    }
-    
-    if(!is.null(refuse.values) && any(value1 %in% refuse.values)){
-      
-      stop(method, "\'", name1, "\' contains forbidden names:", paste(value1[value1 %in% refuse.values], collapse = " "), "\"\n")  
-      
-    }
-    
-    if(any(duplicated(value1))){
-      stop(method, "\'", name1, "\' must not contain duplicated names \n", 
-           "duplicated names : \"", paste(value1[duplicated(value1)], collapse = " "), "\"\n")  
-    }
-    
-  }
-  
-  return(invisible(TRUE))
-  
-}
-
-## * validNumeric
-#' @rdname validFCTs
-validNumeric <- function(value1, name1 = as.character(substitute(value1)), valid.length,
-                         valid.values = NULL , min = NULL, max = NULL,
-                         refuse.NA = TRUE, refuse.NULL = TRUE, refuse.duplicates = FALSE, 
-                         method = NULL, addPP = TRUE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  if(is.null(value1)){
-    
-    if(refuse.NULL == TRUE){
-      stop(method, "\'", name1, "\' must not be NULL \n")
-    }
-    
-  }else{
-    
-    #### check length
-    if(!is.null(valid.length) && length(value1) %in% valid.length == FALSE){
-      stop(method, "\'", name1, "\' must have length ", paste(valid.length, collapse = " or "), "  \n", 
-           "length(", name1, ") : ", length(value1), "\n")
-    }
-    
-    #### check NA
-    if(refuse.NA == TRUE && any(is.na(value1))){
-      stop(method, "\'", name1, "\' must not contain any NA \n", 
-           "index of NA values : ", paste(which(is.na(value1)), collapse = " "), "\n")
-    }
-    
-    #### check numeric
-    if(any( (is.numeric(value1) == FALSE) * (is.na(value1) == FALSE) )){
-      stop(method, "\'", name1, "\' must be numeric \n",        
-           "is(", name1, ") : ", paste(is(value1), collapse = " "), "\n")
-    }
-    
-    #### check duplicates
-    if(refuse.duplicates == TRUE && any(duplicated(value1))){
-      stop(method, "\'", name1, "\' contains duplicated values: ", paste(unique(value1[duplicated(value1)]), collapse = " "), "\n")
-    }
-    
-    #### check min value1
-    if(!is.null(min) && any(stats::na.omit(value1) < min)){
-      stop(method, "\'", name1, "\' must be greater than or equal to ", min, " \n",        
-           "invalid value(s): ", paste(value1[which(value1 < min)], collapse = " "), "\n")
-    }
-    
-    #### check max value1
-    if(!is.null(max) && any(stats::na.omit(value1) > max)){
-      stop(method, "\'", name1, "\' must be smaller than or equal to ", max, " \n",        
-           "invalid value(s): ", paste(value1[which(value1 > max)], collapse = " "), "\n")
-    }
-    
-    #### check valid values
-    if(!is.null(valid.values) && any(value1 %in% valid.values == FALSE)){
-      
-      stop(method, "\'", name1, "\' contain invalid values \n", 
-           "valid values for \'", name1, "\' : ", if(refuse.NULL == FALSE){"NULL"}, " \"", paste(valid.values, collapse = "\" \""), "\" \n", 
-           "refused value",if(sum(value1 %in% valid.values == FALSE)>1){"s"}," for \'", name1, "\' : \"", paste(value1[value1 %in% valid.values == FALSE], collapse = " "), "\"\n")
-      
-    }
-  }
-  
-  return(invisible(TRUE))
-}
-
-## * validPath
-#' @rdname validFCTs
-validPath <- function(value1, name1 = as.character(substitute(value1)), type,
-                      method = NULL, addPP = TRUE, extension = NULL, check.fsep = FALSE){
-  
-  if(!is.null(method) && addPP){
-    method <- paste0(method, ": ")
-  }
-  
-  validCharacter(type, valid.length = 1, valid.values = c("file", "dir"))
-  
-  try_path <- switch(type,
-                     file = file.exists(value1),
-                     dir = dir.exists(value1)
-  )
-  
-  if(try_path == FALSE){
-    stop(method, "\'", name1, "\' does not lead to an existing ",switch(type,"file"="file","dir"="directory")," \n", 
-         "proposed value: \"", value1, "\"\n", 
-         "current path: ", getwd(), "\n")
-  }
-  
-  
-  if(type == "dir"){ 
-    if(check.fsep == TRUE && substr(value1, start = nchar(value1), stop = nchar(value1)) != "/"){
-      warning(method, "possible bad specification of \'", name1, "\' \n", 
-              "it should end with a fsep (e.g. \"/\") \n")
-    }
-  }else if(type == "file" && !is.null(extension)){
-    fileExtension <- tools::file_ext(value1) 
-    if(fileExtension %in% extension == FALSE){
-      stop(method, "\'", name1, "\' has not the expected extension \n", 
-           "proposed extension: \"", fileExtension, "\" \n", 
-           "expected extension: \"", paste(extension, collapse = "\" \""), "\"\n")
-    }
-  }
-  
-  return(invisible(TRUE))
-}
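
These removed valid* helpers all follow the same contract: check a single argument, stop() with a message naming the offending argument and values, and otherwise return invisible(TRUE). A minimal sketch of how such checkers are typically chained at the top of a function (the function f and its arguments are hypothetical, shown only for illustration):

    f <- function(x, type){
      validNumeric(x, valid.length = NULL, min = 0, refuse.NA = TRUE)        ## x: numeric, >= 0, no NA
      validCharacter(type, valid.length = 1, valid.values = c("file","dir")) ## type: one of two keywords
      ## ... actual computation ...
      invisible(TRUE)
    }
    ## f(c(1,2), "file")  ## passes silently
    ## f(-1, "directory") ## stops at the first failing check with a descriptive message
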
diff --git a/R/prepareScore2.R b/R/prepareScore2.R
deleted file mode 100644
index 1d1b559..0000000
--- a/R/prepareScore2.R
+++ /dev/null
@@ -1,21 +0,0 @@
-### prepareScore2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: feb 19 2018 (17:01) 
-## Version: 
-## Last-Updated: feb 19 2018 (18:52) 
-##           By: Brice Ozenne
-##     Update #: 1
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-
-
-##----------------------------------------------------------------------
-### prepareScore2.R ends here
diff --git a/R/residuals2.R b/R/residuals2.R
deleted file mode 100644
index 4454edc..0000000
--- a/R/residuals2.R
+++ /dev/null
@@ -1,123 +0,0 @@
-### residuals2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: nov  8 2017 (09:05) 
-## Version: 
-## Last-Updated: feb 11 2019 (13:25) 
-##           By: Brice Ozenne
-##     Update #: 935
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * documentation - residuals2
-#' @title Extract Corrected Residuals
-#' @description Extract corrected residuals from a Gaussian linear model.
-#' @name residuals2
-#' 
-#' @param object a \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} object.
-#' @param type [character] the type of residual to extract:
-#' \code{"response"} for raw residuals,
-#' \code{"studentized"} for studentized residuals,
-#' \code{"normalized"} for normalized residuals.
-#' @param param [named numeric vector] the fitted parameters.
-#' @param data [data.frame] the data set.
-#'
-#' @seealso \code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
-#'
-#' @details If argument \code{param} or \code{data} is not null, then the small sample size correction is recomputed to correct the residuals. \cr
-#'
-#' The raw residuals are defined as the observation minus the fitted value:
-#' \deqn{
-#' \varepsilon = (Y_1 - \mu_1, ..., Y_m - \mu_m)
-#' }
-#' The studentized residuals divide the raw residuals of each endogenous variable by the modeled standard deviation of that variable.
-#' \deqn{
-#' \varepsilon_{stud} =(\frac{Y_1 - \mu_1}{\sigma_1}, ..., \frac{Y_m - \mu_m}{\sigma_m})
-#' }
-#' The normalized residuals multiply the raw residuals by the inverse square root of the modeled residual variance-covariance matrix.
-#' \deqn{
-#' \varepsilon_{norm} = \varepsilon \Omega^{-1/2}
-#' }
-#' @return a matrix containing the residuals relative to each sample (in rows)
-#' and each endogenous variable (in columns).
-#'
-#' @examples
-#' ## simulate data
-#' set.seed(10)
-#' m <- lvm(Y1~eta,Y2~eta,Y3~eta)
-#' latent(m) <- ~eta
-#' d <- lava::sim(m,20, latent = FALSE)
-#'
-#' ## standard linear model
-#' e.lm <- lm(Y1~Y2, data = d)
-#' sCorrect(e.lm) <- TRUE
-#' 
-#' sigma(e.lm)^2
-#' mean(residuals(e.lm)^2)
-#' mean(residuals2(e.lm)^2)
-#' 
-#' ## latent variable model
-#' e.lvm <- estimate(m, data = d)
-#' sCorrect(e.lvm) <- TRUE
-#' mean(residuals2(e.lvm)^2)
-#'
-#' @concept small sample inference
-#' @export
-`residuals2` <-
-    function(object, param, data, type) UseMethod("residuals2")
-
-## * residuals2.lm2
-#' @rdname residuals2
-#' @export
-residuals2.lm2 <- function(object, param = NULL, data = NULL, type = "response"){
-
-    type <- match.arg(type, choices = c("response","studentized","normalized"), several.ok = FALSE)
-
-    if(!is.null(param) || !is.null(data)){
-        args <- object$sCorrect$args
-        args$df <- FALSE
-        object$sCorrect <- do.call(sCorrect,
-                                   args = c(list(object, param = param, data = data),
-                                            args))
-    }
-    if(type=="response"){
-        residuals <- object$sCorrect$residuals
-    }else if(type=="studentized"){
-        residuals <- sweep(object$sCorrect$residuals,
-                           STATS = sqrt(diag(object$sCorrect$Omega)),
-                           FUN = "/",
-                           MARGIN = 2)
-        ## object$sCorrect$residuals/residuals
-    }else if(type=="normalized"){
-        residuals <- object$sCorrect$residuals %*% matrixPower(object$sCorrect$Omega, symmetric = TRUE, power = -1/2)
-        colnames(residuals) <- colnames(object$sCorrect$residuals)
-        ## object$sCorrect$residuals/residuals
-        ## var(residuals)        
-    }
-    return(residuals)
-}
-
-## * residuals2.gls2
-#' @rdname residuals2
-#' @export
-residuals2.gls2 <- residuals2.lm2
-
-## * residuals2.lme2
-#' @rdname residuals2
-#' @export
-residuals2.lme2 <- residuals2.lm2
-
-## * residuals2.lvmfit2
-#' @rdname residuals2
-#' @export
-residuals2.lvmfit2 <- residuals2.lm2
-
-
-##----------------------------------------------------------------------
-### residuals2.R ends here
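
For reference, the three residual types documented in the removed file can be reproduced from a matrix of raw residuals and the modeled residual covariance Omega. A minimal self-contained sketch in R, where an eigen-based inverse square root stands in for the package's internal matrixPower helper:

    set.seed(1)
    E <- matrix(rnorm(40), nrow = 20, ncol = 2)          ## raw residuals (one row per sample)
    Omega <- crossprod(matrix(rnorm(4), 2, 2)) + diag(2) ## modeled residual covariance

    ## studentized: divide each endogenous variable by its modeled standard deviation
    E.stud <- sweep(E, MARGIN = 2, STATS = sqrt(diag(Omega)), FUN = "/")

    ## normalized: right-multiply by the symmetric inverse square root of Omega
    eig <- eigen(Omega, symmetric = TRUE)
    OmegaM12 <- eig$vectors %*% diag(1/sqrt(eig$values)) %*% t(eig$vectors)
    E.norm <- E %*% OmegaM12
    ## var(E.norm) is approximately the identity when Omega is well specified
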
diff --git a/R/sCorrect-coef2.R b/R/sCorrect-coef2.R
new file mode 100644
index 0000000..1906807
--- /dev/null
+++ b/R/sCorrect-coef2.R
@@ -0,0 +1,118 @@
+### sCorrect-coef2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 18 2019 (10:14) 
+## Version: 
+## Last-Updated: jan 17 2022 (15:06) 
+##           By: Brice Ozenne
+##     Update #: 320
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation
+#' @title Model Coefficients With Small Sample Correction
+#' @description Extract the coefficients from a latent variable model.
+#' Similar to \code{stats::coef} but with small sample correction.
+#' @name coef2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param as.lava [logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the model coefficients.
+#' 
+#' @return A numeric vector named with the names of the coefficients.
+#' 
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#' 
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`coef2` <-
+    function(object, as.lava, ...) UseMethod("coef2")
+
+
+## * Examples
+#' @rdname coef2
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' dW <- sampleRepeated(10, format = "wide")
+#' set.seed(10)
+#' dL <- sampleRepeated(10, format = "long")
+#' dL$time2 <- paste0("visit",dL$time)
+#' 
+#' #### latent variable models ####
+#' e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+#' coef(e.lvm)
+#' coef2(e.lvm)
+#' coef2(e.lvm, as.lava = FALSE)
+
+## * coef2.lvmfit
+#' @export
+coef2.lvmfit <- function(object, as.lava = TRUE, ssc = lava.options()$ssc, ...){
+
+    return(coef(estimate2(object, ssc = ssc, ...), as.lava = as.lava))
+
+}
+
+## * coef2.lvmfit2
+#' @export
+coef2.lvmfit2 <- function(object, as.lava = TRUE, ...){
+    dots <- list(...)
+    if(any(names(dots) %in% c("type","symbol","labels") == FALSE)){
+        if(length(dots)>0){
+            warning("Argument(s) \'",paste(setdiff(names(dots), c("type","symbol","labels")), collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+        }
+    }
+    if(length(dots)>1){ ## for the print function
+
+        ## new values
+        res <- model.tables(object, as.lava = TRUE)
+
+        ## extract structure from lava
+        object0 <- object
+        class(object0) <- setdiff(class(object), "lvmfit2")
+        out <- do.call(stats::coef, args = c(list(object0),dots)) ## this does not necessarily output the full parameter name
+        ## Y1~~Y1 may be abbreviated to Y1, which is easily confused with the intercept Y1
+        dots$symbol <- NULL
+        out.names <- intersect(rownames(do.call(stats::coef, args = c(list(object0),dots))), ## full name
+                               rownames(res))
+        index.out.names <- match(out.names, rownames(do.call(stats::coef, args = c(list(object0),dots))))
+
+        ## rownames(res) <- as.character(object$sCorrect$skeleton$originalLink2param)
+        out[index.out.names,"Estimate"] <- res[out.names,"estimate"]
+        out[index.out.names,"Std. Error"] <- res[out.names,"se"]
+        out[index.out.names,3] <- res[out.names,"statistic"] ## use 3 instead of Z value / Z-value
+        if(object$sCorrect$df=="satterthwaite"){ 
+            colnames(out)[3] <- "t value"
+            if(colnames(out)[4]=="Pr(>|z|)"){ 
+                colnames(out)[4] <- "Pr(>|t|)"
+            }
+        }
+        out[index.out.names,4] <- res[out.names,"p.value"] ## use 4 instead of P-value / Pr(>|z|)
+
+    }else{        
+        out <- object$sCorrect$param[names(object$sCorrect$skeleton$originalLink2param)]
+        if(as.lava==FALSE){
+            names(out) <- as.character(object$sCorrect$skeleton$originalLink2param)
+        }
+    }
+    return(out)
+}
+
+## * coef.lvmfit2
+#' @export
+coef.lvmfit2 <- coef2.lvmfit2
+
+######################################################################
+### sCorrect-coef2.R ends here
diff --git a/R/sCorrect-compare2.R b/R/sCorrect-compare2.R
new file mode 100644
index 0000000..d13c99c
--- /dev/null
+++ b/R/sCorrect-compare2.R
@@ -0,0 +1,433 @@
+### compare2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: jan 30 2018 (14:33) 
+## Version: 
+## Last-Updated: Apr 11 2023 (22:31) 
+##           By: Brice Ozenne
+##     Update #: 903
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - compare2
+#' @title Test Linear Hypotheses With Small Sample Correction
+#' @description Test Linear Hypotheses using Wald statistics in a latent variable model.
+#' Similar to \code{lava::compare} but with small sample correction.
+#' @name compare2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param linfct [matrix or vector of character] the linear hypotheses to be tested. Same as the argument \code{par} of \code{\link{createContrast}}.
+#' @param rhs [vector] the right hand side of the linear hypotheses to be tested.
+#' @param robust [logical] should the robust standard errors be used instead of the model-based standard errors?
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param as.lava [logical] should the output be similar to the one returned by \code{lava::compare}?
+#' @param F.test [logical] should a joint test be performed?
+#' @param conf.level [numeric 0-1] level of the confidence intervals.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite \code{"satterthwaite"}. 
+#' Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details The \code{linfct} argument and \code{rhs} specify the set of linear hypotheses to be tested. They can be written:
+#' \deqn{
+#'   linfct * \theta = rhs
+#' }
+#' where \eqn{\theta} is the vector of the model coefficients. \cr
+#' When given as character strings, the \code{linfct} argument must contain expression(s) involving the model coefficients.
+#' For example \code{"beta = 0"} or \code{c("-5*beta + alpha = 3","-alpha")} are valid expressions if alpha and beta belong to the set of model coefficients.
+#' A contrast matrix and the right hand side will be generated inside the function. \cr
+#' 
+#' When directly specified, the contrast matrix must contain as many columns as there are coefficients in the model (mean and variance coefficients).
+#' Each hypothesis corresponds to a row in the contrast matrix. \cr
+#'
+#' The rhs vector should contain as many elements as there are rows in the contrast matrix. \cr
+#' 
+#' @seealso \code{\link{createContrast}} to create contrast matrices. \cr
+#' \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#' 
+#' @return If \code{as.lava=TRUE} an object of class \code{htest}.
+#' Otherwise a \code{data.frame} object.
+
+## * example - compare2
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' mSim <- lvm(Y~0.1*X1+0.2*X2)
+#' categorical(mSim, labels = c("a","b","c")) <- ~X1
+#' transform(mSim, Id~Y) <- function(x){1:NROW(x)}
+#' df.data <- lava::sim(mSim, 1e2)
+#'
+#' #### with lvm ####
+#' m <- lvm(Y~X1+X2)
+#' e.lvm <- estimate(m, df.data)
+#' 
+#' compare2(e.lvm, linfct = c("Y~X1b","Y~X1c","Y~X2"))
+#' compare2(e.lvm, linfct = c("Y~X1b","Y~X1c","Y~X2"), robust = TRUE)
+#' 
+#' @concept inference
+#' @keywords smallSampleCorrection
+#' @export
+`compare2` <-
+    function(object, linfct, rhs,
+             robust, cluster,
+             as.lava, F.test,
+             conf.level, ...) UseMethod("compare2")
+
+## * compare2.lvmfit
+#' @rdname compare2
+#' @export
+compare2.lvmfit <- function(object, linfct = NULL, rhs = NULL,
+                            robust = FALSE, cluster = NULL,
+                            as.lava = TRUE, F.test = TRUE,
+                            conf.level = 0.95,
+                            ssc = lava.options()$ssc, df = lava.options()$df, ...){
+
+    return(compare(estimate2(object, ssc = ssc, df = df, dVcov.robust = robust, ...),
+                    linfct = linfct, rhs = rhs, robust = robust, cluster = cluster, as.lava = as.lava, F.test = F.test, conf.level = conf.level)
+           )
+
+}
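
As the details above note, linfct can also be passed directly as a contrast matrix with one column per model coefficient (mean and variance parameters alike). A hedged sketch continuing the example above, assuming lava's default coefficient labels such as "Y~X2":

    e2 <- estimate2(e.lvm)
    name.coef <- names(coef(e2))
    C <- matrix(0, nrow = 1, ncol = length(name.coef),
                dimnames = list("Y~X2 = 0", name.coef))
    C[1, "Y~X2"] <- 1                 ## test the effect of X2 on Y
    compare2(e2, linfct = C, rhs = 0) ## equivalent to linfct = "Y~X2"
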
+
+## * compare2.lvmfit2
+#' @rdname compare2
+#' @export
+compare2.lvmfit2 <- function(object, linfct = NULL, rhs = NULL,
+                              robust = FALSE, cluster = NULL,
+                              as.lava = TRUE, F.test = TRUE,
+                              conf.level = 0.95, ...){
+   
+    dots <- list(...)
+    if(any(names(dots)=="par")){
+        stop("Argument \'par\' is no longer used as it has been replaced by \'linfct\'. \n")
+    }
+    if(length(dots[names(dots) %in% "sep" == FALSE])>0){
+        warning("Argument(s) \'",paste(setdiff(names(dots),"sep"),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+    if(is.null(linfct)){ ## necessary for lava::gof to work
+        object0 <- object
+        class(object0) <- setdiff(class(object0),"lvmfit2")
+        return(lava::compare(object0))
+    }
+    if(!is.logical(robust)){ 
+        stop("Argument \'robust\' should be TRUE or FALSE \n")
+    }
+    if(robust==FALSE && !is.null(cluster)){
+        stop("Argument \'cluster\' must be NULL when argument \'robust\' is FALSE \n")
+    }
+
+    ## ** extract information
+    df <- object$sCorrect$df
+    
+    ## 0-order: param
+    param <- coef(object, as.lava = FALSE)
+    n.param <- length(param)
+    name.param <- names(param)
+
+    ## 1-order: score
+    if(robust){
+        score <- score(object, cluster = cluster, as.lava = FALSE, indiv = TRUE)
+    }else{
+        score <- NULL
+    }
+    
+    ## 2-order: variance covariance
+    vcov.param <- vcov(object, as.lava = FALSE)
+    warn <- attr(vcov.param, "warning")
+    attr(vcov.param, "warning") <- NULL
+    if(robust){
+        rvcov.param <- vcov(object, robust = TRUE, cluster = cluster, as.lava = FALSE)
+    }
+
+    ## 3-order: derivative of the variance covariance matrices
+    if(df == "satterthwaite"){
+        dVcov.param <- object$sCorrect$dVcov.param[names(object$sCorrect$skeleton$originalLink2param),
+                                                   names(object$sCorrect$skeleton$originalLink2param),
+                                                   names(object$sCorrect$skeleton$originalLink2param),
+                                                   drop=FALSE]
+        dimnames(dVcov.param) <- list(as.character(object$sCorrect$skeleton$originalLink2param),
+                                      as.character(object$sCorrect$skeleton$originalLink2param),
+                                      as.character(object$sCorrect$skeleton$originalLink2param))
+        keep.param <- dimnames(dVcov.param)[[3]]
+
+        if(robust && (lava.options()$df.robust != 1)){
+            if(!is.null(cluster) || is.null(object$sCorrect$dRvcov.param)){
+                ## update derivative according to cluster
+                hessian <- hessian2(object, cluster = cluster, as.lava = FALSE)
+                dRvcov.param <- .dRvcov.param(score = score,
+                                              hessian = hessian,
+                                              vcov.param = vcov.param,
+                                              dVcov.param = dVcov.param,
+                                              n.param = n.param,
+                                              name.param = name.param)
+                                              
+            }else{
+                dRvcov.param <- object$sCorrect$dRvcov.param[names(object$sCorrect$skeleton$originalLink2param),
+                                                             names(object$sCorrect$skeleton$originalLink2param),
+                                                             names(object$sCorrect$skeleton$originalLink2param),
+                                                             drop=FALSE]
+                dimnames(dRvcov.param) <- list(as.character(object$sCorrect$skeleton$originalLink2param),
+                                               as.character(object$sCorrect$skeleton$originalLink2param),
+                                               as.character(object$sCorrect$skeleton$originalLink2param))
+            }
+        }
+    }
+
+    ## ** normalize linear hypotheses
+    if(!is.matrix(linfct)){
+        res.C <- createContrast(object, linfct = linfct, rowname.rhs = FALSE, ...)
+        if(any(colnames(res.C$contrast)!=name.param) && all(colnames(res.C$contrast) == names(object$sCorrect$skeleton$originalLink2param))){
+            colnames(res.C$contrast) <- as.character(object$sCorrect$skeleton$originalLink2param)
+        }
+        linfct <- res.C$contrast
+        if(is.null(rhs)){
+            rhs <- res.C$null
+        }else{
+            if(length(rhs)!=length(res.C$null)){
+                stop("Incorrect argument \'rhs\' \n",
+                     "Must have length ",length(res.C$null),"\n")
+            }
+            rhs <- stats::setNames(rhs, names(res.C$null))
+        }
+        name.hypoShort <- rownames(linfct)
+        name.hypo <- paste0(name.hypoShort," = ",rhs)
+    }else{
+        if(is.null(colnames(linfct))){
+            stop("Argument \'linfct\' must have column names \n")
+        }
+        if(NCOL(linfct) != n.param){
+            stop("Argument \'linfct\' should be a matrix with ",n.param," columns \n")
+        }
+        if(any(colnames(linfct) %in% name.param == FALSE)){
+            txt <- setdiff(colnames(linfct), name.param)
+            stop("Argument \'linfct\' has incorrect column names \n",
+                 "invalid name(s): \"",paste(txt, collapse = "\" \""),"\"\n")
+        }
+        if(any(name.param %in% colnames(linfct) == FALSE)){
+            txt <- setdiff(name.param, colnames(linfct))
+            stop("Argument \'linfct\' has incorrect column names \n",
+                 "missing name(s): \"",paste(txt, collapse = "\" \""),"\"\n")
+        }
+        ## reorder columns according to coefficients
+        linfct <- linfct[,name.param,drop=FALSE]
+        if(F.test && any(abs(svd(linfct)$d)<1e-10)){
+            stop("Argument \'linfct\' is singular \n")
+        }
+        if(is.null(rhs)){
+            rhs <- stats::setNames(rep(0,NROW(linfct)),rownames(linfct))
+        }else if(length(rhs)!=NROW(linfct)){
+            stop("The length of argument \'rhs\' must match the number of rows of argument \'linfct' \n")
+        }
+        if(is.null(rownames(linfct))){
+            rownames(linfct) <- .contrast2name(linfct, null = rhs)
+            rhs <- stats::setNames(rhs, rownames(linfct))
+        }
+        name.hypo <- rownames(linfct)
+        name.hypoShort <- sapply(strsplit(name.hypo, split = " = ", fixed = TRUE),"[[",1)
+    }
+
+    n.hypo <- length(name.hypo)
+    linfct <- linfct[,names(param),drop=FALSE] ## column in contrast may not be in the same order as param
+
+    ## ** Univariate Wald test
+    ## coefficient (used for F.test and lava export)
+    C.p <- linfct %*% param
+    C.p.rhs <- C.p - rhs
+
+    ## variance (used for F.test and lava export)
+    if(robust){
+        C.vcov.C <- linfct %*% rvcov.param %*% t(linfct)
+    }else{
+        C.vcov.C <- linfct %*% vcov.param %*% t(linfct)
+    }
+
+    ## df
+    if(df == "satterthwaite"){
+        df.Wald  <- dfSigma(contrast = linfct,
+                            score = score,
+                            vcov = vcov.param,
+                            rvcov = rvcov.param,
+                            dVcov = dVcov.param,
+                            dRvcov = dRvcov.param,
+                            keep.param = keep.param,                            
+                            type = if(robust){lava.options()$df.robust}else{1})
+
+        ##
+        ## 2 * vcov.param["Y","Y"]^2 / (vcov.param["Y~~Y","Y~~Y"]*dVcov.param["Y","Y","Y~~Y"]^2)
+        ## 
+    }else{
+        df.Wald <- rep(Inf, n.hypo)
+    }
+
+    ## ** Multivariate Wald test
+    error <- NULL
+    if(F.test){
+        iC.vcov.C <- try(solve(C.vcov.C), silent = TRUE)
+        if(!inherits(iC.vcov.C,"try-error")){
+            stat.F <- t(C.p.rhs) %*% iC.vcov.C %*% (C.p.rhs) / n.hypo
+
+            ## df (independent t statistics)
+            if(df == "satterthwaite"){
+                svd.tempo <- eigen(iC.vcov.C)
+                D.svd <- diag(svd.tempo$values, nrow = n.hypo, ncol = n.hypo)
+                P.svd <- svd.tempo$vectors
+     
+                C.anova <- sqrt(D.svd) %*% t(P.svd) %*% linfct
+
+                nu_m  <- dfSigma(contrast = C.anova,
+                                 score = score,
+                                 vcov = vcov.param,
+                                 rvcov = rvcov.param,
+                                 dVcov = dVcov.param,
+                                 dRvcov = dRvcov.param,
+                                 keep.param = keep.param,                            
+                                 type = if(robust){lava.options()$df.robust}else{1})
+                EQ <- sum(nu_m/(nu_m-2))
+                df.F <- 2*EQ / (EQ - n.hypo)
+            }else{
+                df.F <- Inf
+            }
+            ## store
+            F.res <- c("statistic" = as.numeric(stat.F),
+                       "df" = df.F,
+                       "p.value" = 1 - stats::pf(stat.F,
+                                                 df1 = n.hypo,
+                                                 df2 = df.F)
+                       )
+        }else{
+            warning("Unable to invert the variance-covariance matrix after application of the contrasts \n")
+            error <- iC.vcov.C
+        }
+    }
+
+    ## ** export
+    if(as.lava == TRUE){
+        level.inf <- (1-conf.level)/2
+        level.sup <- 1-level.inf
+
+        level.inf.label <- paste0(100*level.inf,"%")
+        level.sup.label <- paste0(100*level.sup,"%")
+
+        df.estimate <- matrix(NA, nrow = n.hypo, ncol = 5,
+                              dimnames = list(name.hypoShort,c("Estimate", "Std.Err", "df", level.inf.label, level.sup.label)))
+        df.estimate[,"Estimate"] <- C.p
+        df.estimate[,"Std.Err"] <- sqrt(diag(C.vcov.C))
+        df.estimate[,"df"] <- df.Wald
+        df.estimate[,level.inf.label] <- df.estimate[,"Estimate"] + stats::qt(level.inf, df = df.estimate[,"df"]) * df.estimate[,"Std.Err"]
+        df.estimate[,level.sup.label] <- df.estimate[,"Estimate"] + stats::qt(level.sup, df = df.estimate[,"df"]) * df.estimate[,"Std.Err"]
+
+        dimnames(C.vcov.C) <- list(name.hypoShort,name.hypoShort)
+        out <- list(statistic = stats::setNames(F.res["statistic"],"F-statistic"),
+                    parameter = stats::setNames(round(F.res["df"],2), paste0("df1 = ",n.hypo,", df2")), ## NOTE: cannot not be change to coefficients because of lava
+                    p.value = F.res["p.value"],
+                    method = c("- Wald test -", "", "Null Hypothesis:", name.hypo),
+                    estimate = df.estimate,
+                    vcov = C.vcov.C,
+                    coef = stats::setNames(C.p[,1], name.hypoShort),
+                    null = stats::setNames(rhs, name.hypoShort),
+                    cnames = name.hypo                    
+                    )
+        if(robust){
+            colnames(out$estimate)[2] <- "robust SE"
+        }
+        rownames(linfct) <- name.hypo
+        attr(out, "B") <- linfct
+        class(out) <- "htest"
+    }else{
+        if(length(unique(df.Wald))==1){
+            df.Wald <- df.Wald[1]
+        }
+        out <- list(model = object,
+                    linfct = linfct,
+                    rhs = unname(rhs),
+                    coef = param,
+                    vcov = if(robust){rvcov.param}else{vcov.param},
+                    df = df.Wald,
+                    alternative = "two.sided",
+                    type = NULL,
+                    robust = robust,
+                    ssc = object$sCorrect$ssc$type,
+                    global = if(F.test){F.res}else{NULL})
+        class(out) <- c("glht2","glht")
+    }
+    attr(out,"warning") <- warn
+    attr(out,"error") <- error
+    return(out)
+
+}
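
A note on the joint test computed above: after orthogonalizing the contrasts, each transformed statistic receives its own Satterthwaite degrees of freedom nu_m, and these are combined as if the underlying t statistics were independent,

    EQ   = sum_m nu_m / (nu_m - 2)
    df.F = 2 * EQ / (EQ - q)

where q is the number of hypotheses, chosen so that the mean of the approximating F distribution matches the mean of the statistic.
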
+
+## * compare.lvmfit2
+#' @rdname compare2
+#' @export
+compare.lvmfit2 <- compare2.lvmfit2
+
+## * dfSigma
+##' @title Degree of Freedom for the Chi-Square Test
+##' @description Computation of the degrees of freedom of the chi-squared distribution
+##' relative to the model-based variance
+##'
+##' @param contrast [numeric vector] the linear combination of parameters to test
+##' @param score [numeric matrix] the individual score for each parameter.
+##' @param vcov [numeric matrix] the model-based variance-covariance matrix of the parameters.
+##' @param rvcov [numeric matrix] the robust variance-covariance matrix of the parameters.
+##' @param dVcov [numeric array] the first derivative of the model-based variance-covariance matrix of the parameters.
+##' @param dRvcov [numeric array] the first derivative of the robust variance-covariance matrix of the parameters.
+##' @param keep.param [character vector] the name of the parameters with non-zero first derivative of their variance parameter.
+##' @param type [integer] 1 corresponds to the Satterthwaite approximation of the degrees of freedom applied to the model-based variance,
+##' 2 to the Satterthwaite approximation of the degrees of freedom applied to the robust variance,
+##' 3 to the approximation described in (Pan, 2002), sections 2 and 3.1.
+##'
+##' @references
+##' Wei Pan and Melanie M. Wall, Small-sample adjustments in using the sandwich variance estimator in generalized estimating equations. Statistics in Medicine (2002) 21:1429-1441.
+##' 
+dfSigma <- function(contrast, score, vcov, rvcov, dVcov, dRvcov, keep.param, type){
+    if(type==1){
+        C.vcov.C <- rowSums(contrast %*% vcov * contrast) ## variance matrix of the linear combination
+        C.dVcov.C <- sapply(keep.param, function(x){
+            rowSums(contrast %*% dVcov[,,x] * contrast)
+        })
+        numerator <- 2 *(C.vcov.C)^2
+        denom <- rowSums(C.dVcov.C %*% vcov[keep.param,keep.param,drop=FALSE] * C.dVcov.C)
+        df <- numerator/denom
+    }else if(type==2){
+        C.rvcov.C <- rowSums(contrast %*% rvcov * contrast) ## variance matrix of the linear combination
+        C.dRvcov.C <- sapply(keep.param, function(x){
+            rowSums(contrast %*% dRvcov[,,x] * contrast)
+        })
+        numerator <- 2 *(C.rvcov.C)^2
+        denom <- rowSums(C.dRvcov.C %*% rvcov[keep.param,keep.param,drop=FALSE] * C.dRvcov.C)
+        df <- numerator/denom
+    }else if(type==3){
+        vcov.S <- contrast %*% vcov
+        index.var <- diag(matrix(1:NROW(contrast)^2,NROW(contrast),NROW(contrast)))
+        
+        K <- NROW(score)
+        ls.Pi <- lapply(1:K, function(iC){as.double(tcrossprod(score[iC,]))})
+        M.Pi <- do.call(rbind,ls.Pi)
+        M.Pi_center <- sweep(M.Pi, MARGIN = 2, STATS = colMeans(M.Pi), FUN = "-")
+        ## M.Pi_center - M.Pi
+        T <- t(M.Pi_center) %*% M.Pi_center / (K*(K-1))
+        ## range(var(M.Pi)/K - T)
+        eq.3 <- K^2 * (vcov.S %x% vcov.S) %*% T %*%  (vcov.S %x% vcov.S)
+
+        Vs <- (vcov.S %x% vcov.S) %*% Reduce("+",ls.Pi)
+        ## range(Vs - as.double(rvcov))
+        ## range(Vs[index.var] - diag(rvcov))
+        df <- 2*Vs[index.var]^2/sapply(index.var, function(iIndex){eq.3[iIndex,iIndex]})
+        
+    }
+    
+    return(stats::setNames(df, rownames(contrast)))
+}
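
For type 1, the row-wise computation above is the usual Satterthwaite formula: writing v = c' V c for the variance of a contrast and g_k = c' (dV/dtheta_k) c for its gradient, the degrees of freedom are df = 2 v^2 / (g' V g). A toy numeric sketch (V and dV are invented for illustration, not taken from a fitted model):

    cvec <- c(1, 0)                         ## contrast on the first parameter
    V    <- matrix(c(2, 0.3, 0.3, 1), 2, 2) ## toy variance-covariance of the parameters
    dV   <- array(0, dim = c(2, 2, 2))      ## toy derivatives dV/dtheta_k
    dV[,,1] <- diag(c(0.5, 0))
    dV[,,2] <- diag(c(0, 0.2))
    v  <- as.numeric(t(cvec) %*% V %*% cvec)
    g  <- sapply(1:2, function(k){ as.numeric(t(cvec) %*% dV[,,k] %*% cvec) })
    df <- 2 * v^2 / as.numeric(t(g) %*% V %*% g) ## Satterthwaite degrees of freedom
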
+
+
+##----------------------------------------------------------------------
+### compare2.R ends here
diff --git a/R/sCorrect-dInformation2.R b/R/sCorrect-dInformation2.R
new file mode 100644
index 0000000..9543eef
--- /dev/null
+++ b/R/sCorrect-dInformation2.R
@@ -0,0 +1,402 @@
+### sCorrect-dInformation2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: dec 11 2019 (14:09) 
+## Version: 
+## Last-Updated: Jan 17 2022 (18:44) 
+##           By: Brice Ozenne
+##     Update #: 349
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * .dInformation2
+#' @title Compute the First Derivative of the Expected Information Matrix
+#' @description Compute the first derivative of the expected information matrix.
+#' @name .dinformation2-internal
+#' 
+#' @details \code{.dInformation2} performs the computation separately for each missing-data
+#' pattern (arguments \code{missing.pattern}, \code{unique.pattern}, \code{name.pattern}).
+#' 
+#' @keywords internal
+.dInformation2 <- function(dmu, dOmega, d2mu, d2Omega, OmegaM1,
+                           missing.pattern, unique.pattern, name.pattern,
+                           grid.3varD1, grid.2meanD1.1varD1, grid.2meanD2.1meanD1, grid.2varD2.1varD1,
+                           name.param, leverage, n.cluster, weights){
+
+    if(lava.options()$debug){cat(".dInformation2\n")}
+    if(!is.null(weights)){
+        stop(".dInformation2 does not support weights. \n")
+    }
+    symmetrize <- TRUE
+
+    ## ** Prepare
+    n.param <- length(name.param)
+    n.pattern <- length(name.pattern)
+
+    n.grid.3varD1 <- NROW(grid.3varD1)
+    if(symmetrize && n.grid.3varD1>0){
+        grid.3varD1 <- grid.3varD1[grid.3varD1$duplicatedXYZ==FALSE,,drop=FALSE]
+        n.grid.3varD1 <- NROW(grid.3varD1)
+    }
+
+    n.grid.2meanD1.1varD1 <- NROW(grid.2meanD1.1varD1)
+    if(symmetrize && n.grid.3varD1>0){
+        grid.2meanD1.1varD1 <- grid.2meanD1.1varD1[grid.2meanD1.1varD1$duplicatedXY==FALSE,,drop=FALSE]
+        n.grid.2meanD1.1varD1 <- NROW(grid.2meanD1.1varD1)
+    }
+
+    n.grid.2varD2.1varD1 <- NROW(grid.2varD2.1varD1)
+    if(symmetrize && n.grid.2varD2.1varD1>0){
+        grid.2varD2.1varD1 <- grid.2varD2.1varD1[grid.2varD2.1varD1$d2XZ+grid.2varD2.1varD1$d2YZ>0,,drop=FALSE]
+        n.grid.2varD2.1varD1 <- NROW(grid.2varD2.1varD1)
+
+        grid.2varD2.1varD1[,"d2XZ"] <- grid.2varD2.1varD1[,"d2XZ"]*(1-grid.2varD2.1varD1[,"duplicatedXZ"])
+        grid.2varD2.1varD1[,"d2YZ"] <- grid.2varD2.1varD1[,"d2YZ"]*(1-grid.2varD2.1varD1[,"duplicatedYZ"])
+    }
+    
+    n.grid.2meanD2.1meanD1 <- NROW(grid.2meanD2.1meanD1)
+    if(symmetrize && n.grid.2meanD2.1meanD1>0){
+        grid.2meanD2.1meanD1 <- grid.2meanD2.1meanD1[grid.2meanD2.1meanD1$d2XZ+grid.2meanD2.1meanD1$d2YZ>0,,drop=FALSE]
+        n.grid.2meanD2.1meanD1 <- NROW(grid.2meanD2.1meanD1)
+
+        grid.2meanD2.1meanD1[,"d2XZ"] <- grid.2meanD2.1meanD1[,"d2XZ"]*(1-grid.2meanD2.1meanD1[,"duplicatedXZ"])
+        grid.2meanD2.1meanD1[,"d2YZ"] <- grid.2meanD2.1meanD1[,"d2YZ"]*(1-grid.2meanD2.1meanD1[,"duplicatedYZ"])
+    }
+
+    dInfo <- array(0,
+                   dim = c(n.param, n.param, n.param),
+                   dimnames = list(name.param, name.param, name.param))
+    
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        if(!is.null(leverage)){
+            iN.corrected <- length(iIndex) - colSums(leverage[iIndex,iY,drop=FALSE])
+        }else{
+            iN.corrected <- length(iIndex)
+        }
+        idmu <- .subsetList(dmu, indexRow = iIndex, indexCol = iY)
+        idOmega <- .subsetList(dOmega, indexRow = iY, indexCol = iY)
+        id2mu <- .subsetList2(d2mu, indexRow = iIndex, indexCol = iY)
+        id2Omega <- .subsetList2(d2Omega, indexRow = iY, indexCol = iY)
+
+        ## *** 3 first derivative regarding the variance
+        if(n.grid.3varD1>0){
+            for(iGrid in 1:n.grid.3varD1){ # iGrid <- 1
+                iName1 <- grid.3varD1[iGrid,"X"]
+                iName2 <- grid.3varD1[iGrid,"Y"]
+                iName3 <- grid.3varD1[iGrid,"Z"]
+
+                ## term 1
+                iDiag1 <- diag(iOmegaM1 %*% idOmega[[iName3]] %*% iOmegaM1 %*% idOmega[[iName2]] %*% iOmegaM1 %*% idOmega[[iName1]])
+                iDiag2 <- diag(iOmegaM1 %*% idOmega[[iName2]] %*% iOmegaM1 %*% idOmega[[iName3]] %*% iOmegaM1 %*% idOmega[[iName1]])
+                dInfo[iName1,iName2,iName3] <- dInfo[iName1,iName2,iName3] - 1/2 * sum(iDiag1 * iN.corrected + iDiag2 * iN.corrected)
+
+                ## symmetrize (XYZ = XZY = YXZ = YZX = ZXY = ZYX)
+                if(symmetrize){
+                    dInfo[iName1,iName3,iName2] <- dInfo[iName1,iName2,iName3]
+                    dInfo[iName2,iName1,iName3] <- dInfo[iName1,iName2,iName3]
+                    dInfo[iName2,iName3,iName1] <- dInfo[iName1,iName2,iName3]
+                    dInfo[iName3,iName1,iName2] <- dInfo[iName1,iName2,iName3]
+                    dInfo[iName3,iName2,iName1] <- dInfo[iName1,iName2,iName3]
+                }
+            }
+        }
+
+        ## *** 2 first derivative regarding the mean and one regarding the variance
+        if(n.grid.2meanD1.1varD1>0){
+            for(iGrid in 1:n.grid.2meanD1.1varD1){ # iGrid <- 1
+                iName1 <- grid.2meanD1.1varD1[iGrid,"X"]
+                iName2 <- grid.2meanD1.1varD1[iGrid,"Y"]
+                iName3 <- grid.2meanD1.1varD1[iGrid,"Z"]
+
+                ## term 4
+                dInfo[iName1,iName2,iName3] <- dInfo[iName1,iName2,iName3] - sum(idmu[[iName1]] %*% iOmegaM1 %*% idOmega[[iName3]] %*% iOmegaM1 * idmu[[iName2]])
+                
+                ## symmetrize (XYZ = YXZ)
+                if(symmetrize){
+                    dInfo[iName2,iName1,iName3] <- dInfo[iName1,iName2,iName3]
+                }
+
+            }
+        }
+        
+        ## *** 1 second derivative and 1 first derivative regarding the variance
+        if(n.grid.2varD2.1varD1>0){
+            for(iGrid in 1:n.grid.2varD2.1varD1){ # iGrid <- 1
+                iName1 <- grid.2varD2.1varD1[iGrid,"X"]
+                iName2 <- grid.2varD2.1varD1[iGrid,"Y"]
+                iName3 <- grid.2varD2.1varD1[iGrid,"Z"]
+               
+                ## term 2
+                if(grid.2varD2.1varD1[iGrid,"d2YZ"]){
+                    d2.Var1 <- grid.2varD2.1varD1[iGrid,"d2YZ.Var1"]
+                    d2.Var2 <- grid.2varD2.1varD1[iGrid,"d2YZ.Var2"]
+                    iDiag <- 1/2 * sum(diag(iOmegaM1 %*% id2Omega[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 %*% idOmega[[iName1]]) * iN.corrected)
+                    dInfo[iName1,iName2,iName3] <- dInfo[iName1,iName2,iName3] + iDiag
+
+                    ## symmetrize (XYZ = XZY)
+                    if(symmetrize && (iName2 != iName3)){
+                        dInfo[iName1,iName3,iName2] <- dInfo[iName1,iName3,iName2] + iDiag
+                    }
+                }
+
+                ## term 3
+                if(grid.2varD2.1varD1[iGrid,"d2XZ"]){
+                    d2.Var1 <- grid.2varD2.1varD1[iGrid,"d2XZ.Var1"]
+                    d2.Var2 <- grid.2varD2.1varD1[iGrid,"d2XZ.Var2"]
+                    iDiag <- 1/2 * sum(diag(iOmegaM1 %*% idOmega[[iName2]] %*% iOmegaM1 %*% id2Omega[[d2.Var1]][[d2.Var2]]) * iN.corrected)
+                    dInfo[iName1,iName2,iName3] <- dInfo[iName1,iName2,iName3] + iDiag
+
+                    ## symmetrize (XYZ = ZYX)
+                    if(symmetrize && (iName1 != iName3)){
+                        dInfo[iName3,iName2,iName1] <- dInfo[iName3,iName2,iName1] + iDiag
+                    }
+                }
+
+            }
+        }
+
+        ## *** 1 second derivative and 1 first derivative regarding the mean
+        if(n.grid.2meanD2.1meanD1>0){
+            for(iGrid in 1:n.grid.2meanD2.1meanD1){ # iGrid <- 1
+                iName1 <- grid.2meanD2.1meanD1[iGrid,"X"]
+                iName2 <- grid.2meanD2.1meanD1[iGrid,"Y"]
+                iName3 <- grid.2meanD2.1meanD1[iGrid,"Z"]
+
+                ## term 5
+                if(grid.2meanD2.1meanD1[iGrid,"d2XZ"]){
+                    d2.Var1 <- grid.2meanD2.1meanD1[iGrid,"d2XZ.Var1"]
+                    d2.Var2 <- grid.2meanD2.1meanD1[iGrid,"d2XZ.Var2"]
+                    iDiag <- sum(id2mu[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 * idmu[[iName2]])
+                    dInfo[iName1,iName2,iName3] <- dInfo[iName1,iName2,iName3] + iDiag
+
+                    ## symmetrize (XYZ=ZYX)
+                    if(symmetrize && (iName1 != iName3)){
+                        dInfo[iName3,iName2,iName1] <- dInfo[iName3,iName2,iName1] + iDiag
+                    }
+                }
+
+                ## term 6
+                if(grid.2meanD2.1meanD1[iGrid,"d2YZ"]){
+                    d2.Var1 <- grid.2meanD2.1meanD1[iGrid,"d2YZ.Var1"]
+                    d2.Var2 <- grid.2meanD2.1meanD1[iGrid,"d2YZ.Var2"]
+                    iDiag <- sum(idmu[[iName1]] %*% iOmegaM1 * id2mu[[d2.Var1]][[d2.Var2]])
+                    dInfo[iName1,iName2,iName3] <- dInfo[iName1,iName2,iName3] + iDiag
+
+                    ## symmetrize (XYZ = XZY)
+                    if(symmetrize && (iName2 != iName3)){
+                        dInfo[iName1,iName3,iName2] <- dInfo[iName1,iName3,iName2] + iDiag
+                    }
+                }
+            }
+        }
+    }
+    
+    ## dInfo.bis <- .old_dInformation2(dmu = dmu, dOmega = dOmega, d2mu = d2mu, d2Omega = d2Omega, OmegaM1 = OmegaM1,
+    ##                                 missing.pattern = missing.pattern, unique.pattern = unique.pattern, name.pattern = name.pattern, 
+    ##                                 grid.dInformation = expand.grid(X = name.param, Y = name.param, Z = name.param, duplicated = FALSE, stringsAsFactors = FALSE),
+    ##                                 name.param = name.param, name.param.dInformation = name.param,
+    ##                                 leverage = leverage, n.cluster = n.cluster)
+    ## dInfo.ter <- .old_dInformation2(dmu = dmu, dOmega = dOmega, d2mu = d2mu, d2Omega = d2Omega, OmegaM1 = OmegaM1,
+    ##                                 missing.pattern = missing.pattern, unique.pattern = unique.pattern, name.pattern = name.pattern, 
+    ##                                 grid.dInformation = expand.grid(X = name.param[2:3], Y = name.param[2:3], Z = name.param[2:3], duplicated = FALSE, stringsAsFactors = FALSE),
+    ##                                 name.param = name.param[2:3], name.param.dInformation = name.param[2:3],
+    ##                                 leverage = leverage, n.cluster = n.cluster)
+
+    ## dInfo.bis[,,1]-t(dInfo.bis[,,1])
+    ## dInfo.bis[,,2]-t(dInfo.bis[,,2])
+    ## dInfo.bis[,,3]-t(dInfo.bis[,,3])
+    ## print(range(dInfo.bis - dInfo))
+    ## print(range(dInfo.ter  - dInfo))
+    
+    ## ** export
+    return(dInfo)
+}
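
For orientation, the quantity being differentiated is the expected information of a Gaussian model with mean mu(theta) and covariance Omega(theta); per missing-data pattern,

    I(theta)_xy = dmu_x' Omega^-1 dmu_y + 1/2 tr( Omega^-1 dOmega_x Omega^-1 dOmega_y )

Differentiating with respect to theta_z, using d(Omega^-1) = -Omega^-1 dOmega_z Omega^-1, yields exactly the terms accumulated above: term 1 (three first derivatives of the variance), terms 2-3 (a second derivative of the variance), term 4 (two mean derivatives and one variance derivative), and terms 5-6 (a second derivative of the mean).
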
+
+## * .dVcov.param
+.dVcov.param <- function(vcov.param, dInformation, n.param, name.param){
+    dVcov.param <- array(0,
+                         dim = c(n.param,n.param,n.param),
+                         dimnames = list(name.param,name.param,name.param)
+                         )
+    
+    for(iP in name.param){ ## iP <- "Y1"
+        if(any(dInformation[,,iP]!=0)){
+            dVcov.param[,,iP] <- - vcov.param %*% dInformation[,,iP] %*% vcov.param
+        }
+    }
+    
+    return(dVcov.param)
+}
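
This is the standard derivative-of-an-inverse identity: since the variance-covariance matrix is V(theta) = I(theta)^-1, its derivative along each parameter is dV/dtheta_k = -V (dI/dtheta_k) V, which is exactly the loop body above.
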
+
+## * .dRvcov.param
+.dRvcov.param <- function(score, hessian, vcov.param, dVcov.param, n.param, name.param){
+
+    dRvcov.param <- array(0,
+                          dim = c(n.param,n.param,n.param),
+                          dimnames = list(name.param,name.param,name.param)
+                          )
+    
+    score2_vcov.param <- crossprod(score) %*% vcov.param
+    score_vcov.param <- score %*% vcov.param
+        
+    for(iP in name.param){ ## iP <- 1
+        if(any(dVcov.param[,,iP]!=0)){
+            term1 <- dVcov.param[,,iP] %*% score2_vcov.param
+        }else{
+            term1 <- matrix(0, nrow = n.param, ncol = n.param)
+        }
+        term2 <- vcov.param %*% hessian[iP,,] %*% score_vcov.param
+        ## dRvcov.param[,,iP] <- term2 + t(term2) ## (what was lavaSearch2 doing before version 2.0.0)
+        dRvcov.param[,,iP] <- term1 + t(term1) + term2 + t(term2)
+    }
+
+    return(dRvcov.param)
+}
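
Here the robust (sandwich) variance V S'S V, with S the matrix of individual scores, is differentiated by the product rule: term1 and its transpose cover the derivative of the two outer V factors, while term2 and its transpose approximate the derivative of the inner S'S factor through the hessian of the individual contributions. The commented line records that versions before 2.0.0 dropped the term1 part.
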
+
+
+
+## * .old_dInformation2
+## .old_dInformation2 <- function(dmu, dOmega, d2mu, d2Omega, OmegaM1,
+##                                missing.pattern, unique.pattern, name.pattern, 
+##                                grid.dInformation, name.param, name.param.dInformation,
+##                                leverage, n.cluster){
+
+##     if(lava.options()$debug){cat(".dInformation2\n")}
+
+##     ## ** Prepare
+##     n.grid <- NROW(grid.dInformation)
+##     n.param <- length(name.param)
+##     n.param.dInformation <- length(name.param.dInformation)
+##     n.pattern <- length(name.pattern)
+
+##     dInfo <-  array(0,
+##                     dim = c(n.param, n.param, n.param.dInformation),
+##                     dimnames = list(name.param, name.param, name.param.dInformation))
+
+##     index.duplicated <- which(grid.dInformation$duplicated)
+##     index.Nduplicated <- setdiff(1:n.grid, index.duplicated)
+##     ## grid.dInformation[index.Nduplicated,,drop=FALSE]
+    
+##     ## ** loop over missing data pattern
+##     for(iP in 1:n.pattern){ ## iP <- 1
+##         iPattern <- name.pattern[iP]
+##         iOmegaM1 <- OmegaM1[[iPattern]]
+##         iIndex <- missing.pattern[[iPattern]]
+##         iY <- which(unique.pattern[iP,]==1)
+
+##         if(!is.null(leverage)){
+##             iN.corrected <- length(iIndex) - colSums(leverage[iIndex,iY,drop=FALSE])
+##         }else{
+##             iN.corrected <- length(iIndex)
+##         }
+##         for(iGrid in index.Nduplicated){ # iGrid <- 1
+##             iName1 <- grid.dInformation[iGrid,"X"]
+##             iName2 <- grid.dInformation[iGrid,"Y"]
+##             iNameD <- grid.dInformation[iGrid,"Z"]
+##             ## cat("* ", iNameD," ",iName1,"",iName2,"\n")
+
+##             ## *** identify relevant terms
+##             test.Omega1 <- !is.null(dOmega[[iNameD]]) && !is.null(dOmega[[iName1]]) && !is.null(dOmega[[iName2]])
+##             test.Omega2a <- !is.null(d2Omega[[iNameD]][[iName1]]) && !is.null(dOmega[[iName2]])
+##             test.Omega2b <- !is.null(d2Omega[[iName1]][[iNameD]]) && !is.null(dOmega[[iName2]])
+##             test.Omega3a <- !is.null(d2Omega[[iNameD]][[iName2]]) && !is.null(dOmega[[iName1]])
+##             test.Omega3b <- !is.null(d2Omega[[iName2]][[iNameD]]) && !is.null(dOmega[[iName1]])
+
+##             test.mu1a <- !is.null(d2mu[[iNameD]][[iName1]]) && !is.null(dmu[[iName2]])
+##             test.mu1b <- !is.null(d2mu[[iName1]][[iNameD]]) && !is.null(dmu[[iName2]])
+##             test.mu2a <- !is.null(d2mu[[iNameD]][[iName2nn]]) && !is.null(dmu[[iName1]])
+##             test.mu2b <- !is.null(d2mu[[iName2]][[iNameD]]) && !is.null(dmu[[iName1]])
+##             test.mu3 <- !is.null(dOmega[[iNameD]]) && !is.null(dmu[[iName1]]) && !is.null(dmu[[iName2]])
+
+##             if((test.Omega1 + test.Omega2a + test.Omega2b + test.Omega3a + test.Omega3b + test.mu1a + test.mu1b + test.mu2a + test.mu2b + test.mu3) == 0){
+##                 next
+##             }
+##             ## *** extract quantities for computations 
+##             if(test.mu1a){
+##                 d2mu.D1 <- d2mu[[iNameD]][[iName1]][iIndex,iY,drop=FALSE]
+##             }else if(test.mu1b){
+##                 d2mu.D1 <- d2mu[[iName1]][[iNameD]][iIndex,iY,drop=FALSE]
+##             }
+##             if(test.mu2a){
+##                 d2mu.D2 <- d2mu[[iNameD]][[iName2]][iIndex,iY,drop=FALSE]
+##             }else if(test.mu2b){
+##                 d2mu.D2 <- d2mu[[iName2]][[iNameD]][iIndex,iY,drop=FALSE]
+##             }
+##             if(test.Omega2a){
+##                 d2Omega.D1 <- d2Omega[[iNameD]][[iName1]][iY,iY,drop=FALSE]
+##             }else if(test.Omega2b){
+##                 d2Omega.D1 <- d2Omega[[iName1]][[iNameD]][iY,iY,drop=FALSE]
+##             }
+##             if(test.Omega3a){
+##                 d2Omega.D2 <- d2Omega[[iNameD]][[iName2]][iY,iY,drop=FALSE]
+##             }else{
+##                 d2Omega.D2 <- d2Omega[[iName2]][[iNameD]][iY,iY,drop=FALSE]
+##             }
+                
+##             ## *** pre-compute 
+##             if(!is.null(dOmega[[iName1]])){
+##                 iOmegaM1.dOmega.1 <- iOmegaM1 %*% dOmega[[iName1]][iY,iY,drop=FALSE]
+##             }
+##             if(!is.null(dOmega[[iName2]])){
+##                 iOmegaM1.dOmega.2 <- iOmegaM1 %*% dOmega[[iName2]][iY,iY,drop=FALSE]
+##             }                    
+##             if(!is.null(dOmega[[iNameD]])){
+##                 iOmegaM1.dOmega.D <- iOmegaM1 %*% dOmega[[iNameD]][iY,iY,drop=FALSE]
+##             }
+            
+##             ## *** evaluate contributions to dInformation
+##             if(test.Omega1){
+
+##                 iOmegaM1.dOmega.1 %*% iOmegaM1.dOmega.D %*% iOmegaM1.dOmega.2
+                
+##                 iDiag1 <- diag(iOmegaM1.dOmega.D %*% iOmegaM1.dOmega.2 %*% iOmegaM1.dOmega.1)
+##                 iDiag2 <- diag(iOmegaM1.dOmega.2 %*% iOmegaM1.dOmega.D %*% iOmegaM1.dOmega.1)
+##                 dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] - 1/2 * sum((iDiag1 + iDiag2) * iN.corrected)
+##                 ## if(iName1!=iName2){
+##                 ##     cat(iName1,"/",iName2,"/",iNameD,": ",iDiag1," ",iDiag2,"\n")
+##                 ## }
+##             }
+
+##             if(test.Omega3a || test.Omega3b){
+##                 iDiag <- diag(iOmegaM1 %*% d2Omega.D2 %*% iOmegaM1.dOmega.1)
+##                 dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + 1/2 * sum(iDiag * iN.corrected)                
+##             }
+            
+##             if(test.Omega2a || test.Omega2b){
+##                 iDiag <- diag(iOmegaM1.dOmega.2 %*% iOmegaM1 %*% d2Omega.D1)
+##                 dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + 1/2 * sum(iDiag * iN.corrected)
+##             }
+
+##             if(test.mu1a || test.mu1b){
+##                 dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + sum(d2mu.D1 %*% iOmegaM1 * dmu[[iName2]][iIndex,iY,drop=FALSE])
+##             }
+
+##             if(test.mu2a || test.mu2b){
+##                 dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] + sum(dmu[[iName1]][iIndex,iY,drop=FALSE] %*% iOmegaM1 * d2mu.D2)
+##             }
+                    
+##             if(test.mu3){
+##                 dInfo[iName1,iName2,iNameD] <- dInfo[iName1,iName2,iNameD] - sum(dmu[[iName1]][iIndex,iY,drop=FALSE] %*% iOmegaM1.dOmega.D %*% iOmegaM1 * dmu[[iName2]][iIndex,iY,drop=FALSE])
+##             }
+
+##         }
+##     }
+
+##     ### ** export
+##     return(dInfo)
+## }
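+## Editorial note: a compact reading of the terms implemented above, assuming
+## the usual expected information of a Gaussian model. Writing mu_k and O_k for
+## the first derivatives of the mean and of Omega with respect to theta_k, and
+## mu_{Dk}, O_{Dk} for the second derivatives, the contribution of theta_D to
+## the derivative of the information I(theta)_{12} collects:
+##   + mu_{D1}' O^{-1} mu_2 + mu_1' O^{-1} mu_{D2}                (test.mu1*, test.mu2*)
+##   - mu_1' O^{-1} O_D O^{-1} mu_2                               (test.mu3)
+##   + 1/2 tr(O^{-1} O_{D2} O^{-1} O_1)                           (test.Omega3*)
+##   + 1/2 tr(O^{-1} O_2 O^{-1} O_{D1})                           (test.Omega2*)
+##   - 1/2 tr(O^{-1} O_D O^{-1} O_2 O^{-1} O_1)
+##   - 1/2 tr(O^{-1} O_2 O^{-1} O_D O^{-1} O_1)                   (test.Omega1)
+## where O^{-1} is iOmegaM1 and second derivatives are looked up in either
+## argument order, as encoded by the test.* flags.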
+
+######################################################################
+### sCorrect-dInformation2.R ends here
diff --git a/R/sCorrect-effects2.R b/R/sCorrect-effects2.R
new file mode 100644
index 0000000..5a7d537
--- /dev/null
+++ b/R/sCorrect-effects2.R
@@ -0,0 +1,285 @@
+### sCorrect-effects2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: mar  4 2019 (10:28) 
+## Version: 
+## Last-Updated: jan 24 2022 (12:03) 
+##           By: Brice Ozenne
+##     Update #: 384
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * effects2 (documentation)
+#' @title Effects Through Pathways With Small Sample Correction 
+#' @description Test whether a path in the latent variable model corresponds to a null effect.
+#' Similar to \code{lava::effects} but with small sample correction (if any).
+#' So far it only works for a single path relating two variables, composed of one or two edges.
+#' @name effects2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param linfct [character vector] The path for which the effect should be assessed (e.g. \code{"A~B"}),
+#' i.e. the effect of the right variable (B) on the left variable (A). 
+#' @param robust [logical] should robust standard errors be used instead of the model-based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param conf.level [numeric, 0-1] level of the confidence intervals.
+#' @param from,to alternative to argument \code{linfct}. See \code{lava::effects}.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correction of the first order bias in the residual variance (\code{"residuals"}), or correction of the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+#' Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object.
+#' 
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the confidence intervals.
+#' 
+#' @return A \code{glht2} object, with one tested hypothesis per path.
+#' 
+#' @concept inference
+#' @keywords smallSampleCorrection
+#' @export
+`effects2` <-
+  function(object, linfct, robust, cluster, conf.level, ...) UseMethod("effects2")
+
+## * effects2 (examples)
+## TODO
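+## A minimal sketch (not run), assuming the sampleRepeated() simulator used in
+## the estimate2() and .getGroups2() examples:
+## set.seed(10)
+## dW <- sampleRepeated(50, format = "wide")
+## m.lvm <- lvm(c(Y1,Y2,Y3) ~ eta, eta ~ X1)
+## latent(m.lvm) <- ~eta
+## e.lvm <- estimate(m.lvm, data = dW)
+## effects2(e.lvm, linfct = "Y2~X1") ## total effect of X1 on Y2 (via eta)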
+
+## * effects2.lvmfit
+#' @rdname effects2
+#' @export
+effects2.lvmfit <- function(object, linfct, robust = FALSE, cluster = NULL, conf.level = 0.95, to = NULL, from = NULL, df = lava.options()$df, ssc = lava.options()$ssc, ...){
+
+    return(effects2(estimate2(object, ssc = ssc, df = df, dVcov.robust = robust, ...), linfct = linfct, to = to, from = from, robust = robust, cluster = cluster, conf.level = conf.level))
+
+}
+
+## * effects2.lvmfit2
+#' @rdname effects2
+#' @export
+effects2.lvmfit2 <- function(object, linfct, robust = FALSE, cluster = NULL, conf.level = 0.95, to = NULL, from = NULL, ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+    object0 <- object
+    class(object0) <- setdiff(class(object0),"lvmfit2")
+        
+    ## ** identify path
+    if(!is.null(to) || !is.null(from)){
+        n.hypo <- 1
+
+        if(!missing(linfct)){
+            stop("Cannot specify argument \'linfct\' at the same time as argument \'from\' or \'to\'. \n")
+        }
+        e.effects <- effects(object0, from = from, to = to)
+        pathEffect <- stats::setNames(list(stats::setNames(list(e.effects$path), paste0(e.effects["to"],"~",e.effects["from"]))),paste0(e.effects["to"],"~",e.effects["from"]))
+        type <- "total"
+        null <- 0
+
+    }else{
+        n.hypo <- length(linfct)
+        pathEffect <- vector(mode = "list", length = n.hypo)
+        if(is.null(names(linfct))){
+            names(pathEffect) <- linfct
+        }else{
+            if(any(duplicated(names(linfct)))){
+                stop("Duplicated names for argument \'linfct\'. \n")
+            }
+            names(pathEffect) <- names(linfct)
+        }
+        type <- rep(as.character(NA), n.hypo)
+        null <- rep(as.numeric(NA), n.hypo)
+
+        for(iH in 1:n.hypo){
+
+            if(grepl("|",linfct[iH], fixed=TRUE)){
+                type[iH] <- base::trimws(strsplit(linfct[iH],split="|",fixed=TRUE)[[1]][2], which = "both")
+                type[iH] <- match.arg(type[iH], c("indirect","direct","total"))
+                linfct[iH] <- strsplit(linfct[iH],split="|",fixed=TRUE)[[1]][1]
+            }else{
+                type[iH] <- "total"
+            }
+            
+            ## extract left and right side of the equation
+            if(length(gregexpr("=", linfct[iH], fixed = TRUE)[[1]])>1){ ## count "=" signs (grep would only flag matching elements)
+                stop("Each element of argument \'linfct\' should contain at most one \'=\' sign.\n",
+                     "Something like: coef1-2*coef2=0. \n")
+            }
+            iContrast <- createContrast(linfct[iH])
+            if(iContrast$null==0){
+                iContrast <- createContrast(linfct[iH], rowname.rhs = FALSE)
+            }
+
+            null[iH] <- unname(iContrast$null)
+            iLHS.hypo_factor <- as.double(iContrast$contrast)
+            iLHS.hypo_coef <- unname(colnames(iContrast$contrast))
+            iN.param <- length(iLHS.hypo_coef)
+
+            pathEffect[[iH]] <- stats::setNames(vector(mode = "list", length = iN.param), iLHS.hypo_coef)
+            attr(pathEffect[[iH]], "factor") <- iLHS.hypo_factor
+            
+            for(iCoef in 1:iN.param){ ## iCoef <- 1
+                ## extract all paths for each coefficient
+                pathEffect[[iH]][[iCoef]] <- effects(object0, stats::as.formula(iLHS.hypo_coef[[iCoef]]))$path
+                if(length(pathEffect[[iH]][[iCoef]])==0){
+                    stop("Could not find path relative to coefficient ",iLHS.hypo_coef[[iCoef]]," (linfct=",linfct[iH],"). \n")
+                }else if(type[iH]=="direct" && any(sapply(pathEffect[[iH]][[iCoef]],length)>2)){
+                    pathEffect[[iH]][[iCoef]] <- pathEffect[[iH]][[iCoef]][sapply(pathEffect[[iH]][[iCoef]],length)==2]
+                    if(length(pathEffect[[iH]][[iCoef]])==0){
+                        stop("Could not find direct path relative to coefficient ",iLHS.hypo_coef[[iCoef]]," (linfct=",linfct[iH],"). \n")
+                    }
+                }else if(type[iH]=="indirect" && any(sapply(pathEffect[[iH]][[iCoef]],length)>2)){
+                    pathEffect[[iH]][[iCoef]] <- pathEffect[[iH]][[iCoef]][sapply(pathEffect[[iH]][[iCoef]],length)>2]
+                    if(length(pathEffect[[iH]][[iCoef]])==0){
+                        stop("Could not find indirect path relative to coefficient ",iLHS.hypo_coef[[iCoef]]," (linfct=",linfct[iH],"). \n")
+                    }
+                }
+            }
+            
+        }
+    }
+
+    ## ** extract information
+    ## 0-order: param
+    object.param <- coef(object, as.lava = FALSE)
+    object.paramAll <- coef2(object, type = 9, labels = 1)[,"Estimate"]
+    name.param <- names(object.param)
+    n.param <- length(name.param)
+
+    ## 1-order: score
+    if(robust){
+        object.score <- score(object, cluster = cluster, as.lava = FALSE)
+    }
+
+    ## 2-order: variance covariance
+    object.vcov.param <- vcov(object, as.lava = FALSE)
+    if(robust){
+        object.rvcov.param <- vcov(object, robust = TRUE, cluster = cluster, as.lava = FALSE)
+    }
+    
+    test.df <- (object$sCorrect$df == "satterthwaite")
+    if(test.df){
+        object.dVcov.param <- object$sCorrect$dVcov.param
+
+        if(robust && (lava.options()$df.robust != 1)){
+
+            if(!is.null(cluster)){ ## update derivative according to cluster
+                object.dRvcov.param <- .dRvcov.param(score = object.score,
+                                                     hessian = hessian2(object, cluster = cluster),
+                                                     vcov.param = object.vcov.param,
+                                                     dVcov.param = object.dVcov.param,
+                                                     n.param = n.param,
+                                                     name.param = name.param)
+                                              
+            }else{
+                object.dRvcov.param <- object$sCorrect$dRvcov.param
+            }
+        }
+    }
+
+    coefEffect <- pathEffect
+
+    ## ** identify coefficients corresponding to path
+    for(iH in 1:n.hypo){ ## iH <- 1
+        for(iCoef in seq_along(pathEffect[[iH]])){ ## iCoef <- 1
+            iN.path <- length(pathEffect[[iH]][[iCoef]])
+            for(iPath in 1:iN.path){ ## iPath <- 1
+                coefEffect[[iH]][[iCoef]][[iPath]] <- paste(pathEffect[[iH]][[iCoef]][[iPath]][-1], pathEffect[[iH]][[iCoef]][[iPath]][-length(pathEffect[[iH]][[iCoef]][[iPath]])], sep = lava.options()$symbols[1])
+                if(any(coefEffect[[iH]][[iCoef]][[iPath]] %in% name.param == FALSE)){
+                    stop("Incorrect path: ",paste(pathEffect[[iH]][[iCoef]][[iPath]], collapse="->"),"\n",
+                         "Could not find coefficient: \"",paste(coefEffect[[iH]][[iCoef]][[iPath]][coefEffect[[iH]][[iCoef]][[iPath]] %in% name.param == FALSE], collapse = "\" \""),"\".\n")
+                }
+            }
+        }
+    }
+
+    ## ** point estimate
+    vec.beta <- stats::setNames(rep(NA, length = n.hypo), names(pathEffect))
+    for(iH in 1:n.hypo){ ## iH <- 1
+        iValue.param <- lapply(coefEffect[[iH]], function(iCoef){ ## for each coefficient (e.g. Y~E1 - Y~E2 = 0)
+            iValue.path <- lapply(iCoef, function(iName){prod(object.paramAll[iName])}) ## get effect through each path corresponding to a coefficient (e.g. Y~E: Y~E and Y~X and X~E, i.e. \beta1 and \beta2*\beta3)
+            return(do.call("sum", iValue.path)) ## return total effect (e.g. \beta1 + \beta2*\beta3)
+        })
+        if(is.null(attr(coefEffect[[iH]],"factor"))){
+            vec.beta[iH] <- unlist(iValue.param)
+        }else{
+            vec.beta[iH] <- sum(attr(coefEffect[[iH]],"factor") * unlist(iValue.param))
+        }
+    }
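+    ## illustration (hypothetical coefficients): for linfct = "Y~E" with a
+    ## direct path Y<-E (beta1) and an indirect path Y<-X<-E (beta2*beta3),
+    ## vec.beta = beta1 + beta2*beta3; for linfct = "Y~E1 - Y~E2 = 0" the
+    ## "factor" attribute (1,-1) weights the per-coefficient totals.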
+
+    ## ** variance
+    ## *** partial derivative
+    dbeta.dtheta <- matrix(NA, nrow = n.hypo, ncol = n.param, dimnames = list(names(pathEffect), name.param))
+    for(iH in 1:n.hypo){ ## iH <- 1
+        iValue.param <- lapply(coefEffect[[iH]], function(iCoef){  ## iCoef <- coefEffect[[iH]][[1]] ## for each coefficient (e.g. Y~E1 - Y~E2 = 0)
+            iDValue.path <- colSums(do.call(rbind,lapply(iCoef, function(iName){ ## iName <- iCoef[[1]] ## get derivative through each path corresponding to a coefficient (e.g. Y~E: Y~E and Y~X and X~E, i.e. \beta1 and \beta2*\beta3)
+                iDeriv <- stats::setNames(rep(0, n.param), name.param)
+                iDeriv[intersect(iName,name.param)] <- prod(object.paramAll[iName])/object.paramAll[intersect(iName,name.param)]
+                return(iDeriv)
+            })))
+        })
+        if(is.null(attr(coefEffect[[iH]],"factor"))){
+            dbeta.dtheta[iH,] <- iValue.param[[1]]
+        }else{
+            dbeta.dtheta[iH,] <- attr(coefEffect[[iH]],"factor") %*% do.call(rbind,iValue.param)
+        }   
+    }
+
+    if(robust){
+        Mvcov.beta <- dbeta.dtheta %*% object.rvcov.param %*% t(dbeta.dtheta)
+    }else{
+        Mvcov.beta <- dbeta.dtheta %*% object.vcov.param %*% t(dbeta.dtheta)
+    }
+
+    ## ** compute df
+    if(test.df){
+        vec.df <- dfSigma(contrast = dbeta.dtheta,
+                          score = object.score,
+                          vcov = object.vcov.param,
+                          rvcov = object.rvcov.param,
+                          dVcov = object.dVcov.param,
+                          dRvcov = object.dRvcov.param,
+                          keep.param = dimnames(object.dVcov.param)[[3]],                            
+                          type = if(robust){lava.options()$df.robust}else{1})
+    }else{
+        vec.df <- rep(0, n.hypo)
+    }
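+    ## dfSigma is expected to apply a Satterthwaite-type approximation: for a
+    ## contrast c with variance v = c' Vcov c, df ~ 2*v^2/Var(v), where Var(v)
+    ## follows from the delta method using the derivative of the (robust)
+    ## variance-covariance matrix (dVcov/dRvcov).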
+
+    ## ** gather everything in glht object
+    linfct2 <- diag(1, ncol = n.hypo, nrow = n.hypo)
+    dimnames(linfct2) <- list(names(pathEffect),names(pathEffect))
+
+    out <- list(model = object,
+                linfct = linfct2,
+                rhs = null,
+                coef = vec.beta,
+                vcov = Mvcov.beta,
+                df = vec.df,
+                alternative = "two.sided",
+                type = NULL,
+                robust = robust,
+                ssc = object$sCorrect$ssc$type,
+                grad = dbeta.dtheta,
+                path = pathEffect,
+                global = NULL)
+    class(out) <- c("glht2","glht")
+
+    ## ** export
+    return(out)
+
+}
+
+## * effects.lvmfit2
+#' @rdname effects2
+#' @export
+effects.lvmfit2 <- effects2.lvmfit2
+
+######################################################################
+### sCorrect-effects2.R ends here
diff --git a/R/sCorrect-estimate2.R b/R/sCorrect-estimate2.R
new file mode 100644
index 0000000..5a7d1cd
--- /dev/null
+++ b/R/sCorrect-estimate2.R
@@ -0,0 +1,296 @@
+### sCorrect-estimate2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: jan  3 2018 (14:29) 
+## Version: 
+## Last-Updated: Jan 22 2022 (14:00) 
+##           By: Brice Ozenne
+##     Update #: 2168
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - estimate2
+#' @title Satterthwaite Correction and Small Sample Correction
+#' @description Correct the bias of the ML estimate of the variance and compute the first derivative of the information matrix.
+#' @name estimate2
+#'
+#' @param object a \code{lvm} object.
+#' @param param [numeric vector, optional] the values of the parameters at which to perform the correction.
+#' @param data [data.frame, optional] the dataset relative to which the correction should be performed.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correction of the first order bias in the residual variance (\code{"residuals"}), or correction of the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+#' Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param tol.max [numeric >0] the largest acceptable absolute difference between two successive estimates of the bias correction.
+#' @param iter.max [integer >0] the maximum number of iterations used to estimate the bias correction.
+#' @param derivative [character] should the first derivative of the information matrix be computed using a formula (\code{"analytic"}) or numerical derivative (\code{"numeric"})?
+#' @param hessian [logical] should the hessian be stored? Can be \code{NULL} to store it only when it is computed as part of the small sample correction.
+#' @param dVcov.robust [logical] should the first derivative of robust variance-covariance matrix be stored?
+#' @param trace [logical] should the execution of the function be traced.
+#' @param ...  arguments passed to \code{lava::estimate} when using a \code{lvm} object.
+#'
+#' @details The argument \code{value} is equivalent to the argument \code{bias.correct} of the function \code{summary2}.
+#' 
+#' @concept estimator
+#' @keywords smallSampleCorrection
+#' 
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' dW <- sampleRepeated(10, format = "wide")
+#' 
+#' #### latent variable model ####
+#' m.lvm <- lvm(Y1~X1+X2+Z1)
+#'
+#' e2.lvm <- estimate2(m.lvm, data = dW)
+#' summary2(e2.lvm)
+#' 
+#' @export
+`estimate2` <-
+    function(object, param, data,
+             ssc, df,
+             derivative, hessian, dVcov.robust,
+             iter.max, tol.max, trace , ...) UseMethod("estimate2")
+
+
+## * estimate2.lvm
+#' @rdname estimate2
+#' @export
+estimate2.lvm <- function(object, param = NULL, data = NULL,
+                          ssc = lava.options()$ssc, df = lava.options()$df,
+                          derivative = "analytic", hessian = FALSE, dVcov.robust = FALSE,
+                          iter.max = 100, tol.max = 1e-6, trace = 0, ...){
+
+    if(!is.null(param)){
+        warning("Argument \'param\' not used with lvm objects. \n")
+    }
+    
+    out <- lava::estimate(x = object, data = data, ...)
+    return(estimate2(out, param = NULL, data = NULL,
+                     ssc = ssc, df = df,
+                     derivative = derivative, hessian = hessian, dVcov.robust = FALSE,
+                     iter.max = iter.max, tol.max = tol.max, trace = trace))
+
+}
+
+## * estimate2.lvmfit
+#' @rdname estimate2
+#' @export
+estimate2.lvmfit <- function(object, param = NULL, data = NULL,
+                             ssc = lava.options()$ssc, df = lava.options()$df,
+                             derivative = "analytic", hessian = FALSE, dVcov.robust = FALSE,
+                             iter.max = 100, tol.max = 1e-6, trace = 0, ...){
+
+    ## ** preliminary tests
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    if("multigroupfit" %in% class(object)){
+        stop("estimate2 cannot handle multigroup models \n")
+    }
+
+    if(inherits(object,"lvmfit") && length(object$model$attributes$ordinal)>0){
+        name.t <- names(object$model$attributes$ordinal)
+        stop("estimate2 does not handle ordinal variables \n",
+             "ordinal variable(s): \"",paste(name.t, collapse = "\" \""),"\"\n")
+    }
+    
+    if(inherits(object,"lvmfit") && length(object$model$attributes$transform)>0){
+        name.t <- names(object$model$attributes$transform)
+        stop("estimate2 does not handle transformed variables \n",
+             "transformed variable(s): \"",paste(name.t, collapse = "\" \""),"\"\n")
+    }
+
+    ## arguments
+    if(identical(ssc,FALSE) || identical(ssc,NA)){
+        ssc <- "none"
+    }
+    if(identical(df,FALSE) || identical(df,NA)){
+        df <- "none"
+    }
+    ssc <- match.arg(tolower(ssc), c("none","residuals","residuals0","cox"))
+    df <- match.arg(tolower(df), c("none","satterthwaite"))
+
+    if(df %in% "satterthwaite" || ssc %in% "cox"){
+        second.order <- TRUE
+    }else if(ssc %in% c("residuals","residuals0")){
+        second.order <- FALSE
+    }else{
+        second.order <- FALSE
+    }
+
+    ## ** initialize object
+    if(trace>0){cat("Initialization:")}
+    object$sCorrect <- moments2(object, data = data, param = param, Psi = NULL,
+                                initialize = TRUE, usefit = TRUE,
+                                score = TRUE, information = TRUE, hessian = hessian, vcov = TRUE,
+                                dVcov = (ssc == "cox")  || (ssc == "none" && df == "satterthwaite"), dVcov.robust = (ssc == "none" && df == "satterthwaite" && dVcov.robust),
+                                residuals = TRUE, leverage = FALSE, derivative = derivative)  ## setting leverage to FALSE is like initialization to 0
+    if(trace>0){cat(" done \n")}
+
+    ## ** bias correction    
+    if(ssc != "none"){
+
+        ## *** initialize bias correction
+        if(trace>0){cat("Initialize bias correction \n")}
+        if(ssc=="Cox"){
+            object.ssc <- list(type = "Cox",
+                               param0 = object$sCorrect$param,
+                               Omega0 = object$sCorrect$moment$Omega)
+        }else if(ssc %in% c("residuals","residuals0")){
+            object.ssc <- .init_sscResiduals(object)
+        }
+        
+        ## *** perform bias correction
+        if(trace>0){cat("Perform bias correction \n")}
+        iCV <- FALSE
+        iIter <- 0
+        iTol <- Inf
+        iiParam <- object$sCorrect$param
+        ## cat(iTol," (",iiParam,") \n")
+            
+        while(iCV == FALSE && iIter < iter.max){
+            if(trace>0){cat("*")}
+
+            ## bias correction
+            if(ssc == "Cox"){
+                iParam <- .sscCoxSnell(object, ssc = object.ssc)
+                object.ssc$JJK <- attr(iParam,"JJK")
+                object.ssc$lm <- attr(iParam,"lm")
+                object.ssc$Psi <- object$sCorrect$moment$Omega - object.ssc$Omega0
+            }else if(ssc %in% c("residuals","residuals0")){
+                iParam <- .sscResiduals(object, ssc = object.ssc)
+                object.ssc$Omega <- attr(iParam,"Omega")
+                object.ssc$Psi <- attr(iParam,"Psi")
+                ## use previous Omega to compute leverage and residuals
+                attr(object.ssc$Omega,"Omega.leverage") <- object$sCorrect$moment$Omega
+                attr(object.ssc$Omega,"dOmega.leverage") <- object$sCorrect$dmoment$dOmega
+                attr(object.ssc$Omega,"Omega.residuals") <- object$sCorrect$moment$Omega
+            }
+            ## object.ssc$Omega0 + object.ssc$Psi - object.ssc$Omega
+
+            ## cv criteria
+            iIter <- iIter + 1
+            iTol <- max(abs(iParam-iiParam))
+            ## cat(iTol," (",iParam,") \n")
+            iiParam <- iParam
+            iCV <- iTol <= tol.max
+
+            ## update moments
+            ## if ssc=="residuals0" then do not rescale the residuals according the the bias
+            if(iCV==FALSE && iIter < iter.max){
+                object$sCorrect <- moments2(object, param = iParam, Psi = if(ssc!="residuals0"){object.ssc$Psi}else{NULL}, Omega = object.ssc$Omega, 
+                                            initialize = FALSE, usefit = TRUE,
+                                            score = TRUE, information = TRUE, hessian = FALSE, vcov = TRUE,
+                                            dVcov = (ssc == "cox"), dVcov.robust = FALSE,
+                                            residuals = TRUE, leverage = TRUE, derivative = derivative)
+            }else{
+                object$sCorrect <- moments2(object, param = iParam, Psi = if(ssc!="residuals0"){object.ssc$Psi}else{NULL}, Omega = object.ssc$Omega, 
+                                            initialize = FALSE, usefit = TRUE,
+                                            score = TRUE, information = TRUE, hessian = hessian, vcov = TRUE,
+                                            dVcov = (df == "satterthwaite"), dVcov.robust = dVcov.robust,
+                                            residuals = TRUE, leverage = TRUE, derivative = derivative)
+
+                object$sCorrect$ssc <- c(object.ssc,
+                                         cv = iCV,
+                                         iter = iIter,
+                                         tol = iTol,
+                                         iter.max = iter.max,
+                                         tol.max = tol.max
+                                         )
+                
+            }
+        }
+        
+        ## *** assess convergence
+        if(iCV == FALSE){
+            warning("small sample correction did not reach convergence after ",iIter," iteration",if(iIter>1){"s"}else{""},". \n")
+        }
+        if(trace > 0){
+            cat("\n")
+        }
+
+    }else{
+        object$sCorrect$ssc$type <- "none"
+    }
+
+    ## ** degrees of freedom    
+    object$sCorrect$df <- df ## degrees of freedom are computed later (by compare2)
+    
+    ## ** restore original param order
+    name.param <- object$sCorrect$name.param
+    if(!is.null(name.param)){
+        object$sCorrect$param <- object$sCorrect$param[name.param]
+        names(object$sCorrect$param) <- names(name.param)
+
+        if(!is.null(object$sCorrect$score)){
+            object$sCorrect$score <- object$sCorrect$score[,name.param,drop=FALSE]
+            colnames(object$sCorrect$score) <- names(name.param)
+        }
+        if(!is.null(object$sCorrect$information)){
+            object$sCorrect$information <- object$sCorrect$information[name.param,name.param,drop=FALSE]
+            dimnames(object$sCorrect$information) <- list(names(name.param),names(name.param))
+        }
+        if(!is.null(object$sCorrect$vcov.param)){
+            object$sCorrect$vcov.param <- object$sCorrect$vcov.param[name.param,name.param,drop=FALSE]
+            dimnames(object$sCorrect$vcov.param) <- list(names(name.param),names(name.param))
+        }
+        if(!is.null(object$sCorrect$hessian)){
+            object$sCorrect$hessian <- object$sCorrect$hessian[name.param,name.param,,drop=FALSE]
+            dimnames(object$sCorrect$hessian) <- list(names(name.param),names(name.param),NULL)
+        }
+        if(!is.null(object$sCorrect$dInformation)){
+            object$sCorrect$dInformation <- object$sCorrect$dInformation[name.param,name.param,name.param,drop=FALSE]
+            dimnames(object$sCorrect$dInformation) <- list(names(name.param),names(name.param),names(name.param))
+        }
+        if(!is.null(object$sCorrect$dVcov.param)){
+            object$sCorrect$dVcov.param <- object$sCorrect$dVcov.param[name.param,name.param,name.param,drop=FALSE]
+            dimnames(object$sCorrect$dVcov.param) <- list(names(name.param),names(name.param),names(name.param))
+        }
+        if(!is.null(object$sCorrect$dRvcov.param)){
+            object$sCorrect$dRvcov.param <- object$sCorrect$dRvcov.param[name.param,name.param,name.param,drop=FALSE]
+            dimnames(object$sCorrect$dRvcov.param) <- list(names(name.param),names(name.param),names(name.param))
+        }
+    }
+    
+    ## ** export
+    class(object) <- append("lvmfit2",class(object))
+    return(object)    
+}
+
+## * estimate2.list
+#' @rdname estimate2
+#' @export
+estimate2.list <- function(object, ...){
+    object.class <- class(object)
+    object <- lapply(object, estimate2, ...)
+    class(object) <- object.class
+    return(object)
+}
+
+## * estimate2.mmm
+#' @rdname estimate2
+#' @export
+estimate2.mmm <- estimate2.list
+
+##----------------------------------------------------------------------
+### sCorrect-estimate2.R ends here
+
+
+
+
+
+
+
+
+
diff --git a/R/sCorrect-extractData.R b/R/sCorrect-extractData.R
new file mode 100644
index 0000000..d1c08b3
--- /dev/null
+++ b/R/sCorrect-extractData.R
@@ -0,0 +1,88 @@
+## * Documentation
+#' @title Extract Data From a Latent Variable Model
+#' @description Extract data from a latent variable model.
+#' @name extractData
+#' 
+#' @param object the fitted model.
+#' @param design.matrix [logical] should the data be extracted after transformation (e.g. conversion of categorical variables to dummy variables)?
+#' Otherwise the original data will be returned.
+#' @param as.data.frame [logical] should the output be converted into a \code{data.frame} object?
+#' @param rm.na [logical] should rows containing missing values in the dataset be removed?
+#' @param envir [environment] the environment from which to search the data.
+#'
+#' @return a dataset.
+#' @concept extractor
+#' @export
+`extractData` <-
+    function(object, design.matrix, as.data.frame, envir, rm.na){
+        UseMethod("extractData", object)
+    }
+
+## * Example
+#' @rdname extractData
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' n <- 101
+#'
+#' Y1 <- rnorm(n, mean = 0)
+#' Y2 <- rnorm(n, mean = 0.3)
+#' Id <- findInterval(runif(n), seq(0.1,1,0.1))
+#' data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
+#'            data.frame(Y=Y2,G="2",Id = Id)       
+#'            )
+#'
+#' #### latent variable model ####
+#' library(lava)
+#' e.lvm <- estimate(lvm(Y ~ G), data = data.df)
+#' extractData(e.lvm)
+#' extractData(e.lvm, design.matrix = TRUE)
+#' 
+
+
+## * extractData.lvmfit
+#' @export
+extractData.lvmfit <- function(object, design.matrix = FALSE, as.data.frame = TRUE,
+                               envir = environment(), rm.na = TRUE){
+    ## ** check arguments
+    if(!is.logical(design.matrix)){
+        stop("Argument \'design.matrix\' must be of type logical")
+    }
+    if(!is.logical(as.data.frame)){
+        stop("Argument \'as.data.frame\' must be of type logical")
+    }
+
+    ## ** extract data
+    data <- object$data$model.frame
+    if(!inherits(data, "data.frame")){
+        data <- as.data.frame(data)
+    }
+
+    if(design.matrix){
+        keep.cols <- intersect(c("(Intercept)",lava::vars(object)), names(data))
+        data <- data[,keep.cols,drop=FALSE]
+    }
+
+    ## ** normalize data
+    if(as.data.frame){
+        data <- as.data.frame(data)        
+    }
+
+    ## ** remove missing values relative to the exogenous variables
+    test.na <- rowSums(is.na(data[,lava::exogenous(object),drop=FALSE]))
+    if(rm.na == TRUE && any(test.na>0)){ ## remove rows corresponding to missing values
+        if(!inherits(object,"lvm.missing")){
+            data <- data[setdiff(1:NROW(data),which(test.na>0)),,drop=FALSE]
+        }else{
+            test.na <- rowSums(is.na(data[,exogenous(object)])) > 0
+            data <- data[setdiff(1:NROW(data),which(test.na>0)),,drop=FALSE]
+            warnings("Missing values in the exogenous variables \n",
+                     "May not extract the appropriate dataset \n")
+        }
+    }
+
+    ## ** export
+    return(data)
+    
+}
+
diff --git a/R/sCorrect-getGroups2.R b/R/sCorrect-getGroups2.R
new file mode 100644
index 0000000..52c6b58
--- /dev/null
+++ b/R/sCorrect-getGroups2.R
@@ -0,0 +1,95 @@
+### sCorrect-getGroups2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 18 2019 (10:58) 
+## Version: 
+## Last-Updated: Jan 17 2022 (18:43) 
+##           By: Brice Ozenne
+##     Update #: 170
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation
+#' @title Reconstruct the Cluster variable
+#' @description Reconstruct the cluster variable.
+#' Similar to \code{nlme::getGroups}.
+#' @noRd
+#'
+#' @param object a \code{lvmfit} object.
+#' @param data dataset.
+#' @param index.Omega  [list] for each cluster, the position of the observed endogenous variables (i.e. how to subset the residual variance-covariance matrix).
+#' @param endogenous [character vector] name of the endogenous variables.
+#' @param ... [internal] Only used by the generic method.
+#'  
+#' @return A list containing:
+#' \itemize{
+#' \item index.cluster: the cluster index for each observation.
+#' \item name.cluster: a unique identifier for each cluster.
+#' \item n.cluster: the number of clusters.
+#' }
+#' 
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' dW <- sampleRepeated(10, format = "wide")
+#' set.seed(10)
+#' dL <- sampleRepeated(10, format = "long")
+#' dL$time2 <- paste0("visit",dL$time)
+#' 
+#' #### latent variable model ####
+#' e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+#' .getGroups2(e.lvm, data = dW)
+#' 
+#' @concept extractor
+#' @keywords internal
+`.getGroups2` <-
+    function(object, data, index.Omega, endogenous) UseMethod(".getGroups2")
+
+## * .getGroups2.lvm
+.getGroups2.lvm <- function(object, data = NULL, index.Omega = NULL, endogenous = NULL){
+    if(is.null(data)){
+        data <- extractData(object)
+    }
+    if(is.null(index.Omega)){
+        index.Omega <- .getIndexOmega(object, data = data)
+    }
+    if(is.null(endogenous)){
+        endogenous <- lava::endogenous(object)
+    }
+    n.endogenous <- length(endogenous)
+
+    ## ** find clusters
+    n.cluster <- NROW(data)
+    name.cluster <- 1:n.cluster
+    missing <- any(is.na(index.Omega))
+    index.cluster <- unlist(lapply(name.cluster, rep, times = n.endogenous))
+
+    index.Omega <- tapply(index.Omega, index.cluster, function(iVec){list(stats::na.omit(iVec))})
+    Uindex.Omega <- unique(index.Omega[sapply(index.Omega,length)>0])
+    return(list(index.cluster = index.cluster,
+                name.cluster = name.cluster,
+                n.cluster = n.cluster,
+                index.Omega = index.Omega,
+                index2endogenous = stats::setNames(as.list(Uindex.Omega),Uindex.Omega)
+                ))
+    
+}
+
+## * .getGroups2.lvmfit
+.getGroups2.lvmfit <- .getGroups2.lvm
+
+
+
+
+
+
+
+
+######################################################################
+### sCorrect-getGroups2.R ends here
diff --git a/R/sCorrect-getIndexOmega.R b/R/sCorrect-getIndexOmega.R
new file mode 100644
index 0000000..d4b7f62
--- /dev/null
+++ b/R/sCorrect-getIndexOmega.R
@@ -0,0 +1,85 @@
+### sCorrect-getIndexOmega.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 25 2019 (10:52) 
+## Version: 
+## Last-Updated: jan 17 2022 (14:26) 
+##           By: Brice Ozenne
+##     Update #: 103
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation
+#' @title Identify the Endogenous Variables
+#' @description Identify the endogenous variables, i.e., return a vector whose length is the number of observations
+#' and whose values are the indices of the repetitions.
+#' @name getIndexOmega
+#'
+#' @param object a \code{lvmfit} object.
+#' @param data dataset.
+#' @param ... [internal] Only used by the generic method.
+#'  
+#' @concept extractor
+#' @keywords internal
+`.getIndexOmega` <-
+    function(object, data, ...) UseMethod(".getIndexOmega")
+
+## * Examples
+#' @examples
+#' \dontrun{
+#' #### simulate data ####
+#' set.seed(10)
+#' dW <- sampleRepeated(10, format = "wide")
+#' set.seed(10)
+#' dL <- sampleRepeated(10, format = "long")
+#' dL$time2 <- paste0("visit",dL$time)
+#' 
+#' #### lvm model ####
+#' e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+#' ## lavaSearch2:::.getIndexOmega(e.lvm, data = dW)
+#' }
+
+## * .getIndexOmega.lvm
+#' @rdname getIndexOmega
+.getIndexOmega.lvm <- function(object, data, ...){
+
+    ## ** check missing value in exogenous variables
+    name.exogenous <- exogenous(object)
+    missing.var <- name.exogenous[name.exogenous %in% names(data) == FALSE]
+
+    if(length(missing.var)>0){
+        cat2bin <- var2dummy(object$model, var = names(data), data = data)
+        name.exogenous[name.exogenous %in% missing.var] <- names(cat2bin)[cat2bin %in% missing.var]
+        name.exogenous <- unique(name.exogenous)
+    }
+    test.na <- Reduce("+",lapply(name.exogenous, function(iCol){is.na(data[[iCol]])}))
+    if(any(test.na>0)){
+        stop("Does not support missing values in exogenous variables. \n",
+             "Consider removing the corresponding rows in the dataset. \n")
+    }
+
+    ## ** index.Omega
+    n.obs <- NROW(data)
+    name.endogenous <- endogenous(object)
+    n.endogenous <- length(name.endogenous)
+
+    M.index <- matrix(1:n.endogenous, nrow = n.obs, ncol = n.endogenous, byrow = TRUE)
+    index.na <- which(is.na(as.data.frame(data)[,name.endogenous]))
+    if(length(index.na)>0){
+        M.index[index.na] <- NA
+    }
+    return(as.double(t(M.index)))
+}
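+## Illustration: with 2 observations and endogenous variables (Y1,Y2,Y3), the
+## function returns c(1,2,3,1,2,3); the index of a missing outcome is set to NA.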
+
+## * .getIndexOmega.lvmfit
+#' @rdname getIndexOmega
+.getIndexOmega.lvmfit <- .getIndexOmega.lvm
+
+######################################################################
+### sCorrect-getIndexOmega.R ends here
diff --git a/R/sCorrect-getVarCov2.R b/R/sCorrect-getVarCov2.R
new file mode 100644
index 0000000..033ba4a
--- /dev/null
+++ b/R/sCorrect-getVarCov2.R
@@ -0,0 +1,83 @@
+### sCorrect-getVarCov2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 18 2019 (11:00) 
+## Version: 
+## Last-Updated: Jan 12 2022 (16:32) 
+##           By: Brice Ozenne
+##     Update #: 84
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * getVarCov2
+#' @title Residual Variance-Covariance Matrix With Small Sample Correction.
+#' @description Reconstruct the residual variance-covariance matrix from a latent variable model. 
+#' It is similar to \code{nlme::getVarCov} but with small sample correction.
+#' @name getVarCov2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correction of the first order bias in the residual variance (\code{"residuals"}), or correction of the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#' 
+#' @return A matrix with as many rows and columns as the number of endogenous variables.
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the residual variance-covariance matrix.
+#' 
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' n <- 101
+#'
+#' Y1 <- rnorm(n, mean = 0)
+#' Y2 <- rnorm(n, mean = 0.3)
+#' Id <- findInterval(runif(n), seq(0.1,1,0.1))
+#' data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
+#'            data.frame(Y=Y2,G="2",Id = Id)
+#'            )
+#'
+#' #### latent variable models ####
+#' library(lava)
+#' e.lvm <- estimate(lvm(Y ~ G), data = data.df)
+#' getVarCov2(e.lvm)
+#' 
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`getVarCov2` <-
+    function(object, ...) UseMethod("getVarCov2")
+
+## * getVarCov2.lvmfit
+#' @rdname getVarCov2
+#' @export
+getVarCov2.lvmfit <- function(object, ssc = lava.options()$ssc, ...){
+
+    return(getVarCov2(estimate2(object, ssc = ssc, ...)))
+
+}
+
+## * getVarCov2.lvmfit2
+#' @rdname getVarCov2
+#' @export
+getVarCov2.lvmfit2 <- function(object, ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+    
+
+    Omega <- object$sCorrect$moment$Omega
+    attr(Omega, "detail") <- NULL
+    return(Omega)
+    
+}
+
+######################################################################
+### sCorrect-getVarCov2.R ends here
diff --git a/R/sCorrect-glht2.R b/R/sCorrect-glht2.R
new file mode 100644
index 0000000..ee42eee
--- /dev/null
+++ b/R/sCorrect-glht2.R
@@ -0,0 +1,381 @@
+### sCorrect-glht2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 29 2017 (12:56) 
+## Version: 
+## Last-Updated: apr 11 2023 (10:50) 
+##           By: Brice Ozenne
+##     Update #: 810
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## User 
+### Code:
+
+## * Documentation - glht2
+#' @title General Linear Hypothesis Testing With Small Sample Correction
+#' @description Test linear hypotheses on coefficients from a latent variable model with small sample corrections.
+#' @name glht2
+#' 
+#' @param object,model a \code{lvmfit}, \code{lvmfit2}, or \code{mmm} object.
+#' @param linfct [matrix or vector of character] the linear hypotheses to be tested. Same as the argument \code{par} of \code{\link{createContrast}}.
+#' @param rhs [vector] the right hand side of the linear hypotheses to be tested.
+#' @param robust [logical] should robust standard errors be used? 
+#' Otherwise the influence function is rescaled with the standard error obtained from the information matrix.
+#' @param cluster  [integer vector] the grouping variable relative to which the observations are iid.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correction of the first order bias in the residual variance (\code{"residuals"}), or correction of the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+#' Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... [logical] arguments passed to lower level methods.
+#'
+#' @details
+#' Whenever the argument linfct is not a matrix, it is passed to the function \code{createContrast} to generate the contrast matrix and, if not specified, rhs. \cr \cr
+#'
+#' Since only one degree of freedom can be specified in a glht object and it must be an integer, the degree of freedom of the denominator of an F test simultaneously testing all hypotheses is retained, after rounding. \cr \cr
+#'
+#' Arguments \code{rhs} and \code{null} are equivalent.
+#' This redundancy enables compatibility between \code{lava::compare}, \code{compare2}, \code{multcomp::glht}, and \code{glht2}.
+#' @return A \code{glht} object.
+#' 
+#' @seealso
+#' \code{\link{createContrast}} to create contrast matrices. \cr
+#' \code{\link{estimate2}} to pre-compute quantities for the small sample correction.
+#' 
+#' @concept multiple comparisons
+#'
+#' @examples
+#' library(multcomp)
+#' 
+#' ## Simulate data
+#' mSim <- lvm(c(Y1,Y2,Y3)~ beta * eta, Z1 ~ E, Z2 ~ E, Age[40:5]~1)
+#' latent(mSim) <- "eta"
+#' set.seed(10)
+#' n <- 1e2
+#'
+#' df.data <- lava::sim(mSim, n, latent = FALSE, p = c(beta = 1))
+#'
+#' #### Inference on a single model ####
+#' e.lvm <- estimate(lvm(Y1~E), data = df.data)
+#' summary(glht2(e.lvm, linfct = c("Y1~E + Y1","Y1")))
+#' 
+#' #### Inference on separate models ####
+#' ## fit separate models
+#' lvmX <- estimate(lvm(Z1 ~ E), data = df.data)
+#' lvmY <- estimate(lvm(Z2 ~ E + Age), data = df.data)
+#' lvmZ <- estimate(lvm(c(Y1,Y2,Y3) ~ eta, eta ~ E), 
+#'                  data = df.data)
+#'
+#' #### create mmm object #### 
+#' e.mmm <- mmm(X = lvmX, Y = lvmY, Z = lvmZ)
+#'
+#' #### create contrast matrix ####
+#' resC <- createContrast(e.mmm, linfct = "E")
+#'
+#' #### adjust for multiple comparisons ####
+#' e.glht2 <- glht2(e.mmm, linfct = c(X="E"), df = FALSE)
+#' summary(e.glht2)
+#'
+#' @export
+`glht2` <-
+    function(object, ...) UseMethod("glht2")
+
+
+## * glht2.lvmfit
+#' @rdname glht2
+#' @export
+glht2.lvmfit <- function(object, linfct, rhs = NULL, robust = FALSE, cluster = NULL, ssc = lava.options()$ssc, df = lava.options()$df, ...){
+    return(glht2(estimate2(object, ssc = ssc, df = df, dVcov.robust = robust, ...), linfct = linfct, rhs = rhs, robust = robust, cluster = cluster))
+
+}
+
+## * glht2.lvmfit2
+#' @rdname glht2
+#' @export
+glht2.lvmfit2 <- function(object, linfct, rhs = NULL,
+                          robust = FALSE, cluster = NULL,
+                          ...){
+
+    out <- compare2(object, linfct = linfct, rhs = rhs,
+                    robust = robust, cluster = cluster,
+                    as.lava = FALSE, F.test = FALSE, ...)
+        
+    return(out)
+}
+
+
+## * glht2.mmm
+#' @rdname glht2
+#' @export
+glht2.mmm <- function (object, linfct, rhs = 0,
+                       robust = FALSE, cluster = NULL,
+                       ...){
+
+    ## ** check the class of each model
+    n.object <- length(object)
+    name.object <- names(object)    
+    if(is.null(name.object)){
+        stop("Argument \'object\' must be named list. \n")
+    }
+
+    test.lvmfit <- sapply(object, inherits, what = "lvmfit")
+    if(any(test.lvmfit == 0)){
+        index.wrong <- which(test.lvmfit == 0)
+        stop("Argument \'object\' must be a list of objects that inherits from lvmfit. \n",
+             "Incorrect element(s): ",paste(index.wrong, collapse = " "),".\n")
+    }
+    test.lvmfit2 <- sapply(object, inherits, what = "lvmfit2")
+    if(any(test.lvmfit2 == 0)){
+        for(iO in which(test.lvmfit2==0)){
+            object[[iO]] <- estimate2(object[[iO]], dVcov.robust = robust, ...)
+        }
+    }
+    
+    ## ** define the contrast matrix
+    out <- list()
+    if (is.character(linfct)){
+        resC <- createContrast(object, linfct = linfct, rowname.rhs = FALSE)
+        linfct <- resC$contrast
+        ls.contrast <- resC$mlf
+        if("rhs" %in% names(match.call()) == FALSE){
+            rhs <- resC$null
+        }
+    }else if(is.matrix(linfct)){
+        ls.contrast <- lapply(name.object, function(x){ ## x <- name.object[2]
+            iColnames <- grep(paste0("^",x,": "), colnames(linfct), value = FALSE, fixed = FALSE)
+            iRownames <- rowSums(linfct[,iColnames,drop=FALSE]!=0)>0
+            linfct[iRownames, iColnames,drop=FALSE]            
+        })
+        names(ls.contrast) <- name.object
+        contrast <- linfct
+        if("rhs" %in% names(match.call()) == FALSE){ ## left rhs to default value
+            rhs <- rep(0, NROW(contrast))
+        }else if(length(rhs)!=NROW(contrast)){
+            stop("mismatch between the dimensions of argument \'rhs\' and argument \'contrast\' \n")
+        }
+    }else{
+        stop("Argument \'linfct\' must be a matrix or a vector of characters. \n",
+             "Consider using  out <- createContrast(...) and pass out$contrast to linfct. \n")
+    }
+
+    ## ** check whether it is possible to compute df
+    ## i.e. are linear hypothesis model specific?
+    test.df <- all(unlist(lapply(object, function(iModel){iModel$sCorrect$df == "satterthwaite"})))
+    if(test.df){
+        n.hypo <- NROW(linfct)
+        ls.modelPerTest <- lapply(1:n.hypo, function(iHypo){ ## iHypo <- 1
+            iContrast <- linfct[iHypo,]
+            iNames <- names(iContrast)[abs(iContrast)>0]
+            iModels <- unlist(lapply(strsplit(iNames, split = ":"),"[[",1))
+            return(length(unique(iModels)))
+        })
+        
+        if(any(unlist(ls.modelPerTest)>1)){
+            stop("Cannot compute the degrees of freedom for tests performed across several models \n",
+                 "Consider setting the argument \'df\' to FALSE \n")
+        }    
+    }
+
+    ## ** Total number of observations
+    if(!is.null(cluster)){
+        ls.cluster <- lapply(object, function(iO){extractData(iO, rm.na = FALSE)[[cluster]]})
+        Ucluster <- unique(unlist(ls.cluster))
+        n.cluster <- length(Ucluster)
+    }
+    
+    ## ** Extract influence functions from all models
+    ls.res <- lapply(1:n.object, function(iM){ ## iM <- 1
+
+        ## *** Pre-compute quantities
+        if(!inherits(object[[iM]],"lvmfit2")){
+            object[[iM]] <- estimate2(object[[iM]], ...)
+        }
+        out$param <- coef(object[[iM]], as.lava = FALSE)
+        name.param <- names(out$param)
+        name.object.param <- paste0(name.object[iM],": ",name.param)
+        out$param <- stats::setNames(out$param, name.object.param)
+        
+        ## *** Compute df for each test
+        if(object[[iM]]$sCorrect$df == "satterthwaite"){ ## df is "none" or "satterthwaite", never NA
+            ## here null does not matter since we only extract the degrees of freedom
+            iContrast <- ls.contrast[[iM]]
+            colnames(iContrast) <- name.param
+            iWald <- compare2(object[[iM]], linfct = iContrast, as.lava = FALSE, F.test = FALSE)
+            out$df <- iWald$df
+        }else{
+            out$df <- Inf
+        }
+        ## *** get iid decomposition
+        iid.tempo <- iid(object[[iM]], robust = robust, cluster = cluster, as.lava = FALSE)
+        if(!is.null(cluster)){
+            out$iid <- matrix(NA, nrow = n.cluster, ncol = length(name.param),
+                              dimnames = list(Ucluster, name.param))
+            out$iid[attr(iid.tempo,"cluster"),] <- iid.tempo
+        }else{
+            out$iid <- iid.tempo
+        }
+        colnames(out$iid) <- name.object.param
+
+        ## *** get se
+        if(robust){
+            out$se <- sqrt(diag(crossprod(iid.tempo)))
+        }else{
+            out$se <- sqrt(diag(vcov(object[[iM]], as.lava = FALSE)))
+        }
+        return(out)
+        
+    })
+    seq.df <- unlist(lapply(ls.res,"[[","df"))
+    seq.param <- unlist(lapply(ls.res,"[[","param"))
+
+    if(test.df){
+        df.global <- round(stats::median(seq.df), digits = 0)
+    }else{
+        df.global <- 0
+    }
+    ls.iid <- lapply(ls.res,"[[","iid")
+    ls.se <- lapply(ls.res,"[[","se")
+    n.obs <- unique(unlist(lapply(ls.iid, NROW)))
+    if(length(n.obs)>1){
+        stop("Mismatch between the number of observations in the iid \n",
+             "Likely to be due to the presence of missing values \n",
+             "Consider specifying the \'cluster\' argument \n")
+    }
+    M.iid <- do.call(cbind,ls.iid)
+    diag.se <- diag(do.call(c,ls.se))
+    if(any(is.na(M.iid))){
+       M.iid[is.na(M.iid)] <- 0
+    }
+    vcov.object <- diag.se %*% stats::cov2cor(crossprod(M.iid)) %*% diag.se ## same as multcomp:::vcov.mmm
+    dimnames(vcov.object) <- list(colnames(M.iid), colnames(M.iid))
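+    ## the cross-model correlations come from the stacked influence functions,
+    ## while the diagonal is rescaled to match each model's own standard errors
+    ## (robust or model-based), mirroring multcomp:::vcov.mmm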
+    
+    ## ** sanity check
+    name.param <- names(seq.param)
+    if(!identical(colnames(linfct),name.param)){
+        stop("Column names of the contrast matrix does not match the one of the coefficients \n")
+    }
+    if(!identical(colnames(vcov.object),name.param)){
+        stop("Column names of the variance covariance matrix does not match the one of the coefficients \n")
+    }
+    if(!identical(rownames(vcov.object),name.param)){
+        stop("Row names of the variance-covariance matrix do not match the names of the coefficients \n")
+    }
+
+    ## ** convert to the appropriate format    
+    out <- list(model = object,
+                linfct = linfct,
+                rhs = unname(rhs),
+                coef = seq.param,
+                vcov = vcov.object,
+                df = df.global,
+                alternative = "two.sided",
+                type = NULL,
+                robust = robust)
+    class(out) <- c("glht2","glht")
+        
+    ## ** export
+    return(out)    
+}
+
+
+## * glht.lvmfit2
+#' @rdname glht2
+#' @export
+glht.lvmfit2 <- function(model, linfct, rhs = NULL,
+                         robust = FALSE, cluster = NULL,
+                         ...){
+
+    out <- compare2(model, linfct = linfct, rhs = rhs,
+                    robust = robust, cluster = cluster,
+                    as.lava = FALSE, F.test = FALSE, ...)
+        
+    return(out)
+}
+
+## * .calcClosure
+.calcClosure <- function(name, estimate, covariance, type, df){
+
+    n.hypo <- length(name)
+    correlation <- stats::cov2cor(covariance)
+
+    ## ** create all possible hypotheses
+    ls.closure <- lapply(n.hypo:1, function(iNtest){ ## iNtest <- 1  
+        iList <- list(M = utils::combn(name, m = iNtest))
+        iList$vec <- apply(iList$M, 2, paste, collapse = ",")
+        return(iList)
+    })
+
+    ## ** compute all p.values
+    for(iLevel in 1:length(ls.closure)){ ## iLevel <- 1
+        ls.closure[[iLevel]]$test <- t(apply(ls.closure[[iLevel]]$M, 2, function(iHypo){
+            index <- which(name %in% iHypo)
+            if(type == "chisq"){
+                return(.ChisqTest(estimate[index], covariance = covariance[index,index,drop=FALSE], df = df))
+            }else if(type == "max"){
+                return(.tTest(estimate[index],
+                              covariance = covariance[index,index,drop=FALSE],
+                              correlation = correlation[index,index,drop=FALSE], df = df))
+            }
+        }))
+        rownames(ls.closure[[iLevel]]$test) <- ls.closure[[iLevel]]$vec
+    }
+    
+    ## ** find all hypotheses in the closure related to an individual hypothesis
+    ls.hypo <- vector(mode = "list", length = n.hypo)
+    for(iHypo in 1:n.hypo){ ## iHypo <- 1
+        ls.hypo[[iHypo]] <- do.call(rbind,lapply(ls.closure, function(iClosure){ ## iClosure <- 1
+            iIndex <- which(colSums(iClosure$M==name[iHypo])>0)
+            data.frame(hypothesis = iClosure$vec[iIndex],
+                       statistic = as.double(iClosure$test[iIndex,"statistic"]),
+                       p.value = as.double(iClosure$test[iIndex,"p.value"]))
+        }))
+    }
+    names(ls.hypo) <- name
+        
+    ## ** adjusted p.values
+    vec.p.value <- unlist(lapply(ls.hypo, function(x){max(x$p.value)}))
+    return(list(closure = ls.closure,
+                test = ls.hypo,
+                p.value = vec.p.value))
+    
+}
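+## Illustration of the closure principle implemented above (hypothetical
+## numbers): with hypotheses H1, H2 the closure is {H1, H2, "H1,H2"}. If
+## p(H1)=0.01, p(H2)=0.20 and the joint test gives p("H1,H2")=0.03, the
+## adjusted p-value of each hypothesis is the maximum over the subsets
+## containing it: 0.03 for H1 and 0.20 for H2.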
+
+## * .tTest
+.tTest <- function(estimate, covariance, correlation, df, ...){
+    df1 <- length(estimate)
+    statistic <- max(abs(estimate/sqrt(diag(covariance))))
+    if(is.null(df)){
+        distribution <-  "gaussian"
+    }else{
+        distribution <- "student"
+    }
+    p.value <- .calcPmaxIntegration(statistic, p = df1, Sigma = correlation, df = df,
+                                    distribution = distribution)
+    return(c("statistic" = statistic,
+             "p.value" = p.value))
+}
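+## .tTest is a single-step max-|t| test: the p-value is P(max_j |T_j| >= the
+## observed maximum) under a multivariate Student (or Gaussian when df is NULL)
+## distribution with the given correlation matrix.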
+
+## * .ChisqTest
+.ChisqTest <- function(estimate, covariance, df, ...){
+    df1 <- length(estimate)
+    ## q * statistic ~ chisq or fisher
+    statistic <- as.double(matrix(estimate, nrow = 1) %*% solve(covariance) %*% matrix(estimate, ncol = 1)) / df1
+    if(!is.null(df)){
+        return(c("statistic" = statistic,
+                 "p.value" = 1-stats::pf(statistic, df1 = df1, df2 = df)))
+    }else{
+        return(c("statistic" = statistic,
+                 "p.value" = 1-stats::pchisq(statistic, df = df1)))
+        
+    }
+}
+
+######################################################################
+### sCorrect-glht2.R ends here
diff --git a/R/sCorrect-hessian2.R b/R/sCorrect-hessian2.R
new file mode 100644
index 0000000..4f1dcf4
--- /dev/null
+++ b/R/sCorrect-hessian2.R
@@ -0,0 +1,305 @@
+### sCorrect-hessian2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: dec 11 2019 (14:09) 
+## Version: 
+## Last-Updated: Jan 17 2022 (23:21) 
+##           By: Brice Ozenne
+##     Update #: 143
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - hessian2
+#' @title Hessian With Small Sample Correction.
+#' @description Extract the hessian from a latent variable model, with small sample correction.
+#' @name hessian2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param indiv [logical] If \code{TRUE}, the hessian relative to each observation is returned. Otherwise the total hessian is returned.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param as.lava [logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the hessian.
+#'
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#' 
+#' @return An array containing the second derivative of the likelihood relative to each sample (dim 3)
+#' and each pair of model coefficients (dim 1,2).
+#' 
+#' @examples
+#' #### simulate data ####
+#' n <- 5e1
+#' p <- 3
+#' X.name <- paste0("X",1:p)
+#' link.lvm <- paste0("Y~",X.name)
+#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
+#'
+#' m <- lvm(formula.lvm)
+#' distribution(m,~Id) <- Sequence.lvm(0)
+#' set.seed(10)
+#' d <- lava::sim(m,n)
+#'
+#' #### latent variable models ####
+#' e.lvm <- estimate(lvm(formula.lvm),data=d)
+#' hessian2(e.lvm)
+#'
+#' @concept small sample inference
+#' @export
+`hessian2` <-
+  function(object, indiv, cluster, as.lava, ...) UseMethod("hessian2")
+
+## * hessian2.lvmfit
+#' @rdname hessian2
+#' @export
+hessian2.lvmfit <- function(object, indiv = FALSE, cluster = NULL, as.lava = TRUE, ssc = lava.options()$ssc, ...){
+
+    return(hessian2(estimate2(object, ssc = ssc, hessian = TRUE, ...), cluster = cluster, as.lava = as.lava))
+
+}
+
+
+## * hessian2.lvmfit2
+#' @rdname hessian2
+#' @export
+hessian2.lvmfit2 <- function(object, indiv = FALSE, cluster = NULL, as.lava = TRUE, ...){
+    
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    ## ** define cluster
+    if(is.null(cluster)){
+        n.cluster <- object$sCorrect$cluster$n.cluster
+        cluster.index <- 1:n.cluster
+    }else{
+        if(!is.numeric(cluster)){
+            data <- object$sCorrect$data
+            if(length(cluster)==1){                
+                if(cluster %in% names(data) == FALSE){
+                    stop("Invalid \'cluster\' argument \n",
+                         "Could not find variable \"",cluster,"\" in argument \'data\' \n")
+                }
+                cluster <- data[[cluster]]
+            }
+            cluster.index <- as.numeric(factor(cluster, levels = unique(cluster)))            
+        }else{
+            cluster.index <- as.numeric(factor(cluster, levels = unique(cluster)))
+        }
+
+        n.cluster <- length(unique(cluster.index))
+    }
+
+    ## ** get hessian
+    hessian <- object$sCorrect$hessian
+    if(is.null(hessian)){
+        hessian <- .hessian2(dmu = object$sCorrect$dmoment$dmu,
+                             dOmega = object$sCorrect$dmoment$dOmega,
+                             d2mu = object$sCorrect$d2moment$d2mu,
+                             d2Omega = object$sCorrect$d2moment$d2Omega,
+                             epsilon = object$sCorrect$residuals,                                     
+                             OmegaM1 = object$sCorrect$moment$OmegaM1.missing.pattern,
+                             missing.pattern = object$sCorrect$missing$pattern,
+                             unique.pattern = object$sCorrect$missing$unique.pattern,
+                             name.pattern = object$sCorrect$missing$name.pattern,
+                             grid.mean = object$sCorrect$skeleton$grid.dmoment$mean, 
+                             grid.var = object$sCorrect$skeleton$grid.dmoment$var, 
+                             grid.hybrid = object$sCorrect$skeleton$grid.dmoment$hybrid, 
+                             name.param = object$sCorrect$skeleton$Uparam,
+                             leverage = object$sCorrect$leverage,
+                             n.cluster = object$sCorrect$cluster$n.cluster,
+                             weights = object$sCorrect$weights)
+
+        hessian.name <- stats::setNames(names(object$sCorrect$skeleton$originalLink2param),object$sCorrect$skeleton$originalLink2param)[object$sCorrect$skeleton$Uparam]
+        dimnames(hessian) <- list(as.character(hessian.name),
+                                  as.character(hessian.name),
+                                  NULL)
+    }
+
+    if(!is.null(cluster)){ ## aggregate hessian by cluster
+        hessianSave <- hessian
+        hessian <- array(0, dim = c(dim(hessianSave)[1:2], n.cluster),
+                         dimnames = c(dimnames(hessianSave)[1:2], list(NULL)))
+        for(i in 1:length(cluster.index)){ ## cluster.index: numeric cluster id of observation i
+            hessian[,,cluster.index[i]] <- hessian[,,cluster.index[i]] + hessianSave[,,i]
+        }
+    }
+
+    ## ** export
+    hessian <- hessian[names(object$sCorrect$skeleton$originalLink2param),
+                       names(object$sCorrect$skeleton$originalLink2param),
+                      ,
+                       drop=FALSE]
+    if(as.lava == FALSE){
+        dimnames(hessian) <- list(as.character(object$sCorrect$skeleton$originalLink2param),
+                                  as.character(object$sCorrect$skeleton$originalLink2param),
+                                  NULL)
+    }
+    if(indiv==FALSE){
+        hessian <- apply(hessian, 1:2, sum)
+        
+    }else if(!is.null(cluster)){
+        index2.cluster <- tapply(1:length(cluster),cluster,list)
+        attr(hessian,"cluster") <- names(index2.cluster)
+    }
+    
+    return(hessian)
+}
+
+## * .hessian2
+#' @title Compute the Hessian Matrix From the Conditional Moments
+#' @description Compute the Hessian matrix from the conditional moments.
+#' @name hessian2-internal
+#' 
+#' @details \code{.hessian2} will perform the computation individually when the
+#' argument \code{index.Omega} is not null.
+#' 
+#' @keywords internal
+.hessian2 <- function(dmu, dOmega, d2mu, d2Omega, epsilon, OmegaM1,
+                      missing.pattern, unique.pattern, name.pattern,
+                      grid.mean, grid.var, grid.hybrid, name.param,
+                      leverage, n.cluster, weights){
+    if(lava.options()$debug){cat(".hessian2\n")}
+
+    ## ** Prepare
+    n.grid.mean <- NROW(grid.mean)
+    n.grid.var <- NROW(grid.var)
+    n.grid.hybrid <- NROW(grid.hybrid)
+    n.param <- length(name.param)
+    n.pattern <- length(name.pattern)
+
+    hessian <- array(NA, dim = c(n.param, n.param, n.cluster),
+                     dimnames = list(name.param,name.param,NULL))
+    if(length(dmu)>0){
+        index.mean <- 1:n.grid.mean
+    }else{
+        index.mean <- NULL
+    }
+    if(length(dOmega)>0){
+        index.var <- 1:n.grid.var
+    }else{
+        index.var <- NULL
+    }
+    if(length(dmu)>0 && length(dOmega)>0){
+        index.hybrid <- 1:n.grid.hybrid
+    }else{
+        index.hybrid <- NULL
+    } 
+
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        iEpsilon <- epsilon[iIndex,iY,drop=FALSE]
+        idmu <- .subsetList(dmu, indexRow = iIndex, indexCol = iY)
+        idOmega <- .subsetList(dOmega, indexRow = iY, indexCol = iY)
+        id2mu <- .subsetList2(d2mu, indexRow = iIndex, indexCol = iY)
+        id2Omega <- .subsetList2(d2Omega, indexRow = iY, indexCol = iY)
+        if(!is.null(leverage)){
+            iLeverage <- leverage[iIndex,iY,drop=FALSE]
+        }
+        hessian[,,iIndex] <- 0 ## initialize (keep NA for missing values)
+
+        ## *** second derivative relative to the mean parameters
+        for(iG in index.mean){ # iG <- 1
+            iP1 <- grid.mean[iG,1]
+            iP2 <- grid.mean[iG,2]
+            if(grid.mean[iG,"d2.12"]){
+                term1 <- rowSums((id2mu[[iP1]][[iP2]] %*% iOmegaM1) * iEpsilon)
+            }else if(grid.mean[iG,"d2.21"]){
+                term1 <- rowSums((id2mu[[iP2]][[iP1]] %*% iOmegaM1) * iEpsilon)
+            }else{
+                term1 <- 0
+            }
+            term2 <- -rowSums((idmu[[iP1]] %*% iOmegaM1) * idmu[[iP2]])
+            hessian[iP1,iP2,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE] + term1 + term2
+            hessian[iP2,iP1,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE]
+        }
+        ## *** second derivative relative to the variance parameters
+        for(iG in index.var){ # iG <- 2
+            iP1 <- grid.var[iG,1]
+            iP2 <- grid.var[iG,2]
+
+            term1a <- - diag(iOmegaM1 %*% idOmega[[iP1]] %*% iOmegaM1 %*% idOmega[[iP2]])
+            term2 <- - rowSums((iEpsilon %*% iOmegaM1 %*% idOmega[[iP2]] %*% iOmegaM1 %*% idOmega[[iP1]] %*% iOmegaM1) * iEpsilon)
+            if(grid.var[iG,"d2.12"]){
+                term1b <- diag(iOmegaM1 %*% id2Omega[[iP1]][[iP2]])
+                term3 <- 1/2 * rowSums((iEpsilon %*% iOmegaM1 %*% id2Omega[[iP1]][[iP2]] %*% iOmegaM1) * iEpsilon)
+            }else if(grid.var[iG,"d2.21"]){
+                term1b <- diag(iOmegaM1 %*% id2Omega[[iP2]][[iP1]])
+                term3 <- 1/2 * rowSums((iEpsilon %*% iOmegaM1 %*% id2Omega[[iP2]][[iP1]] %*% iOmegaM1) * iEpsilon)
+            }else{
+                term1b <- 0
+                term3 <- 0
+            }
+            if(is.null(leverage)){
+                hessian[iP1,iP2,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE] - 1/2 * rep(sum(term1a + term1b), length(iIndex)) + term2 + term3
+            }else{
+                hessian[iP1,iP2,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE] - 1/2 * rowSums( sweep(1-iLeverage, FUN = "*", STATS = term1a + term1b, MARGIN = 2) ) + term2 + term3
+            }
+            hessian[iP2,iP1,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE]
+        }
+        
+        ## *** second derivative relative to the mean and variance parameters
+        for(iG in index.hybrid){ # iG <- 1
+            iP1 <- grid.hybrid[iG,1]
+            iP2 <- grid.hybrid[iG,2]
+
+            if(!is.null(idmu[[iP1]]) && !is.null(idOmega[[iP2]])){
+                term1 <- - rowSums((idmu[[iP1]] %*% iOmegaM1 %*% idOmega[[iP2]] %*% iOmegaM1) * iEpsilon)
+            }else{
+                term1 <- 0
+            }
+            if(!is.null(idmu[[iP2]]) && !is.null(idOmega[[iP1]])){
+                term2 <- - rowSums((idmu[[iP2]] %*% iOmegaM1 %*% idOmega[[iP1]] %*% iOmegaM1) * iEpsilon)
+            }else{
+                term2 <- 0
+            }
+            
+            hessian[iP1,iP2,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE] + term1 + term2
+            hessian[iP2,iP1,iIndex] <- hessian[iP1,iP2,iIndex,drop=FALSE]
+        }
+    }
+
+    ## ** export
+    if(!is.null(weights)){
+        for(iI in 1:length(weights)){
+            hessian[,,iI] <- weights[iI] * hessian[,,iI]
+        }
+    }
+    return(hessian)
+}
+
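+## Sketch: for the mean parameters of a linear model (mu = X beta, Omega not
+## depending on beta), d2mu is zero so only term2 above remains and the
+## hessian block summed over observations is -X' Omega^{-1} X (illustration only):
+if(FALSE){
+    X <- cbind(1, rnorm(10)); OmegaM1 <- 1/2 ## univariate outcome, variance 2
+    H.mean <- -crossprod(X) * OmegaM1
+}
+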
+## * .subsetList
+.subsetList <- function(object, indexRow, indexCol){
+    if(length(object)==0){
+        return(object)
+    }else{    
+        return(lapply(object, FUN = function(iL){iL[indexRow,indexCol,drop=FALSE]}))
+    }
+}
+## * .subsetList2
+.subsetList2 <- function(object, indexRow, indexCol){
+    if(length(object)==0){
+        return(object)
+    }else{    
+        return(lapply(object, FUN = .subsetList, indexRow = indexRow, indexCol = indexCol))
+    }
+}
+
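+## Sketch: .subsetList applies the same row/column subset to every matrix in a
+## list, and .subsetList2 does so one level deeper (a list of lists of
+## matrices), as used when restricting to a missing-data pattern (illustration only):
+if(FALSE){
+    ls.M <- list(A = matrix(1:9, 3, 3), B = matrix(11:19, 3, 3))
+    .subsetList(ls.M, indexRow = 1:2, indexCol = c(1, 3)) ## two 2x2 matrices
+}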
+
+######################################################################
+### sCorrect-hessian2.R ends here
diff --git a/R/sCorrect-iid2.R b/R/sCorrect-iid2.R
new file mode 100644
index 0000000..a214847
--- /dev/null
+++ b/R/sCorrect-iid2.R
@@ -0,0 +1 @@
+### iid2.R --- 
#----------------------------------------------------------------------
## author: Brice Ozenne
## created: okt 12 2017 (13:16) 
## Version: 
## last-updated: jan 18 2022 (09:38) 
##           By: Brice Ozenne
##     Update #: 625
#----------------------------------------------------------------------
## 
### Commentary: 
## 
### Change Log:
#----------------------------------------------------------------------
## 
### Code:

## * Documentation - iid2
#' @title  Influence Function With Small Sample Correction.
#' @description  Extract the influence function from a latent variable model.
#' It is similar to \code{lava::iid} but with small sample correction.
#' @name iid2
#'
#' @param object,x a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
#' @param robust [logical] if \code{FALSE}, the influence function is rescaled such that its squared sum equals the model-based standard error (instead of the robust standard error).
#' It does not, however, match the model-based correlation.
#' @param as.lava [logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.
#' @param ssc [character] method used to correct the small sample bias of the variance coefficients (\code{"none"}, \code{"residual"}, \code{"cox"}). Only relevant when using a \code{lvmfit} object. 
#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
#'
#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the influence function.
#'
#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
#'
#' @return A matrix containing the 1st order influence function relative to each sample (in rows)
#' and each model coefficient (in columns).
#' 
#' @examples
#' #### simulate data ####
#' n <- 5e1
#' p <- 3
#' X.name <- paste0("X",1:p)
#' link.lvm <- paste0("Y~",X.name)
#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
#'
#' m <- lvm(formula.lvm)
#' distribution(m,~Id) <- Sequence.lvm(0)
#' set.seed(10)
#' d <- sim(m,n)
#'
#' #### latent variable model ####
#' e.lvm <- estimate(lvm(formula.lvm),data=d)
#' iid.tempo <- iid2(e.lvm)
#'
#'
#' @concept extractor
#' @keywords smallSampleCorrection
#' @export
`iid2` <-
  function(object, ...) UseMethod("iid2")

## * iid2.lvmfit
#' @rdname iid2
#' @export
iid2.lvmfit <- function(object, robust = TRUE, cluster = NULL, as.lava = TRUE, ssc = lava.options()$ssc,...){

    return(lava::iid(estimate2(object, ssc = ssc, ...), robust = robust, cluster = cluster, as.lava = as.lava))

}

## * iid2.lvmfit2
#' @rdname iid2
#' @export
iid2.lvmfit2 <- function(object, robust = TRUE, cluster = NULL, as.lava = TRUE, ...){

    dots <- list(...)
    if(length(dots)>0){
        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
    }

    ## ** compute iid
    object.score <- score2(object, indiv = TRUE, cluster = cluster, as.lava = FALSE)
    object.vcov <- vcov2(object, as.lava = FALSE)
    object.iid <- object.score %*% object.vcov

    if(robust == FALSE){
        object.chol.vcov <- matrixPower(object.vcov, power = 1/2, symmetric = TRUE)
        object.chol.rvcov <- matrixPower(crossprod(object.iid[rowSums(!is.na(object.iid))>0,,drop=FALSE]), power = -1/2, symmetric = TRUE)

        object.iid <- object.iid %*% object.chol.rvcov %*% object.chol.vcov
    }

    ## restore names
    colnames(object.iid) <- colnames(object.score)
    if(!is.null(cluster)){
        attr(object.iid,"cluster") <- attr(object.score,"cluster")
    }

    ## ** export
    if(as.lava){
        object.iid <- object.iid[,names(object$sCorrect$skeleton$originalLink2param),drop=FALSE]
        colnames(object.iid) <- as.character(object$sCorrect$skeleton$originalLink2param)
    }
    return(object.iid)

}
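
## Sketch (illustration only): since object.iid = object.score %*% object.vcov,
## the cross-product of the influence function equals the sandwich estimator
## vcov %*% crossprod(score) %*% vcov, i.e. a robust variance of the coefficients:
if(FALSE){ ## assumes e.lvm fitted as in the examples above
    iid.e <- iid2(e.lvm)
    vcov.robust <- crossprod(iid.e) ## robust (sandwich) variance-covariance
}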

## * iid.lvmfit2
#' @rdname iid2
#' @export
iid.lvmfit2 <- function(x, robust = TRUE, cluster = NULL, as.lava = TRUE, ...){
    return(iid2(x, robust = robust, cluster = cluster, as.lava = as.lava, ...)) ## necessary as first argument of iid must be x 
}

## * Documentation - iid2plot
#' @title  Display the i.i.d. Decomposition
#' @description  Extract the i.i.d. decomposition and display it along with the corresponding coefficient.
#'
#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
#' @param param [character] name of one of the model parameters.
#'
#' @export
iid2plot <- function(object, param){

    all.param <- coef2(object)
    if(length(param) != 1){
        stop("Incorrect argument \'param\'. \n",
             "Should have length 1. \n")
    }
    if(param %in% names(all.param) == FALSE){
        stop("Incorrect argument \'param\'. \n",
             "Should be one of the model parameters. \n")
    }

    object.iid <- iid2(object)[,param]
    h <- graphics::hist(object.iid, main = paste0(param,"=",all.param[param]), xlab = "iid")

    return(invisible(h))
}

##----------------------------------------------------------------------
### iid2.R ends here

\ No newline at end of file
diff --git a/R/sCorrect-information2.R b/R/sCorrect-information2.R
new file mode 100644
index 0000000..2b1ee72
--- /dev/null
+++ b/R/sCorrect-information2.R
@@ -0,0 +1,179 @@
+### sCorrect-information.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: feb 19 2018 (14:17) 
+## Version: 
+## Last-Updated: Jan 17 2022 (22:54) 
+##           By: Brice Ozenne
+##     Update #: 445
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## Compute information, hessian, and first derivative of information
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - information2
+#' @title  Expected Information With Small Sample Correction. 
+#' @description  Extract the expected information matrix from a latent variable model.
+#' Similar to \code{lava::information} but with small sample correction.
+#' @name information2
+#'
+#' @param object,x a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param as.lava [logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the information matrix.
+#'
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#'
+#' @return A matrix with as many rows and columns as the number of coefficients.
+#' 
+#' @examples
+#' #### simulate data ####
+#' n <- 5e1
+#' p <- 3
+#' X.name <- paste0("X",1:p)
+#' link.lvm <- paste0("Y~",X.name)
+#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
+#'
+#' m <- lvm(formula.lvm)
+#' distribution(m,~Id) <- Sequence.lvm(0)
+#' set.seed(10)
+#' d <- lava::sim(m,n)
+#' 
+#' #### linear models ####
+#' e.lm <- lm(formula.lvm,data=d)
+#' 
+#' #### latent variable models ####
+#' e.lvm <- estimate(lvm(formula.lvm),data=d)
+#' information(e.lvm)
+#' information2(e.lvm)
+#' information2(e.lvm)[1:4,1:4] -  solve(vcov(e.lm))
+#'
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`information2` <-
+  function(object, as.lava, ssc, ...) UseMethod("information2")
+
+## * information2.lvmfit
+#' @rdname information2
+#' @export
+information2.lvmfit <- function(object, as.lava = TRUE, ssc = lava.options()$ssc, ...){
+
+    return(information(estimate2(object, ssc = ssc, ...), as.lava = as.lava))
+
+}
+
+## * information2.lvmfit2
+#' @rdname information2
+#' @export
+information2.lvmfit2 <- function(object, as.lava = TRUE, ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    out <- object$sCorrect$information[names(object$sCorrect$skeleton$originalLink2param),
+                                       names(object$sCorrect$skeleton$originalLink2param),
+                                       drop=FALSE]
+    if(as.lava==FALSE){
+        dimnames(out) <- list(as.character(object$sCorrect$skeleton$originalLink2param),
+                              as.character(object$sCorrect$skeleton$originalLink2param))
+    }
+    return(out)
+}
+
+## * information.lvmfit2
+#' @rdname information2
+#' @export
+information.lvmfit2 <- function(x, ...){ ## necessary as first argument of information must be x 
+    information2(x, ...)
+}
+
+## * .information2
+#' @title Compute the Expected Information Matrix From the Conditional Moments
+#' @description Compute the expected information matrix from the conditional moments.
+#' @name information2-internal
+#' 
+#' @details \code{.information2} will perform the computation individually when the
+#' argument \code{index.Omega} is not null.
+#' 
+#' @keywords internal
+.information2 <- function(dmu, dOmega, OmegaM1,
+                          missing.pattern, unique.pattern, name.pattern,
+                          grid.mean, grid.var, name.param,
+                          leverage, weights = NULL, n.cluster){
+    if(lava.options()$debug){cat(".information2\n")}
+    if(is.null(weights)){weights <- rep(1,n.cluster)}
+    
+    ## ** Prepare
+    n.grid.mean <- NROW(grid.mean)
+    n.grid.var <- NROW(grid.var)
+    n.param <- length(name.param)
+    n.pattern <- length(name.pattern)
+
+    Info <- matrix(0, nrow = n.param, ncol = n.param,
+                   dimnames = list(name.param,name.param))
+    if(length(dmu)>0){
+        index.mean <- 1:n.grid.mean
+    }else{
+        index.mean <- NULL
+    }
+    if(length(dOmega)>0){
+        index.var <- 1:n.grid.var
+    }else{
+        index.var <- NULL
+    } 
+
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+
+        if(!is.null(leverage)){
+            iN.corrected <- sum(weights[iIndex]) - colSums(leverage[iIndex,iY,drop=FALSE])
+        }else{
+            iN.corrected <- sum(weights[iIndex]) 
+        }
+        
+        ## *** Information relative to the mean parameters
+        for(iG in index.mean){ # iG <- 1
+            iP1 <- grid.mean[iG,1]
+            iP2 <- grid.mean[iG,2]
+            Info[iP1,iP2] <- Info[iP1,iP2] + sum(rowSums(dmu[[iP1]][iIndex,iY,drop=FALSE] %*% iOmegaM1 * dmu[[iP2]][iIndex,iY,drop=FALSE])*weights[iIndex])
+        }
+
+        ## *** Information relative to the variance parameters
+        for(iG in index.var){ # iG <- 2
+            iP1 <- grid.var[iG,1]
+            iP2 <- grid.var[iG,2]
+
+            ## NOTE: normally tr(ABAC)=tr(ACAB) but because of the factor n.corrected this is no longer the case,
+            ##       so the information may depend slightly on the ordering of the parameters
+            iDiag <- diag(iOmegaM1 %*% dOmega[[iP1]][iY,iY,drop=FALSE] %*% iOmegaM1 %*% dOmega[[iP2]][iY,iY,drop=FALSE])
+                        
+            Info[iP1,iP2] <- Info[iP1,iP2] + 1/2*sum(iDiag*iN.corrected)
+        }        
+    }
+
+    ## ** Make Info a symmetric matrix
+    Info <- symmetrize(Info, update.upper = NULL)
+        
+    ## ** export
+    return(Info)
+}
+
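+## Sketch: for a linear model Y = X beta + e with Var(e) = sigma2, dmu_i = X[i,]
+## and the mean block above reduces to the textbook expected information
+## X'X / sigma2 (illustration only):
+if(FALSE){
+    set.seed(1); X <- cbind(1, rnorm(10)); sigma2 <- 2
+    Info.mean <- crossprod(X) / sigma2 ## = sum_i dmu_i' Omega^{-1} dmu_i
+}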
+
+
+
+
diff --git a/R/sCorrect-leverage2.R b/R/sCorrect-leverage2.R
new file mode 100644
index 0000000..4ed7ef6
--- /dev/null
+++ b/R/sCorrect-leverage2.R
@@ -0,0 +1,176 @@
+### leverage2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: feb 19 2018 (17:58) 
+## Version: 
+## Last-Updated: jan 17 2022 (11:56) 
+##           By: Brice Ozenne
+##     Update #: 177
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * documentation - leverage2
+#' @title Leverage With Small Sample Correction.
+#' @description Extract leverage values from a latent variable model, with small sample correction. 
+#' @name leverage2
+#' 
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param format [character] Use \code{"wide"} to return the leverage values in the wide format (one row relative to each sample).
+#' Otherwise use \code{"long"} to return the leverage values in the long format.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details The leverage values are defined as the partial derivatives of the fitted values with respect to the observations.
+#' \deqn{
+#' leverage_i = \frac{\partial \hat{Y}_i}{\partial Y_i}
+#' }
+#' See Wei et al. (1998). \cr \cr
+#' 
+#' When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the leverage.
+#'
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#' 
+#' @return a matrix containing the leverage values relative to each sample (in rows)
+#' and each endogenous variable (in columns).
+#'
+#' @references Bo-Cheng Wei et al., Generalized Leverage and its applications (1998), Scandinavian Journal of Statistics 25:1:25-37.
+#' 
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' m <- lvm(Y1~eta,Y2~eta,Y3~eta)
+#' latent(m) <- ~eta
+#' d <- lava::sim(m,20, latent = FALSE)
+#'
+#' #### latent variable models ####
+#' e.lvm <- estimate(m, data = d)
+#' leverage2(e.lvm)
+#' 
+#' @concept estimator
+#' @keywords smallSampleCorrection
+#' 
+#' @export
+`leverage2` <-
+    function(object, format, ssc, ...) UseMethod("leverage2")
+
+## * leverage2.lvmfit
+#' @rdname leverage2
+#' @export
+leverage2.lvmfit <- function(object, format = "wide", ssc = lava.options()$ssc, ...){
+
+    return(leverage2(estimate2(object, ssc = ssc, ...), format = format))
+
+}
+
+## * leverage2.lvmfit2
+#' @rdname leverage2
+#' @export
+leverage2.lvmfit2 <- function(object, format = "wide", ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    format <- match.arg(format, choices = c("long","wide"))
+
+    if(format == "wide"){
+        return(object$sCorrect$leverage)
+    }else if(format == "long"){
+        endogenous <- colnames(object$sCorrect$leverage)
+        n.endogenous <- length(endogenous)
+        
+        outW <- data.frame(cluster = 1:NROW(object$sCorrect$leverage), object$sCorrect$leverage)
+        outL <- stats::na.omit(stats::reshape(outW,
+                                              idvar = "id",
+                                              direction = "long",
+                                              varying = list(endogenous),
+                                              timevar = "endogenous",
+                                              v.names = "leverage"))
+        rownames(outL) <- NULL
+
+        
+        outL$endogenous <- factor(outL$endogenous, levels = 1:n.endogenous, labels = endogenous)
+        reorder <- match(interaction(object$sCorrect$old2new.order$XXclusterXX.old,object$sCorrect$old2new.order$XXendogenousXX.old),
+                         interaction(outL$cluster,outL$endogenous))
+        return(outL[reorder,])
+    }
+
+}
+
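+## Sketch: in a linear model the generalized leverage of Wei et al. (1998)
+## reduces to the classical hat values diag(X (X'X)^{-1} X'), a useful sanity
+## check (illustration only, not the package computation):
+if(FALSE){
+    X <- cbind(1, rnorm(20))
+    h <- diag(X %*% solve(crossprod(X)) %*% t(X)) ## hat values, sum(h) = 2
+}
+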
+## * .leverage2
+.leverage2 <- function(Omega, epsilon, dmu, dOmega, vcov.param,
+                       name.pattern, missing.pattern, unique.pattern,
+                       endogenous, n.endogenous, param, param.mean, param.var, n.cluster){
+
+    n.pattern <- NROW(unique.pattern)
+    n.param <- length(param)
+    leverage <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
+                       dimnames = list(NULL, endogenous))
+    if(length(param.mean)==0){
+        leverage[] <- 0
+        return(leverage)
+    }
+    if(is.null(vcov.param)){
+
+        stop("Cannot compute the leverage values without the variance-covariance matrix of the coefficients. \n")
+
+    }
+
+    scoreY <- array(0, dim = c(n.cluster, n.endogenous, n.param),
+                    dimnames = list(NULL, endogenous, param))
+    
+    for(iP in 1:n.pattern){ ## iP <- 1 
+        iIndex <- missing.pattern[[iP]]
+        iY <- which(unique.pattern[iP,]==1)
+        
+        iOmega <- Omega[iY,iY,drop=FALSE]
+        iOmegaM1 <- chol2inv(chol(iOmega))
+        iOmegaM1.epsilon <- epsilon[iIndex,iY,drop=FALSE] %*% iOmegaM1
+            
+        ## derivative of the score regarding Y
+        for(iParam in param){
+            
+            if(iParam %in% param.mean){
+                if(length(iY)>1){
+                    scoreY[iIndex,iY,iParam] <- scoreY[iIndex,iY,iParam] + t(dmu[iParam,iY,iIndex]) %*% iOmegaM1 
+                }else{
+                    scoreY[iIndex,iY,iParam] <- scoreY[iIndex,iY,iParam] + dmu[iParam,iY,iIndex] * iOmegaM1[1,1] 
+                }
+            }
+            if(iParam %in% param.var){
+                scoreY[iIndex,iY,iParam] <- scoreY[iIndex,iY,iParam] + 2 * iOmegaM1.epsilon %*% dOmega[[iParam]][iY,iY,drop=FALSE] %*% iOmegaM1
+            }
+
+        
+
+        }
+
+        ## leverage
+        for(iiY in iY){ ## iiY <- iY[2]
+            
+            if(length(param.mean)==1){
+                leverage[iIndex,iiY] <- dmu[param.mean,iiY,iIndex] * (scoreY[iIndex,iiY,] %*% vcov.param)[,param.mean]
+            }else if(n.param==1){
+                leverage[iIndex,iiY] <- dmu[param.mean,iiY,iIndex] * vcov.param * scoreY[iIndex,iiY,]
+            } else{
+                leverage[iIndex,iiY] <- rowSums(t(dmu[param.mean,iiY,iIndex]) * (scoreY[iIndex,iiY,] %*% vcov.param)[,param.mean,drop=FALSE] )
+            }
+            ## dmu2 <- matrix(0, nrow = n.param, ncol = length(iIndex), dimnames = list(param,NULL))
+            ## dmu2[param.mean,] <- dmu[param.mean,iiY,iIndex]
+            ## diag( t(dmu2) %*% vcov.param %*% t(scoreY[iIndex,iiY,]) )
+        }
+    }
+    return(leverage)            
+}
+
+##----------------------------------------------------------------------
+### leverage2.R ends here
diff --git a/R/sCorrect-moments2.R b/R/sCorrect-moments2.R
new file mode 100644
index 0000000..56e7b2d
--- /dev/null
+++ b/R/sCorrect-moments2.R
@@ -0,0 +1,578 @@
+### conditionalMoment.R --- 
+#----------------------------------------------------------------------
+## author: Brice Ozenne
+## created: okt 27 2017 (16:59) 
+## Version: 
+## last-updated: Jan 17 2022 (19:13) 
+##           By: Brice Ozenne
+##     Update #: 1953
+#----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+#----------------------------------------------------------------------
+## 
+### Code:
+
+## * moments2 - documentation
+#' @title Compute Key Quantities of a Latent Variable Model
+#' @description Compute conditional mean, conditional variance, their first and second derivative regarding model parameters, as well as various derivatives of the log-likelihood.
+#' @name moments2
+#' 
+#' @param object a latent variable model.
+#' @param data [data.frame] dataset if different from the one used to fit the model.
+#' @param param [numeric vector] value of the model parameters if different from the estimated ones.
+#' @param initialize [logical] Pre-compute quantities dependent on the data but not on the parameters values.
+#' @param usefit [logical] Compute key quantities based on the parameter values.
+#' @param update.dmoment [logical] should the first derivative of the moments be computed/updated?
+#' @param update.d2moment [logical] should the second derivative of the moments be computed/updated?
+#' @param score [logical] should the score be output?
+#' @param information [logical] should the expected information be output?
+#' @param hessian [logical] should the hessian be output?
+#' @param vcov [logical] should the variance-covariance matrix based on the expected information be output?
+#' @param dVcov [logical] should the derivative of the variance-covariance matrix be output?
+#' @param dVcov.robust [logical]  should the derivative of the robust variance-covariance matrix be output?
+#' @param Psi [matrix]  Average first order bias in the residual variance. Only necessary for computing adjusted residuals.
+#' @param ... [internal] only used by the generic method or by the <- methods.
+#' 
+#' @details For lvmfit objects, there are two levels of pre-computation:
+#' \itemize{
+#' \item a basic one that does not involve the model coefficients (\code{conditionalMoment.lvm}).
+#' \item an advanced one that requires the model coefficients (\code{conditionalMoment.lvmfit}). 
+#' }
+#' 
+#' @examples
+#' m <- lvm(Y1~eta,Y2~eta,Y3~eta)
+#' latent(m) <- ~eta
+#'
+#' d <- lava::sim(m,1e2)
+#' e <- estimate(m, d)
+#'
+#' ## basic pre-computation
+#' res1 <- moments2(e, data = d, initialize = TRUE, usefit = FALSE,
+#'                 score = TRUE, information = TRUE, hessian = TRUE, vcov = TRUE,
+#'                 dVcov = TRUE, dVcov.robust = TRUE, residuals = TRUE, leverage = FALSE,
+#'                 derivative = "analytic")
+#' res1$skeleton$param$Sigma
+#' 
+#' ## full pre-computation
+#' res2 <- moments2(e, param = coef(e), data = d, initialize = TRUE, usefit = TRUE,
+#'                 score = TRUE, information = TRUE, hessian = TRUE, vcov = TRUE,
+#'                 dVcov = TRUE, dVcov.robust = TRUE, residuals = TRUE, leverage = FALSE,
+#'                 derivative = "analytic")
+#' res2$moment$Omega
+#'
+#' @concept small sample inference
+#' @concept derivative of the score equation
+#' 
+#' @keywords internal
+#' @export
+`moments2` <-
+    function(object, param, data, weights, Omega, Psi,
+             initialize, usefit,
+             update.dmoment, update.d2moment, score, information, hessian, vcov, dVcov, dVcov.robust, residuals, leverage, derivative) UseMethod("moments2")
+
+## * moments2.lvm
+#' @rdname moments2
+#' @export
+moments2.lvm <- function(object, param = NULL, data = NULL, weights = NULL, Omega = NULL, Psi = NULL,
+                         initialize = TRUE, usefit = TRUE,
+                         update.dmoment = TRUE, update.d2moment = TRUE, score = TRUE, information = TRUE, hessian = TRUE, vcov = TRUE, dVcov = TRUE, dVcov.robust = TRUE, residuals = TRUE, leverage = TRUE,
+                         derivative = "analytic"){
+    if(lava.options()$debug){cat("moments2 \n")}
+    
+    ## ** sanity checks
+    if(initialize == FALSE && is.null(object$sCorrect)){
+        stop("Initialization of the moments missing. \n",
+             "Consider setting the argument \'initialize\' to TRUE \n")
+    }
+    if(!missing(derivative)){
+        derivative <- match.arg(derivative, choices = c("analytic","numeric"))
+    }
+    
+    ## ** initialize
+    if(score || information || hessian || vcov || dVcov || dVcov.robust){
+        first.order <- TRUE
+    }else{
+        first.order <- FALSE
+    }
+    if(hessian || dVcov || dVcov.robust){
+        second.order <- TRUE
+    }else{
+        second.order <- FALSE
+    }
+
+    ## NOTE: the estimation of the leverage depends on the information matrix and vice versa.
+    ##       moments2 is a single-run function called by estimate2, which takes care of iterating until a stable point is reached
+    previous.vcov.param <- object$sCorrect$vcov.param
+
+    ## ** skeleton
+    if(initialize){
+        out <- list()
+
+        ## *** get information from object
+        out$endogenous <- lava::endogenous(object, format = "wide")
+        out$latent <- lava::latent(object)
+
+        if(is.null(data)){
+            out$data <- extractData(object, design.matrix = FALSE, as.data.frame = TRUE,
+                                    envir = parent.env(environment()), rm.na = TRUE)
+        }else{
+            out$data <- as.data.frame(data)
+        }
+        out$X <- as.matrix(out$data[,lava::manifest(object),drop=FALSE])
+        out$cluster <- .getGroups2(object, data = out$data, endogenous = out$endogenous)
+
+        reserved.name <- c("XXvalueXX","XXendogenousXX","XXendogenousXX.index","XXclusterXX")
+        if(any(colnames(out$X) %in% reserved.name)){
+            stop("\"",paste(reserved.name[colnames(out$X) %in% reserved.name], collapse="\" \""),"\" should not correspond to variable names \n",
+                 "It is used internally for data manipulation \n")
+        }
+
+        ## *** reshape dataset (convert to long format)
+        X.latent <- matrix(NA, nrow = NROW(out$X), ncol = length(out$latent),
+                           dimnames = list(NULL, out$latent))
+
+        X.long <- reshape2::melt(data.frame(XXclusterXX = out$cluster$name.cluster, out$X, X.latent),
+                                 id.vars = "XXclusterXX",
+                                 measure.vars = c(out$endogenous,out$latent),
+                                 variable.name = "XXendogenousXX",
+                                 value.name = "XXvalueXX")
+        X.long <- cbind(X.long,out$X[X.long$XXclusterXX,,drop=FALSE]) ## add all variable (endo and exo) in case some endo are regressors
+
+        X.wide <- out$X[,out$endogenous,drop=FALSE]
+
+        out$old2new.order <- X.long[,c("XXclusterXX","XXendogenousXX","XXvalueXX")]
+        names(out$old2new.order) <- c("XXclusterXX.old","XXendogenousXX.old","XXvalueXX.old")
+        X.long <- X.long[order(X.long$XXclusterXX,X.long$XXendogenousXX),]
+        out$old2new.order$XXclusterXX.new <- out$old2new.order$XXclusterXX
+        out$old2new.order$XXendogenousXX.new <- out$old2new.order$XXendogenousXX
+        out$old2new.order$XXvalueXX.new <- out$old2new.order$XXvalueXX
+                
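+        ## Sketch of the reshape performed above (illustration only): melt
+        ## stacks the endogenous/latent columns into one row per
+        ## (cluster, variable) pair.
+        if(FALSE){
+            dd <- data.frame(XXclusterXX = 1:2, Y1 = c(0.1, 0.2), Y2 = c(1.1, 1.2))
+            reshape2::melt(dd, id.vars = "XXclusterXX", measure.vars = c("Y1","Y2"),
+                           variable.name = "XXendogenousXX", value.name = "XXvalueXX")
+        }
+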
+        ## *** identify missing pattern
+        pattern <- X.wide
+        pattern[!is.na(pattern)] <- 1
+        pattern[is.na(pattern)] <- 0
+
+        if(!is.null(object$call$missing) && object$call$missing==FALSE){
+            unique.pattern <- unique(pattern[rowSums(pattern==0)==0,,drop=FALSE])
+            if(length(unique.pattern)==0){
+                stop("All clusters contain at least one missing value - cannot perform complete case analysis. \n",
+                     "Consider setting the argument \'missing\' to TRUE. \n")
+            }
+            XXclusterXX.NA <- unique(X.long$XXclusterXX[rowSums(is.na(X.long[,out$endogenous,drop=FALSE]))>0])
+            X.long[X.long$XXclusterXX %in% XXclusterXX.NA,c("XXvalueXX",out$endogenous)] <- NA ## add missing values to the other observations of the same cluster
+        }else{
+            unique.pattern <- unique(pattern[rowSums(pattern==1)>0,,drop=FALSE])
+        }
+        
+        name.pattern <- apply(unique.pattern, MARGIN = 1, FUN = paste0, collapse = "")
+        rownames(unique.pattern) <- name.pattern
+
+        out$missing$pattern <- tapply(1:out$cluster$n.cluster,apply(pattern, MARGIN = 1, FUN = paste0, collapse=""),list)[name.pattern]
+        out$missing$unique.pattern <- unique.pattern
+        out$missing$name.pattern <- name.pattern
+
+        ## *** initialize conditional moments
+        out$skeleton <- skeleton(object, X = X.long,
+                                 endogenous = out$endogenous, latent = out$latent,
+                                 n.cluster = out$cluster$n.cluster,
+                                 index.Omega = out$cluster$index.Omega)
+
+        ## *** initialize partial derivatives of the conditional moments
+        out$skeleton <- skeletonDtheta(out$skeleton,
+                                       X = X.long,
+                                       endogenous = out$endogenous, latent = out$latent,
+                                       missing.pattern = out$missing$pattern,
+                                       unique.pattern = out$missing$unique.pattern,
+                                       name.pattern = out$missing$name.pattern,
+                                       n.cluster = out$cluster$n.cluster,
+                                       index.Omega = out$cluster$index.Omega)
+
+        ## *** initialize second order partial derivatives of the conditional moments
+        ## GS <- skeletonDtheta2(out$skeleton)
+        out$skeleton <- skeletonDtheta2(out$skeleton)
+
+        ## *** weights
+        if(!is.null(object$weights)){
+            out$weights <- object$weights[,1]
+        }
+
+    }else{
+        ## subset to remove existing results
+        rm.name <- c("moment","dmoment","d2moment","score","vcov.param","information","hessian","dInformation","dVcov.param","dRvcov.param","leverage","residuals")
+        out <- object$sCorrect[setdiff(names(object$sCorrect),rm.name)]
+    }
+
+    ## ** update according to the value of the model coefficients
+    if(usefit){
+        if(is.null(param)){
+            param.tempo <- stats::coef(object, type = 2, labels = 1)
+            out$param <- stats::setNames(param.tempo[,"Estimate"],rownames(param.tempo))[out$skeleton$Uparam]
+            ## out$name.param <- out$name.param[names(stats::coef(object))]
+        }else{
+            if(all(names(param) %in% out$skeleton$Uparam)){ ## using user-defined names
+                ## e.g. mu1 mu2 Y1~X1 Y2~X1 sigma
+                out$param[names(param)] <- param
+            }else if(all(names(param) %in% names(out$skeleton$originalLink2param))){ ## using original link
+                ## e.g. Y1 Y2 Y1~X1 Y2~X1 Y1~~Y1
+                out$param[out$skeleton$Uparam[match(names(param),names(out$skeleton$originalLink2param))]] <- param
+            }else{
+                stop("Could not find model parameter(s) corresponding to the name(s): \"",paste(setdiff(names(param),c(out$skeleton$Uparam,names(out$skeleton$Uparam))), collapse="\" \""),"\" \n")
+            }
+        }
+        if(is.null(weights)){
+            weights <- object$weights[,1]
+        }
+        
+        out$name.param <- stats::setNames(out$skeleton$type[!is.na(out$skeleton$type$lava),"param"],
+                                   out$skeleton$type[!is.na(out$skeleton$type$lava),"originalLink"])
+
+        ## *** conditional moments
+        out$moment <- updateMoment(skeleton = out$skeleton$param,
+                                   value = out$skeleton$value,
+                                   toUpdate = out$skeleton$toUpdate.moment,
+                                   param = out$param, Omega = Omega,
+                                   name.pattern = out$missing$name.pattern,
+                                   unique.pattern = out$missing$unique.pattern,
+                                   endogenous = out$endogenous,
+                                   latent = out$latent,
+                                   n.cluster = out$cluster$n.cluster)
+
+        ## *** first order derivatives
+        if(update.dmoment && first.order){
+            out$dmoment <- updateDMoment(moment = out$moment,
+                                         skeleton = out$skeleton,
+                                         param = out$param)
+        }
+
+        
+        ## *** second order derivatives
+        if(update.d2moment && second.order){
+            out$d2moment <- updateD2Moment(moment = out$moment,
+                                           skeleton = out$skeleton,
+                                           param = out$param)
+        }
+        
+
+        ## *** update residuals
+        if(residuals || score || information || vcov || hessian || leverage){
+            if(!is.null(attr(Omega,"Omega.residuals"))){
+                OOmega <- attr(Omega,"Omega.residuals")
+            }else{
+                OOmega <- out$moment$Omega
+            }
+
+            out$residuals <- .adjustResiduals(epsilon = out$skeleton$param$endogenous - out$moment$mu,
+                                              Omega = OOmega, Psi = Psi, ## Note: if Psi is null returns epsilon i.e. no adjustment
+                                              name.pattern = out$missing$name.pattern, missing.pattern = out$missing$pattern, unique.pattern = out$missing$unique.pattern,
+                                              endogenous = out$endogenous, n.cluster = out$cluster$n.cluster)
+
+        }
+        ## mean(out$residuals^2)
+        ## out$moment$Omega
+        
+        ## *** score
+        if(score){
+            out$score <- .score2(dmu = out$dmoment$dmu,
+                                 dOmega = out$dmoment$dOmega,                    
+                                 epsilon = out$residuals,
+                                 OmegaM1 = out$moment$OmegaM1.missing.pattern,
+                                 missing.pattern = out$missing$pattern,
+                                 unique.pattern = out$missing$unique.pattern,
+                                 name.pattern = out$missing$name.pattern,
+                                 name.param = out$skeleton$Uparam,
+                                 name.meanparam = out$skeleton$Uparam.mean,
+                                 name.varparam = out$skeleton$Uparam.var,
+                                 n.cluster = out$cluster$n.cluster,
+                                 weights = weights)
+        }
+
+        ## *** leverage
+        if((leverage || information || vcov) && !is.null(previous.vcov.param)){
+            if(!is.null(attr(Omega,"Omega.leverage"))){
+                OOmega <- attr(Omega,"Omega.leverage")
+            }else{
+                OOmega <- out$moment$Omega
+            }
+            if(!is.null(attr(Omega,"dOmega.leverage"))){
+                ddOOmega <- attr(Omega,"dOmega.leverage")
+            }else{
+                ddOOmega <- out$dmoment$dOmega
+            }
+
+            out$leverage <- .leverage2(Omega = OOmega, 
+                                       epsilon = out$residuals,
+                                       dmu = aperm(abind::abind(out$dmoment$dmu, along = 3), perm = c(3,2,1)),
+                                       dOmega = ddOOmega,
+                                       vcov.param = previous.vcov.param,
+                                       name.pattern = out$missing$name.pattern,
+                                       missing.pattern = out$missing$pattern,
+                                       unique.pattern = out$missing$unique.pattern,
+                                       endogenous = out$endogenous,
+                                       n.endogenous = length(out$endogenous),
+                                       param = out$skeleton$Uparam,
+                                       param.mean = out$skeleton$Uparam.mean,
+                                       param.var = out$skeleton$Uparam.var,
+                                       n.cluster = out$cluster$n.cluster)
+        }
+
+        ## *** information matrix
+        if(information || vcov){
+            if(!is.null(attr(Omega,"dOmega.leverage"))){
+                ddOOmega <- attr(Omega,"dOmega.leverage")
+            }else{
+                ddOOmega <- out$dmoment$dOmega
+            }
+
+            out$information <- .information2(dmu = out$dmoment$dmu,
+                                             dOmega = ddOOmega,##out$dmoment$dOmega,
+                                             OmegaM1 = out$moment$OmegaM1.missing.pattern,
+                                             missing.pattern = out$missing$pattern,
+                                             unique.pattern = out$missing$unique.pattern,
+                                             name.pattern = out$missing$name.pattern,
+                                             grid.mean = out$skeleton$grid.dmoment$mean, 
+                                             grid.var = out$skeleton$grid.dmoment$var, 
+                                             name.param = out$skeleton$Uparam,
+                                             leverage = out$leverage,
+                                             n.cluster = out$cluster$n.cluster,
+                                             weights = weights)
+        }
+
+        if(vcov){
+            out$vcov.param  <- .info2vcov(out$information, attr.info = FALSE)
+        }
+    
+        ## *** hessian
+        if(hessian || (derivative == "analytic") && dVcov.robust){
+            out$hessian <- .hessian2(dmu = out$dmoment$dmu,
+                                     dOmega = out$dmoment$dOmega,
+                                     d2mu = out$d2moment$d2mu,
+                                     d2Omega = out$d2moment$d2Omega,
+                                     epsilon = out$residuals,                                     
+                                     OmegaM1 = out$moment$OmegaM1.missing.pattern,
+                                     missing.pattern = out$missing$pattern,
+                                     unique.pattern = out$missing$unique.pattern,
+                                     name.pattern = out$missing$name.pattern,
+                                     grid.mean = out$skeleton$grid.dmoment$mean, 
+                                     grid.var = out$skeleton$grid.dmoment$var, 
+                                     grid.hybrid = out$skeleton$grid.dmoment$hybrid, 
+                                     name.param = out$skeleton$Uparam,
+                                     leverage = out$leverage,
+                                     n.cluster = out$cluster$n.cluster,
+                                     weights = weights)
+        }
+
+        ## *** dVcov.param (model based variance, analytic)
+        if((dVcov || dVcov.robust) && (derivative == "analytic")){
+            out$dInformation <- .dInformation2(dmu = out$dmoment$dmu,
+                                               dOmega = out$dmoment$dOmega,
+                                               d2mu = out$d2moment$d2mu,
+                                               d2Omega = out$d2moment$d2Omega,
+                                               OmegaM1 = out$moment$OmegaM1.missing.pattern,
+                                               missing.pattern = out$missing$pattern,
+                                               unique.pattern = out$missing$unique.pattern,
+                                               name.pattern = out$missing$name.pattern,
+                                               grid.3varD1 = out$skeleton$grid.3varD1,
+                                               grid.2meanD1.1varD1 = out$skeleton$grid.2meanD1.1varD1,
+                                               grid.2meanD2.1meanD1 = out$skeleton$grid.2meanD2.1meanD1,
+                                               grid.2varD2.1varD1 = out$skeleton$grid.2varD2.1varD1,
+                                               name.param = out$skeleton$Uparam,
+                                               leverage = out$leverage,
+                                               n.cluster = out$cluster$n.cluster,
+                                               weights = weights)
+
+            ## delta method
+            out$dVcov.param <- .dVcov.param(vcov.param = out$vcov.param,
+                                            dInformation = out$dInformation,
+                                            n.param = length(out$skeleton$Uparam),
+                                            name.param = out$skeleton$Uparam)
+        }
+
+        ## *** dRvcov.param (robust variance, analytic)
+        if(dVcov.robust && (derivative == "analytic")){
+            out$dRvcov.param <- .dRvcov.param(score = out$score,
+                                              hessian = out$hessian,
+                                              vcov.param = out$vcov.param,
+                                              dVcov.param = out$dVcov.param,
+                                              n.param = length(out$skeleton$Uparam),
+                                              name.param = out$skeleton$Uparam)
+        }
+
+    ## *** dVcov.param and dRvcov.param (numeric derivatives)
+    if((dVcov || dVcov.robust) && derivative == "numeric"){
+        if(requireNamespace("numDeriv", quietly = TRUE) == FALSE){
+            stop("There is no package \'numDeriv\' \n",
+                 "This package is necessary when argument \'derivative\' equals \"numeric\" \n")
+        }
+
+        ## range(out$score - .warper.numDev(value = out$param, object = object, type = "score"))
+        ## range(out$hessian - .warper.numDev(value = out$param, object = object, type = "hessian"))
+        ## range(out$vcov.param - .warper.numDev(value = out$param, object = object, type = "vcov.model"))
+
+        param <- out$param
+        name.param <- names(out$param)
+        n.param <- length(param)
+        n.cluster <- out$cluster$n.cluster
+        object2 <- object
+        object2$sCorrect <- out
+
+        ## *** hessian
+        ## print(range(.warper.numDev(param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "score")-out$score))
+        num.hessian <- numDeriv::jacobian(.warper.numDev, x = param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "score", method = "Richardson")
+
+        out$hessian <- aperm(array(num.hessian, dim = c(n.cluster,n.param,n.param),
+                                               dimnames = list(NULL, name.param, name.param)), perm = 3:1)
+        
+        ## *** dInformation
+        ## print(range(.warper.numDev(param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "information")-out$information))
+        num.information <- numDeriv::jacobian(.warper.numDev, x = param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "information", method = "Richardson")
+
+        out$dInformation <- array(num.information, dim = c(n.param,n.param,n.param),
+                                              dimnames = list(name.param, name.param, name.param))
+
+        ## *** dVcov.param
+        ## print(range(.warper.numDev(param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "vcov")-out$vcov.param))
+        num.dVcov.param <- numDeriv::jacobian(.warper.numDev, x = param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "vcov", method = "Richardson")
+
+        out$dVcov.param <- array(num.dVcov.param, dim = c(n.param,n.param,n.param),
+                                             dimnames = list(name.param, name.param, name.param))
+
+
+        ## *** dRvcov.param
+        num.dRvcov.param <- numDeriv::jacobian(.warper.numDev, x = param, object = object2, weights = weights, Omega = Omega, Psi = Psi, type = "vcov.robust", method = "Richardson")
+
+        out$dRvcov.param <- array(num.dRvcov.param, dim = c(n.param,n.param,n.param),
+                                              dimnames = list(name.param, name.param, name.param))
+    }        
+    
+        
+    }
+
+    ## ** export
+    return(out)
+}
+
+## * moments2.lvmfit
+#' @rdname moments2
+#' @export
+moments2.lvmfit <- moments2.lvm
+
+## * .warper.numDev (helper)
+.warper.numDev <- function(value, object, type, weights, Psi = NULL, Omega = NULL){ # x <- p.obj
+    ## CANNOT DO DIRECTLY VIA moments2
+    ## (because Omega should not be fixed in updateMoment but it should be fixed (as well as Psi) when updating the residuals)
+    
+    out <- object$sCorrect
+    out$moment <- updateMoment(skeleton = out$skeleton$param,
+                               value = out$skeleton$value,
+                               toUpdate = out$skeleton$toUpdate.moment,
+                               param = value, Omega = NULL,
+                               name.pattern = out$missing$name.pattern,
+                               unique.pattern = out$missing$unique.pattern,
+                               endogenous = out$endogenous,
+                               latent = out$latent,
+                               n.cluster = out$cluster$n.cluster)
+
+    out$dmoment <- updateDMoment(moment = out$moment,
+                                 skeleton = out$skeleton,
+                                 param = out$param)
+
+    if(type %in% c("score","vcov.robust")){
+        out$residuals <- .adjustResiduals(epsilon = out$skeleton$param$endogenous - out$moment$mu,
+                                          Omega = Omega, Psi = Psi, ## FIXED
+                                          name.pattern = out$missing$name.pattern, missing.pattern = out$missing$pattern, unique.pattern = out$missing$unique.pattern,
+                                          endogenous = out$endogenous, n.cluster = out$cluster$n.cluster)
+
+        out$score <- .score2(dmu = out$dmoment$dmu,
+                             dOmega = out$dmoment$dOmega,                    
+                             epsilon = out$residuals,
+                             OmegaM1 = out$moment$OmegaM1.missing.pattern,
+                             missing.pattern = out$missing$pattern,
+                             unique.pattern = out$missing$unique.pattern,
+                             name.pattern = out$missing$name.pattern,
+                             name.param = out$skeleton$Uparam,
+                             name.meanparam = out$skeleton$Uparam.mean,
+                             name.varparam = out$skeleton$Uparam.var,
+                             n.cluster = out$cluster$n.cluster,
+                             weights = weights)
+    }
+
+    if(type %in% c("information","vcov","vcov.robust")){
+        out$information <- .information2(dmu = out$dmoment$dmu,
+                                         dOmega = out$dmoment$dOmega,
+                                         OmegaM1 = out$moment$OmegaM1.missing.pattern,
+                                         missing.pattern = out$missing$pattern,
+                                         unique.pattern = out$missing$unique.pattern,
+                                         name.pattern = out$missing$name.pattern,
+                                         grid.mean = out$skeleton$grid.dmoment$mean, 
+                                         grid.var = out$skeleton$grid.dmoment$var, 
+                                         name.param = out$skeleton$Uparam,
+                                         leverage = out$leverage, ## FIXED
+                                         n.cluster = out$cluster$n.cluster,
+                                         weights = weights)
+    }
+
+    if(type %in% c("vcov","vcov.robust")){
+        out$vcov  <- .info2vcov(out$information, attr.info = FALSE)
+    }
+
+    if(type %in% c("vcov.robust")){
+        out$vcov.robust  <- out$vcov %*% crossprod(out$score) %*% out$vcov
+    }
+
+
+    return(out[[type]])
+}
+
+
+## * .info2vcov (helper)
+#' @title Invert the Information Matrix
+#' @description Compute the inverse of the information matrix.
+#' @name vcov2-internal
+#'
+#' @param information [matrix] the information matrix to be inverted.
+#' @param attr.info [logical] should the information matrix be returned as an attribute?
+#' 
+#' @keywords internal
+.info2vcov <- function(information, attr.info = FALSE){
+    vcov <- try(chol2inv(chol(information)), silent = TRUE)
+    if(inherits(vcov, "try-error")){
+        vcov <- try(solve(information), silent = TRUE)
+        if(inherits(vcov, "try-error")){ ## try by block
+            cat("Singular information matrix: try to inverse it by block \n")
+            information.N0 <- abs(information)>1e-10
+            remaining.var <- colnames(information)
+            vcov <- matrix(0, nrow = NROW(information), ncol = NCOL(information),
+                           dimnames = dimnames(information))
+            while(length(remaining.var)>0){
+                current.set <- remaining.var[1]
+                new.set <- unique(unlist(apply(information.N0[current.set,,drop=FALSE],1,function(iRow){list(names(iRow[iRow==1]))})))
+                while(length(current.set)<length(new.set)){
+                    current.set <- new.set
+                    new.set <- unique(unlist(apply(information.N0[current.set,,drop=FALSE],1,function(iRow){list(names(iRow[iRow==1]))})))
+                }
+                if(length(new.set)>0){
+                    iTry <- try(solve(information[current.set,current.set]), silent = TRUE)
+                    if(inherits(iTry,"try-error")){
+                        vcov[current.set,current.set] <- NA
+                    }else{
+                        vcov[current.set,current.set] <- iTry
+                    }
+                }
+                remaining.var <- setdiff(remaining.var, current.set)
+            }
+        }
+    }
+    if(attr.info){
+        attr(vcov,"information") <- information
+    }
+    if(!inherits(vcov, "try-error")){
+        dimnames(vcov) <- dimnames(information)
+    }
+    return(vcov)
+}
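To make the block-wise fallback in .info2vcov concrete, here is a minimal standalone sketch (not part of the package; the 4x4 matrix and parameter names p1..p4 are made up for illustration). A block-diagonal information matrix with one singular block defeats chol(), yet the invertible block can still be recovered block by block:

## block-diagonal information: an invertible 2x2 block and a singular 2x2 block
info <- matrix(0, 4, 4, dimnames = rep(list(paste0("p", 1:4)), 2))
info[1:2, 1:2] <- matrix(c(2, 1, 1, 2), 2, 2)  ## positive definite
info[3:4, 3:4] <- matrix(c(1, 1, 1, 1), 2, 2)  ## singular (rank 1)

## chol() fails on the full matrix ...
inherits(try(chol2inv(chol(info)), silent = TRUE), "try-error")  ## TRUE

## ... but inverting block by block recovers the first block,
## leaving NA for the singular block, as .info2vcov does
vcov <- matrix(NA, 4, 4, dimnames = dimnames(info))
vcov[1:2, 1:2] <- solve(info[1:2, 1:2])
vcov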
diff --git a/R/sCorrect-nobs2.R b/R/sCorrect-nobs2.R
new file mode 100644
index 0000000..87929fb
--- /dev/null
+++ b/R/sCorrect-nobs2.R
@@ -0,0 +1,65 @@
+### sCorrect-nobs2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: Jan  4 2022 (14:37) 
+## Version: 
+## Last-Updated: Jan  6 2022 (15:53) 
+##           By: Brice Ozenne
+##     Update #: 23
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * documentation - nobs2
+#' @title Effective Sample Size.
+#' @description Extract the effective sample size, i.e. the sample size minus the loss in degrees of freedom caused by estimating the parameters.
+#' @name nobs2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object.
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the leverage.
+#' 
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#' 
+#' @return A numeric vector whose length equals the number of endogenous variables.
+#' 
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' 
+#' @export
+`nobs2` <-
+    function(object, ssc, ...) UseMethod("nobs2")
+
+## * nobs2.lvmfit
+#' @rdname nobs2
+#' @export
+nobs2.lvmfit <- function(object, ssc = lava.options()$ssc, ...){
+
+    return(nobs2(estimate2(object, ssc = ssc, ...)))
+
+}
+
+## * nobs2.lvmfit2
+#' @rdname nobs2
+#' @export
+nobs2.lvmfit2 <- function(object, ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    return(stats::nobs(object) - colSums(leverage2(object, format = "wide")))
+}
+
+##----------------------------------------------------------------------
+### sCorrect-nobs2.R ends here
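A minimal usage sketch (the simulated model below is made up for illustration; lava and lavaSearch2 are assumed to be attached). By the definition in nobs2.lvmfit2 above, the result equals stats::nobs minus the column sums of the leverage:

library(lava)
library(lavaSearch2)

set.seed(10)
m <- lvm(Y1 ~ eta, Y2 ~ eta, Y3 ~ eta)
latent(m) <- ~eta
d <- lava::sim(m, 50)

e2.lvm <- estimate2(estimate(m, data = d))
nobs2(e2.lvm)  ## effective sample size for each endogenous variable
## same values, by definition of nobs2.lvmfit2:
nobs(e2.lvm) - colSums(leverage2(e2.lvm, format = "wide"))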
diff --git a/R/sCorrect-residuals2.R b/R/sCorrect-residuals2.R
new file mode 100644
index 0000000..8155da3
--- /dev/null
+++ b/R/sCorrect-residuals2.R
@@ -0,0 +1,201 @@
+### sCorrect-residuals2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 18 2019 (11:17) 
+## Version: 
+## Last-Updated: Jan 11 2022 (16:00) 
+##           By: Brice Ozenne
+##     Update #: 140
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation
+#' @title Residuals With Small Sample Correction.
+#' @description Extract residuals from a latent variable model.
+#' Similar to \code{stats::residuals} but with small sample correction.
+#' @name residuals2
+#' 
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param type [character] the type of residual to extract:
+#' \code{"response"} for raw residuals,
+#' \code{"studentized"} for studentized residuals,
+#' \code{"normalized"} for normalized residuals.
+#' @param format [character] Use \code{"wide"} to return the residuals in the wide format (one row relative to each sample).
+#' Otherwise use \code{"long"} to return the residuals in the long format.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the residuals.
+#'
+#' The raw residuals are defined by  observation minus the fitted value:
+#' \deqn{
+#' \varepsilon = (Y_1 - \mu_1, ..., Y_m - \mu_m)
+#' }
+#' The studentized residuals divide the raw residual of each endogenous variable by its modeled standard deviation.
+#' \deqn{
+#' \varepsilon_{stud} =(\frac{Y_1 - \mu_1}{\sigma_1}, ..., \frac{Y_m - \mu_m}{\sigma_m})
+#' }
+#' The normalized residuals multiply the raw residuals by the inverse of the square root of the modeled residual variance-covariance matrix.
+#' \deqn{
+#' \varepsilon_{norm} = \varepsilon \Omega^{-1/2}
+#' }
+#' @return When \code{format="wide"}, a matrix containing the residuals relative to each sample (in rows)
+#' and each endogenous variable (in columns). When \code{format="long"}, a vector of residuals.
+#' 
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`residuals2` <-
+    function(object, type, format, ssc, ...) UseMethod("residuals2")
+
+## * Examples
+#' @rdname residuals2
+#' @examples
+#' #### simulate data ####
+#' set.seed(10)
+#' n <- 101
+#'
+#' Y1 <- rnorm(n, mean = 0)
+#' Y2 <- rnorm(n, mean = 0.3)
+#' Id <- findInterval(runif(n), seq(0.1,1,0.1))
+#' data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
+#'            data.frame(Y=Y2,G="2",Id = Id)
+#'            )
+#'
+#' #### latent variable models ####
+#' library(lava)
+#' e.lvm <- estimate(lvm(Y ~ G), data = data.df)
+#' residuals(e.lvm)
+#' residuals2(e.lvm)
+#' residuals(e.lvm) - residuals2(e.lvm)
+#'
+
+## * residuals2.lvmfit
+#' @export
+residuals2.lvmfit <- function(object, type = "response", format = "wide", ssc = lava.options()$ssc, ...){
+
+    return(residuals(estimate2(object, ssc = ssc, ...), type = type, format = format))
+
+}
+
+## * residuals2.lvmfit2
+#' @export
+residuals2.lvmfit2 <- function(object, type = "response", format = "wide", ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    format <- match.arg(format, choices = c("long","wide"))
+
+    residuals <- .normalizeResiduals(epsilon = object$sCorrect$residuals,
+                                     Omega = object$sCorrect$moment$Omega,
+                                     type = type,
+                                     missing.pattern = object$sCorrect$missing$pattern,
+                                     unique.pattern = object$sCorrect$missing$unique.pattern,
+                                     Omega.missing.pattern = object$sCorrect$moment$Omega.missing.pattern)
+    if(format == "wide"){
+        return(residuals)
+    }else if(format == "long"){
+        endogenous <- colnames(residuals)
+        n.endogenous <- length(endogenous)
+        
+        residualsW <- data.frame(1:NROW(residuals), residuals)
+        names(residualsW) <- c("cluster",endogenous)
+        residualsL <- stats::na.omit(stats::reshape(residualsW,
+                                                    idvar = "cluster",
+                                                    direction = "long",
+                                                    varying = list(endogenous),
+                                                    timevar = "endogenous",
+                                                    v.names = "residual"))
+
+        rownames(residualsL) <- NULL
+        residualsL$endogenous <- factor(residualsL$endogenous, levels = 1:n.endogenous, labels = endogenous)
+        reorder <- match(interaction(object$sCorrect$old2new.order$XXclusterXX.old, object$sCorrect$old2new.order$XXendogenousXX.old),
+                         interaction(residualsL$cluster,residualsL$endogenous))
+        return(residualsL[reorder,"residual"])
+    }
+}
+
+## * residuals.lvmfit2
+#' @export
+residuals.lvmfit2 <- residuals2.lvmfit2
+
+## * .normalizeResiduals
+.normalizeResiduals <- function(epsilon, Omega, type,
+                                missing.pattern, unique.pattern, Omega.missing.pattern){
+    type <- match.arg(type, choices = c("response","studentized","normalized"), several.ok = FALSE)
+
+    if(type %in% c("studentized")){
+        epsilon <- sweep(epsilon,
+                         STATS = sqrt(diag(Omega)),
+                         FUN = "/",
+                         MARGIN = 2)
+        ## object$sCorrect$residuals/epsilon
+    }else if(type=="normalized"){
+        name.endogenous <- colnames(epsilon)
+        if(!any(is.na(epsilon))){
+            epsilon <- epsilon %*% matrixPower(Omega, symmetric = TRUE, power = -1/2)
+        }else{
+            iOmegaHalf.missing.pattern <- lapply(Omega.missing.pattern,matrixPower,symmetric = TRUE, power = -1/2)
+            for(iP in names(missing.pattern)){
+                iY <- which(unique.pattern[iP,]==1)
+                for(iC in missing.pattern[[iP]]){ ## iC <- 1
+                    epsilon[iC,iY] <- epsilon[iC,iY] %*% iOmegaHalf.missing.pattern[[iP]]
+                }
+            }
+            
+        }
+        colnames(epsilon) <- name.endogenous
+    }
+
+    return(epsilon)
+}
+
+## * .adjustResiduals
+.adjustResiduals <- function(epsilon, Psi, Omega, 
+                             name.pattern, missing.pattern, unique.pattern,
+                             endogenous, n.endogenous, n.cluster){
+    if(is.null(Psi)){return(epsilon)}
+    n.endogenous <- length(endogenous)
+
+    epsilon.adj <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
+                          dimnames = list(NULL, endogenous))
+    n.pattern <- NROW(unique.pattern)
+    
+    for(iP in 1:n.pattern){ ## iP <- 1 
+        iIndex <- missing.pattern[[iP]]
+        iY <- which(unique.pattern[iP,]==1)
+        
+        iOmega <- Omega[iY,iY,drop=FALSE]
+        iPsi <- Psi[iY,iY,drop=FALSE]
+
+        iOmega.chol <- matrixPower(iOmega, symmetric = TRUE, power = 1/2)
+        iH <- iOmega %*% iOmega - iOmega.chol %*% iPsi %*% iOmega.chol
+        iHM1 <- tryCatch(matrixPower(iH, symmetric = TRUE, power = -1/2), warning = function(w){w})
+        if(inherits(iHM1,"warning")){
+            stop("Cannot compute the adjusted residuals \n",
+                 "Estimated bias too large compared to the estimated variance-covariance matrix \n",
+                 "Consider setting argument \'adjust.n\' to FALSE when calling sCorrect \n")
+        }
+        epsilon.adj[iIndex,iY] <- epsilon[iIndex,iY,drop=FALSE] %*% iOmega.chol %*% iHM1 %*% iOmega.chol
+    }
+
+    return(epsilon.adj)
+}
+
+
+
+######################################################################
+### sCorrect-residuals2.R ends here
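The residual types documented above can be reproduced with plain matrix algebra. A minimal sketch, assuming a known residual variance-covariance matrix Omega and replacing the internal matrixPower by an explicit eigen-decomposition:

set.seed(10)
Omega <- matrix(c(2, 0.5, 0.5, 1), 2, 2)  ## modeled variance-covariance
## raw (response) residuals simulated with covariance Omega
epsilon <- matrix(rnorm(2000), ncol = 2) %*% chol(Omega)

## studentized: divide each endogenous variable by its modeled standard deviation
epsilon.stud <- sweep(epsilon, MARGIN = 2, STATS = sqrt(diag(Omega)), FUN = "/")

## normalized: multiply by Omega^{-1/2}, computed here via eigen-decomposition
## (standing in for matrixPower(Omega, symmetric = TRUE, power = -1/2))
eig <- eigen(Omega, symmetric = TRUE)
OmegaM12 <- eig$vectors %*% diag(1/sqrt(eig$values)) %*% t(eig$vectors)
epsilon.norm <- epsilon %*% OmegaM12

## sanity check: normalized residuals have (approximately) identity covariance
round(crossprod(epsilon.norm)/NROW(epsilon.norm), 2)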
diff --git a/R/sCorrect-score2.R b/R/sCorrect-score2.R
new file mode 100644
index 0000000..568ccf6
--- /dev/null
+++ b/R/sCorrect-score2.R
@@ -0,0 +1,191 @@
+### score2.R --- 
+#----------------------------------------------------------------------
+## author: Brice Ozenne
+## created: okt 12 2017 (16:43) 
+## Version: 
+## last-updated: Jan 17 2022 (23:21) 
+##           By: Brice Ozenne
+##     Update #: 2407
+#----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+#----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - score2
+#' @title  Score With Small Sample Correction
+#' @description  Extract the (individual) score a the latent variable model.
+#' Similar to \code{lava::score} but with small sample correction.
+#' @name score2
+#'
+#' @param object,x a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param indiv [logical] If \code{TRUE}, the score relative to each observation is returned. Otherwise the total score is returned.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param as.lava [logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#'
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the score.
+#'
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#'
+#' @return When argument \code{indiv} is \code{TRUE}, a matrix containing the score relative to each sample (in rows)
+#' and each model coefficient (in columns). Otherwise, a numeric vector whose length equals the number of model coefficients.
+#' 
+#' @examples
+#' #### simulate data ####
+#' n <- 5e1
+#' p <- 3
+#' X.name <- paste0("X",1:p)
+#' link.lvm <- paste0("Y~",X.name)
+#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
+#'
+#' m <- lvm(formula.lvm)
+#' distribution(m,~Id) <- Sequence.lvm(0)
+#' set.seed(10)
+#' d <- lava::sim(m,n)
+#'
+#' #### linear models ####
+#' e.lm <- lm(Y~X1+X2+X3, data = d)
+#' 
+#' #### latent variable models ####
+#' m.lvm <- lvm(formula.lvm)
+#' e.lvm <- estimate(m.lvm,data=d)
+#' e2.lvm <- estimate2(m.lvm,data=d)
+#' score.tempo <- score(e2.lvm, indiv = TRUE)
+#' colSums(score.tempo)
+#'
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`score2` <-
+  function(object, indiv, cluster, as.lava, ...) UseMethod("score2")
+
+## * score2.lvmfit
+#' @rdname score2
+#' @export
+score2.lvmfit <- function(object, indiv = FALSE, cluster = NULL, as.lava = TRUE, ssc = lava.options()$ssc, ...){
+
+    return(lava::score(estimate2(object, ssc = ssc, ...), indiv = indiv, cluster = cluster, as.lava = as.lava))
+
+}
+
+## * score2.lvmfit2
+#' @rdname score2
+#' @export
+score2.lvmfit2 <- function(object, indiv = FALSE, cluster = NULL, as.lava = TRUE, ...){
+    
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+    ## ** define cluster
+    if(length(cluster) == 1 && (is.numeric(cluster) || is.character(cluster) || is.factor(cluster))){
+        data <- object$sCorrect$data
+        if(length(cluster)==1){                
+            if(cluster %in% names(data) == FALSE){
+                stop("Invalid \'cluster\' argument \n",
+                     "Could not find variable \"",cluster,"\" in argument \'data\' \n")
+            }
+            cluster <- data[[cluster]]
+        }
+        cluster.index <- as.numeric(factor(cluster, levels = unique(cluster)))            
+        n.cluster <- length(unique(cluster.index))
+    }else if(is.vector(cluster)){
+        cluster.index <- as.numeric(factor(cluster, levels = unique(cluster)))
+        n.cluster <- length(unique(cluster.index))
+    }else if(is.null(cluster)){ ## NOTE: cluster is a function in the survival package
+        cluster <- NULL
+        n.cluster <- object$sCorrect$cluster$n.cluster
+        cluster.index <- 1:n.cluster
+    }else{
+        stop("Do not know how to handle argument cluster of class ",class(cluster),"\n")
+    }
+
+    ## ** get score
+    score <- object$sCorrect$score
+    if(!is.null(cluster)){ ## aggregate score by cluster
+        score <- rowsum(score, group = cluster.index, reorder = FALSE)
+    }
+    
+    ## ** export
+    score <- score[,names(object$sCorrect$skeleton$originalLink2param),drop=FALSE]
+    if(as.lava==FALSE){
+        colnames(score) <- as.character(object$sCorrect$skeleton$originalLink2param)
+    }
+    if(!is.null(cluster)){
+        index2.cluster <- tapply(1:length(cluster),cluster,list)
+        attr(score,"cluster") <- names(index2.cluster)
+    }
+    
+    if(indiv){
+        return(score)
+    }else{
+        return(colSums(score))
+    }
+}
+
+## * score.lvmfit2
+#' @rdname score2
+#' @export
+score.lvmfit2 <- function(x, indiv = FALSE, cluster = NULL, as.lava = TRUE, ...){## necessary since the first argument of score must be named x
+    score2(x, indiv = indiv, cluster = cluster, as.lava = as.lava, ...)
+}
+
+## * .score2
+#' @title Compute the Corrected Score.
+#' @description Compute the corrected score.
+#' @name score2-internal
+#' 
+#' @param n.cluster [integer >0] the number of observations.
+#' 
+#' @keywords internal
+.score2 <- function(dmu, dOmega, epsilon, OmegaM1,
+                    missing.pattern, unique.pattern, name.pattern,
+                    name.param, name.meanparam, name.varparam,
+                    n.cluster, weights){
+    if(lava.options()$debug){cat(".score2\n")}
+
+    ## ** Prepare
+    out.score <- matrix(NA, nrow = n.cluster, ncol = length(name.param),
+                        dimnames = list(NULL,name.param))
+    n.pattern <- length(name.pattern)
+    
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+
+        iEpsilon.OmegaM1 <- epsilon[iIndex,iY,drop=FALSE] %*% iOmegaM1
+        out.score[iIndex,] <- 0 ## initialize (keep NA for missing values)
+
+        ## *** Compute score relative to the mean coefficients
+        for(iParam in name.meanparam){ # iParam <- "Y3~eta"
+            out.score[iIndex,iParam] <- out.score[iIndex,iParam] + rowSums(dmu[[iParam]][iIndex,iY,drop=FALSE] * iEpsilon.OmegaM1)
+        }
+        
+        ## *** Compute score relative to the variance-covariance coefficients
+        for(iParam in name.varparam){ # iParam <- "eta~~eta"
+            term2 <- - 1/2 * tr(iOmegaM1 %*% dOmega[[iParam]][iY,iY,drop=FALSE])            
+            term3 <- 1/2 * rowSums(iEpsilon.OmegaM1 %*% dOmega[[iParam]][iY,iY,drop=FALSE] * iEpsilon.OmegaM1)
+            out.score[iIndex,iParam] <- out.score[iIndex,iParam] + as.double(term2) + term3
+        }        
+    }
+
+    ## ** export
+    if(!is.null(weights)){
+        out.score <- sweep(out.score, STATS = weights, MARGIN = 1, FUN = "*")
+    }
+    return(out.score)
+}
+
+
+#----------------------------------------------------------------------
+### score2.R ends here
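As a sanity check on the score terms assembled in .score2 (dmu * Omega^{-1} * epsilon for the mean parameters, term2 + term3 for the variance parameters), here is a minimal sketch of the univariate Gaussian analogue, comparing the analytical score with a numerical gradient from numDeriv (listed in Suggests); the function names are made up for illustration:

library(numDeriv)

set.seed(10)
y <- rnorm(5, mean = 1, sd = 2)

## log-likelihood of a univariate normal parametrized by (mu, sigma2)
logLik.gauss <- function(theta, y){
    sum(dnorm(y, mean = theta[1], sd = sqrt(theta[2]), log = TRUE))
}

## analytical score, the univariate analogue of .score2:
## mean parameter    : (y - mu)/sigma2                          (dmu * OmegaM1 * epsilon)
## variance parameter: -1/(2*sigma2) + (y - mu)^2/(2*sigma2^2)  (term2 + term3)
score.gauss <- function(theta, y){
    epsilon <- y - theta[1]
    c(mu = sum(epsilon)/theta[2],
      sigma2 = sum(-1/(2*theta[2]) + epsilon^2/(2*theta[2]^2)))
}

theta0 <- c(mu = 0.8, sigma2 = 3)
range(score.gauss(theta0, y) - numDeriv::grad(logLik.gauss, theta0, y = y))
## numerically close to 0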
diff --git a/R/sCorrect-skeleton.R b/R/sCorrect-skeleton.R
new file mode 100644
index 0000000..747d249
--- /dev/null
+++ b/R/sCorrect-skeleton.R
@@ -0,0 +1,759 @@
+### sCorrect-skeleton.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov  8 2017 (10:35) 
+## Version: 
+## Last-Updated: jan 17 2022 (14:44) 
+##           By: Brice Ozenne
+##     Update #: 1697
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - skeleton
+#' @title Pre-computation for the Score
+#' @description Pre-compute quantities that are necessary to compute the score of a lvm model.
+#' @name skeleton
+#' 
+#' @param object a \code{lvm} object.
+#' @param X [matrix] design matrix containing the covariates for each endogenous and latent variable.
+#' @param endogenous [character vector] the name of the endogenous variables.
+#' @param latent [character vector] the name of the latent variables.
+#' @param ... [internal] only used by the generic method.
+#' 
+#' @details
+#' When the user specifies names for the coefficients (e.g. Y1[mu:sigma]) or uses constraints (Y1~beta*X1), \code{as.lava=FALSE} will use the names specified by the user (e.g. mu, sigma, beta)
+#' while \code{as.lava=TRUE} will use the name of the first link defining the coefficient.
+#'
+#' @examples
+#' \dontrun{
+#' skeleton <- lavaSearch2::skeleton
+#' skeleton.lvm <- lavaSearch2::skeleton.lvm
+#' skeleton.lvmfit <- lavaSearch2::skeleton.lvmfit
+#' 
+#' ## without constrain
+#' m <- lvm(Y1~X1+X2+eta,Y2~X3+eta,Y3~eta)
+#' latent(m) <- ~eta
+#' 
+#' e <- estimate(m, lava::sim(m,1e2))
+#' M.data <- as.matrix(model.frame(e))
+#'
+#' skeleton(e$model, as.lava = TRUE,
+#'          name.endogenous = endogenous(e), n.endogenous = 3,
+#'          name.latent = latent(e), 
+#'          update.value = FALSE)
+#' skeleton(e, data = M.data, p = pars(e), as.lava = TRUE,
+#'          name.endogenous = endogenous(e), n.endogenous = 3,
+#'          name.latent = latent(e), 
+#'          update.value = TRUE)
+#'
+#' ## with constrains
+#' m <- lvm(Y[mu:sigma] ~ beta*X1+X2)
+#' e <- estimate(m, lava::sim(m,1e2))
+#' M.data <- as.matrix(model.frame(e))
+#'
+#' skeleton(e$model, as.lava = TRUE,
+#'          name.endogenous = "Y", n.endogenous = 1,
+#'          name.latent = NULL, 
+#'          update.value = FALSE)$skeleton
+#' 
+#' skeleton(e, data = M.data, p = pars(e), as.lava = FALSE,
+#'          name.endogenous = "Y", n.endogenous = 1,
+#'          name.latent = NULL, 
+#'          update.value = FALSE)$skeleton
+#' 
+#'}
+#' @concept small sample inference
+#' @concept derivative of the score equation
+#' @keywords internal
+
+
+## * skeleton
+#' @rdname skeleton
+#' @export
+skeleton <- function(object, X,
+                     endogenous, latent,
+                     n.cluster, index.Omega){
+    if(lava.options()$debug){cat("skeleton \n")}
+
+    n.endogenous <- length(endogenous)
+    n.latent <- length(latent)
+    n.obs <- NROW(X)
+    obsByEndoInX <- tapply(1:n.obs,X$XXendogenousXX,list)
+
+    ## ** extract table type
+    type <- coefType(object, as.lava = FALSE)
+    type.theta <- type[type$marginal == FALSE,,drop=FALSE]
+    theta.value <- list()
+    theta.param <- list()
+
+    ## ** prepare value and param
+    if("nu" %in% type.theta$detail){
+        type.nu <- type.theta[type.theta$detail %in% "nu",,drop=FALSE]
+
+        theta.value$nu <- stats::setNames(rep(NA, n.endogenous), endogenous)
+        if(any(!is.na(type.nu$value))){
+            theta.value$nu[type.nu[!is.na(type.nu$value),"Y"]] <- type.nu[!is.na(type.nu$value),"value"]
+        }
+
+        theta.param$nu <- stats::setNames(rep(as.character(NA), n.endogenous), endogenous)
+        if(any(!is.na(type.nu$param))){
+            theta.param$nu[type.nu[!is.na(type.nu$param),"Y"]] <- type.nu[!is.na(type.nu$param),"param"]
+        }
+        theta.param$Xnu <- matrix(NA,
+                                  nrow = n.cluster, ncol = n.endogenous, byrow = TRUE,
+                                  dimnames = list(NULL,endogenous))
+        for(iC in 1:n.cluster){
+            theta.param$Xnu[iC,index.Omega[[iC]]] <- 1
+        }        
+    }
+
+    if("alpha" %in% type.theta$detail){
+        type.alpha <- type.theta[type.theta$detail %in% "alpha",,drop=FALSE]
+        theta.value$alpha <- stats::setNames(rep(NA, n.latent), latent)
+        theta.param$alpha <- stats::setNames(rep(as.character(NA), n.latent), latent)
+        theta.param$Xalpha <- rep(1, times = n.cluster)
+        
+        if(any(!is.na(type.alpha$value))){
+            theta.value$alpha[type.alpha[!is.na(type.alpha$value),"Y"]] <- type.alpha[!is.na(type.alpha$value),"value"]
+        }
+        if(any(!is.na(type.alpha$param))){
+            theta.param$alpha[type.alpha[!is.na(type.alpha$param),"Y"]] <- type.alpha[!is.na(type.alpha$param),"param"]
+        }
+    }
+
+    if("K" %in% type.theta$detail){
+        type.K <- type.theta[type.theta$detail %in% "K",,drop=FALSE]
+        K.exogenous <- unique(type.K$X)
+        
+        theta.value$K <- matrix(0, nrow = length(K.exogenous), ncol = n.endogenous,
+                                dimnames = list(K.exogenous, endogenous))
+        theta.param$K <- matrix(as.character(NA), nrow = length(K.exogenous), ncol = n.endogenous,
+                                dimnames = list(K.exogenous, endogenous))
+
+        theta.param$XK <- lapply(endogenous, function(iE){ ## iE <- endogenous[1]
+            iXK <- matrix(NA, nrow = n.cluster, ncol = length(K.exogenous),
+                          dimnames = list(NULL, K.exogenous))
+            iXK[X[obsByEndoInX[[iE]],"XXclusterXX"],] <- as.matrix(X[obsByEndoInX[[iE]],K.exogenous,drop=FALSE])
+            return(iXK)
+        })
+        names(theta.param$XK) <- endogenous
+        
+        for(iK in 1:NROW(type.K)){ ## iK <- 1
+            theta.value$K[type.K[iK,"X"],type.K[iK,"Y"]] <- type.K[iK,"value"]
+            theta.param$K[type.K[iK,"X"],type.K[iK,"Y"]] <- type.K[iK,"param"]
+        }
+    }
+
+    if("Gamma" %in% type.theta$detail){
+        type.Gamma <- type.theta[type.theta$detail %in% "Gamma",,drop=FALSE]
+        Gamma.exogenous <- unique(type.Gamma$X)
+
+        theta.value$Gamma <- matrix(0, nrow = length(Gamma.exogenous), ncol = n.latent,
+                                    dimnames = list(Gamma.exogenous, latent))
+        theta.param$Gamma <- matrix(as.character(NA), nrow = length(Gamma.exogenous), ncol = n.latent,
+                                    dimnames = list(Gamma.exogenous, latent))
+        theta.param$XGamma <- lapply(obsByEndoInX[latent], function(iIndex){as.matrix(X[iIndex,Gamma.exogenous,drop=FALSE])})
+        for(iGamma in 1:NROW(type.Gamma)){ ## iGamma <- 1
+            theta.value$Gamma[type.Gamma[iGamma,"X"],type.Gamma[iGamma,"Y"]] <- type.Gamma[iGamma,"value"]
+            theta.param$Gamma[type.Gamma[iGamma,"X"],type.Gamma[iGamma,"Y"]] <- type.Gamma[iGamma,"param"]
+        }
+    }
+
+    if("Lambda" %in% type.theta$detail){
+        type.Lambda <- type.theta[type.theta$detail %in% "Lambda",,drop=FALSE]
+        
+        theta.value$Lambda <- matrix(0, nrow = n.latent, ncol = n.endogenous,
+                                     dimnames = list(latent, endogenous))
+        theta.param$Lambda <- matrix(as.character(NA), nrow = n.latent, ncol = n.endogenous,
+                                     dimnames = list(latent, endogenous))
+
+        for(iLambda in 1:NROW(type.Lambda)){ ## iLambda <- 1
+            theta.value$Lambda[type.Lambda[iLambda,"X"],type.Lambda[iLambda,"Y"]] <- type.Lambda[iLambda,"value"]
+            theta.param$Lambda[type.Lambda[iLambda,"X"],type.Lambda[iLambda,"Y"]] <- type.Lambda[iLambda,"param"]
+        }
+    }
+
+    if("B" %in% type.theta$detail){
+        type.B <- type.theta[type.theta$detail %in% "B",,drop=FALSE]
+        
+        theta.value$B <- matrix(0, nrow = n.latent, ncol = n.latent,
+                                     dimnames = list(latent, latent))
+        theta.param$B <- matrix(as.character(NA), nrow = n.latent, ncol = n.latent,
+                                     dimnames = list(latent, latent))
+
+        for(iB in 1:NROW(type.B)){ ## iB <- 1
+            theta.value$B[type.B[iB,"X"],type.B[iB,"Y"]] <- type.B[iB,"value"]
+            theta.param$B[type.B[iB,"X"],type.B[iB,"Y"]] <- type.B[iB,"param"]
+        }
+    }
+
+    if(any(c("Sigma_var", "Sigma_cov") %in% type.theta$detail)){
+        type.Sigma_var <- type.theta[type.theta$detail %in% "Sigma_var",,drop=FALSE]
+        type.Sigma_cov <- type.theta[type.theta$detail %in% "Sigma_cov",,drop=FALSE]
+
+        theta.value$Sigma <- matrix(0, nrow = n.endogenous, ncol = n.endogenous,
+                                     dimnames = list(endogenous, endogenous))
+        theta.param$Sigma <- matrix(as.character(NA), nrow = n.endogenous, ncol = n.endogenous,
+                                     dimnames = list(endogenous, endogenous))
+
+        if(NROW(type.Sigma_var)>0){
+            for(iSigma_var in 1:NROW(type.Sigma_var)){ ## iSigma_var <- 1
+                theta.value$Sigma[type.Sigma_var[iSigma_var,"X"],type.Sigma_var[iSigma_var,"Y"]] <- type.Sigma_var[iSigma_var,"value"]
+                theta.param$Sigma[type.Sigma_var[iSigma_var,"X"],type.Sigma_var[iSigma_var,"Y"]] <- type.Sigma_var[iSigma_var,"param"]
+            }
+        }
+        if(NROW(type.Sigma_cov)>0){
+            for(iSigma_cov in 1:NROW(type.Sigma_cov)){ ## iSigma_cov <- 1
+                theta.value$Sigma[type.Sigma_cov[iSigma_cov,"X"],type.Sigma_cov[iSigma_cov,"Y"]] <- type.Sigma_cov[iSigma_cov,"value"]
+                theta.value$Sigma[type.Sigma_cov[iSigma_cov,"Y"],type.Sigma_cov[iSigma_cov,"X"]] <- type.Sigma_cov[iSigma_cov,"value"]
+                theta.param$Sigma[type.Sigma_cov[iSigma_cov,"X"],type.Sigma_cov[iSigma_cov,"Y"]] <- type.Sigma_cov[iSigma_cov,"param"]
+                theta.param$Sigma[type.Sigma_cov[iSigma_cov,"Y"],type.Sigma_cov[iSigma_cov,"X"]] <- type.Sigma_cov[iSigma_cov,"param"]
+            }
+        }
+    }
+    
+    if(any(c("Psi_var", "Psi_cov") %in% type.theta$detail)){
+        type.Psi_var <- type.theta[type.theta$detail %in% "Psi_var",,drop=FALSE]
+        type.Psi_cov <- type.theta[type.theta$detail %in% "Psi_cov",,drop=FALSE]
+        
+        theta.value$Psi <- matrix(0, nrow = n.latent, ncol = n.latent,
+                                     dimnames = list(latent, latent))
+        theta.param$Psi <- matrix(as.character(NA), nrow = n.latent, ncol = n.latent,
+                                     dimnames = list(latent, latent))
+
+        if(NROW(type.Psi_var)>0){
+            for(iPsi_var in 1:NROW(type.Psi_var)){ ## iPsi_var <- 1
+                theta.value$Psi[type.Psi_var[iPsi_var,"X"],type.Psi_var[iPsi_var,"Y"]] <- type.Psi_var[iPsi_var,"value"]
+                theta.param$Psi[type.Psi_var[iPsi_var,"X"],type.Psi_var[iPsi_var,"Y"]] <- type.Psi_var[iPsi_var,"param"]
+            }
+        }
+        if(NROW(type.Psi_cov)>0){
+            for(iPsi_cov in 1:NROW(type.Psi_cov)){ ## iPsi_cov <- 1
+                theta.value$Psi[type.Psi_cov[iPsi_cov,"X"],type.Psi_cov[iPsi_cov,"Y"]] <- type.Psi_cov[iPsi_cov,"value"]
+                theta.value$Psi[type.Psi_cov[iPsi_cov,"Y"],type.Psi_cov[iPsi_cov,"X"]] <- type.Psi_cov[iPsi_cov,"value"]
+                theta.param$Psi[type.Psi_cov[iPsi_cov,"X"],type.Psi_cov[iPsi_cov,"Y"]] <- type.Psi_cov[iPsi_cov,"param"]
+                theta.param$Psi[type.Psi_cov[iPsi_cov,"Y"],type.Psi_cov[iPsi_cov,"X"]] <- type.Psi_cov[iPsi_cov,"param"]
+            }
+        }
+    }
+
+    ## ** original link
+    type.originalLink <- type[!is.na(type$originalLink),,drop=FALSE]
+    originalLink2param <- stats::setNames(type.originalLink$param,type.originalLink$originalLink)
+    if(inherits(object,"lvm")){
+        originalLink2param <- originalLink2param[coef(object)]
+    }else if(inherits(object,"lvmfit")){
+        originalLink2param <- originalLink2param[names(coef(object))]
+    }
+    
+    ## ** type of parameters
+    type.param <- type[!is.na(type$param),,drop=FALSE]
+    type.mean <- c("nu","alpha","K","Gamma","Lambda","B")
+    type.var <- c("Lambda","B","Sigma_var","Sigma_cov","Psi_var","Psi_cov")
+
+    Uparam <- as.character(originalLink2param)
+    Uparam.mean <- unique(type.param[type.param$detail %in% type.mean,"param"])
+    Uparam.variance <- unique(type.param[type.param$detail %in% type.var,"param"])
+    Uparam.hybrid <- intersect(Uparam.mean,Uparam.variance)
+    
+    ## ** to update
+    toUpdate <- c("nu" = "nu" %in% type.param$detail,
+                  "K" = "K" %in% type.param$detail,
+                  "Lambda" = "Lambda" %in% type.param$detail,
+                  "Sigma" = ("Sigma_cov" %in% type.param$detail) || ("Sigma_var" %in% type.param$detail),
+                  "alpha" = "alpha" %in% type.param$detail,
+                  "Gamma" = "Gamma" %in% type.param$detail,
+                  "B" = "B" %in% type.param$detail,
+                  "Psi" = ("Psi_cov" %in% type.param$detail) || ("Psi_var" %in% type.param$detail)
+                  )    
+
+    ## ** residuals
+    theta.param$endogenous <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
+                                    dimnames = list(NULL, endogenous))
+    for(iEndo in 1:length(endogenous)){ ## iEndo <- 1
+        iIndexLong <- obsByEndoInX[[endogenous[iEndo]]]
+        iIndexWide <- X[iIndexLong,"XXclusterXX"]
+        theta.param$endogenous[iIndexWide,endogenous[iEndo]] <- X[iIndexLong,"XXvalueXX"]
+    }
+
+    ## ** export
+    return(list(param = theta.param,
+                value = theta.value,
+                type = type,
+                Uparam = Uparam,
+                Uparam.mean = Uparam.mean,
+                Uparam.variance = Uparam.variance,
+                Uparam.hybrid = Uparam.hybrid,
+                toUpdate.moment = toUpdate,
+                originalLink2param = originalLink2param,
+                obsByEndoInX = obsByEndoInX)
+           )
+}
+
+## * skeletonDtheta
+#' @rdname skeleton
+skeletonDtheta <- function(object, X,
+                           endogenous, latent,
+                           missing.pattern, unique.pattern, name.pattern,
+                           n.cluster, index.Omega){
+    if(lava.options()$debug){cat("skeletonDtheta \n")}
+
+    n.endogenous <- length(endogenous)
+    n.latent <- length(latent)
+    type <- object$type
+
+    type <- type[!is.na(type$param),]
+    name.param <- unique(type$param)
+    n.param <- length(name.param)
+
+    ## ** Compute partial derivative with respect to the matrices of parameters
+    dOmega.dparam <- list() ## derivative regarding the variance
+    dmat.dparam <- list() ## derivative regarding the matrix of parameters
+
+    if("nu" %in% type$detail){
+        type.nu <- type[type$detail == "nu",]
+        Utype.nu <- unique(type.nu$param)
+        nUtype.nu <- length(Utype.nu)
+
+        dmat.dparam$nu <- stats::setNames(vector(mode = "list", length = nUtype.nu), Utype.nu)
+        
+        for(iNu in Utype.nu){ ## iNu <- Utype.nu[1]
+            dnu.dparam <- as.numeric(endogenous %in% type.nu[type.nu$param==iNu,"Y"])
+
+            dmat.dparam$nu[[iNu]] <- matrix(NA,
+                                            nrow = n.cluster, ncol = n.endogenous, byrow = TRUE,
+                                            dimnames = list(NULL,endogenous))
+            for(iP in name.pattern){ ## iP <- name.pattern[1]
+                iIndex <- missing.pattern[[iP]]
+                iY <- which(unique.pattern[iP,]==1)
+                dmat.dparam$nu[[iNu]][iIndex,iY] <- rep(1, times = length(iIndex)) %o% dnu.dparam[iY]
+            }
+        }
+    }
+
+    if("alpha" %in% type$detail){
+        type.alpha <- type[type$detail == "alpha",]
+        Utype.alpha <- unique(type.alpha$param)
+        nUtype.alpha <- length(Utype.alpha)
+        dmat.dparam$alpha <- stats::setNames(vector(mode = "list", length = nUtype.alpha), Utype.alpha)
+
+        for(iAlpha in Utype.alpha){ ## iAlpha <- Utype.alpha[1]
+            dmat.dparam$alpha[[iAlpha]] <- matrix(as.numeric(latent %in% type.alpha[type.alpha$param==iAlpha,"Y"]),
+                                                  nrow = n.cluster, ncol = n.latent, byrow = TRUE,
+                                                  dimnames = list(NULL,latent))
+        }
+    }
+
+    if("K" %in% type$detail){
+        type.K <- type[type$detail == "K",]
+        Utype.K <- unique(type.K$param)
+        nUtype.K <- length(Utype.K)
+
+        for(iK in Utype.K){ ## iK <- Utype.K[1]
+            iType.K <- type.K[type.K$param==iK,]
+            dmat.dparam$K[[iK]] <- matrix(0,
+                                          nrow = n.cluster, ncol = n.endogenous,
+                                          dimnames = list(NULL,endogenous))
+
+            for(iX in 1:NROW(iType.K)){ ## iX <- 1
+                iY <- match(iType.K$Y[iX],endogenous)
+                iIndex <- which(X$XXendogenousXX==endogenous[iY])
+                dmat.dparam$K[[iK]][X[iIndex,"XXclusterXX"],iY] <- dmat.dparam$K[[iK]][X[iIndex,"XXclusterXX"],iY] + X[X$XXendogenousXX==endogenous[iY],iType.K$X[iX]]
+            }
+        }
+    }
+
+    if("Gamma" %in% type$detail){
+        type.Gamma <- type[type$detail == "Gamma",]
+        Utype.Gamma <- unique(type.Gamma$param)
+        nUtype.Gamma <- length(Utype.Gamma)
+        dmat.dparam$Gamma <- stats::setNames(vector(mode = "list", length = nUtype.Gamma), Utype.Gamma)
+
+        for(iGamma in Utype.Gamma){ ## iGamma <- Utype.Gamma[1]
+            iType.Gamma <- type.Gamma[type.Gamma$param==iGamma,]
+            dmat.dparam$Gamma[[iGamma]] <- matrix(0,
+                                                  nrow = n.cluster, ncol = n.latent,
+                                                  dimnames = list(NULL,latent))
+
+            for(iX in 1:NROW(iType.Gamma)){ ## iX <- 1
+                iLatent <- match(iType.Gamma$Y[iX],latent)
+                dmat.dparam$Gamma[[iGamma]][,iLatent] <- dmat.dparam$Gamma[[iGamma]][,iLatent] + X[X$XXendogenousXX==latent[iLatent],iType.Gamma$X[iX]]
+            }
+        }
+    }
+
+    if("Lambda" %in% type$detail){
+        type.Lambda <- type[type$detail == "Lambda",]
+        Utype.Lambda <- unique(type.Lambda$param)
+        nUtype.Lambda <- length(Utype.Lambda)
+        dmat.dparam$Lambda <- stats::setNames(vector(mode = "list", length = nUtype.Lambda), Utype.Lambda)
+
+        for(iLambda in Utype.Lambda){ ## iLambda <- Utype.Lambda[1]
+            dmat.dparam$Lambda[[iLambda]] <- matrix(as.double(object$param$Lambda %in% iLambda),
+                                                    nrow = n.latent, ncol = n.endogenous,
+                                                    dimnames = list(latent,endogenous))
+        }
+    }
+
+    if("B" %in% type$detail){
+        type.B <- type[type$detail == "B",]
+        Utype.B <- unique(type.B$param)
+        nUtype.B <- length(Utype.B)
+        dmat.dparam$B <- stats::setNames(vector(mode = "list", length = nUtype.B), Utype.B)
+
+        for(iB in Utype.B){ ## iB <- Utype.B[1]
+            dmat.dparam$B[[iB]] <- matrix(as.double(object$param$B %in% iB),
+                                          nrow = n.latent, ncol = n.latent,
+                                          dimnames = list(latent,latent))
+        }
+    }
+
+    if(any(c("Sigma_var","Sigma_cov") %in% type$detail)){
+        type.Sigma <- type[type$detail %in% c("Sigma_var","Sigma_cov"),]
+        Utype.Sigma <- unique(type.Sigma$param)
+        nUtype.Sigma <- length(Utype.Sigma)
+        dmat.dparam$Sigma <- stats::setNames(vector(mode = "list", length = nUtype.Sigma), Utype.Sigma)
+
+        for(iSigma in Utype.Sigma){ ## iSigma <- Utype.Sigma[1]
+            dmat.dparam$Sigma[[iSigma]] <- matrix(as.double(object$param$Sigma %in% iSigma),
+                                                  nrow = n.endogenous, ncol = n.endogenous,
+                                                  dimnames = list(endogenous,endogenous))
+        }
+    }
+
+    if(any(c("Psi_var","Psi_cov") %in% type$detail)){
+        type.Psi <- type[type$detail %in% c("Psi_var","Psi_cov"),]
+        Utype.Psi <- unique(type.Psi$param)
+        nUtype.Psi <- length(Utype.Psi)
+        dmat.dparam$Psi <- stats::setNames(vector(mode = "list", length = nUtype.Psi), Utype.Psi)
+
+        for(iPsi in Utype.Psi){ ## iPsi <- Utype.Psi[1]
+            dmat.dparam$Psi[[iPsi]] <- matrix(as.double(object$param$Psi %in% iPsi),
+                                              nrow = n.latent, ncol = n.latent,
+                                              dimnames = list(latent,latent))
+        }
+    }
+
+    ## ** Store derivative with respect to the mean/variance
+    if(length(dmat.dparam$nu)+length(dmat.dparam$K) > 0){
+        dmu.dparam <- c(dmat.dparam$nu, dmat.dparam$K)
+    }else{
+        dmu.dparam <- list()
+    }
+
+    if(length(dmat.dparam$Sigma) > 0){
+        dOmega.dparam <- dmat.dparam$Sigma
+    }else{
+        dOmega.dparam <- list()
+    }
+
+    ## ** pairs of parameters to be considered
+    grid.param <- list(mean = .combination(object$Uparam.mean, object$Uparam.mean),
+                       var = .combination(object$Uparam.var, object$Uparam.var),
+                       hybrid = .combination(object$Uparam.mean, object$Uparam.var))
+
+    ## ** export
+    return(c(object,
+             list(
+                 dmu.dparam = dmu.dparam,
+                 dOmega.dparam = dOmega.dparam,
+                 dmat.dparam = dmat.dparam,
+                 grid.dmoment = grid.param
+                 ))
+           )
+}
+
+## * skeletonDtheta2
+#' @rdname skeleton
+skeletonDtheta2 <- function(object){
+    if(lava.options()$debug){cat("skeletonDtheta2 \n")}
+
+    type.param <- object$type[!is.na(object$type$param),,drop=FALSE]
+    grid.param <- list()
+    
+    ## ** identify all combinations of coefficients with second derivative
+    grid.param$mean <- list()
+
+    grid.param$mean$alpha.B <- .combinationDF(type.param,
+                                        detail1 = "alpha", name1 = "alpha",
+                                        detail2 = "B", name2 = "B")
+
+    grid.param$mean$alpha.Lambda <- .combinationDF(type.param,
+                                                 detail1 = "alpha", name1 = "alpha",
+                                                 detail2 = "Lambda", name2 = "Lambda")
+
+    grid.param$mean$Gamma.B <- .combinationDF(type.param,
+                                        detail1 = "Gamma", name1 = "Gamma",
+                                        detail2 = "B", name2 = "B")
+
+    grid.param$mean$Gamma.Lambda <- .combinationDF(type.param,
+                                             detail1 = "Gamma", name1 = "Gamma",
+                                             detail2 = "Lambda", name2 = "Lambda")
+    
+    grid.param$mean$Lambda.B <- .combinationDF(type.param,
+                                        detail1 = "Lambda", name1 = "Lambda",
+                                        detail2 = "B", name2 = "B")
+
+    grid.param$mean$B.B <- .combinationDF(type.param,
+                                    detail1 = "B", name1 = "B1",
+                                    detail2 = "B", name2 = "B2")
+
+    grid.param$var <- list()
+    
+    grid.param$var$Psi.Lambda <- .combinationDF(type.param,
+                                           detail1 = c("Psi_var","Psi_cov"), name1 = "Psi",
+                                           detail2 = "Lambda", name2 = "Lambda")
+
+    grid.param$var$Psi.B <- .combinationDF(type.param,
+                                      detail1 = c("Psi_var","Psi_cov"), name1 = "Psi",
+                                      detail2 = "B", name2 = "B")
+
+    grid.param$var$Lambda.B <- .combinationDF(type.param,
+                                             detail1 = "Lambda", name1 = "Lambda",
+                                             detail2 = "B", name2 = "B")
+
+    grid.param$var$Lambda.Lambda <- .combinationDF(type.param,
+                                              detail1 = "Lambda", name1 = "Lambda1",
+                                              detail2 = "Lambda", name2 = "Lambda2")
+
+    grid.param$var$B.B <- .combinationDF(type.param,
+                                        detail1 = "B", name1 = "B1",
+                                        detail2 = "B", name2 = "B2")
+
+    
+    grid.param$mean <- grid.param$mean[lengths(grid.param$mean)>0]
+    grid.param$var <- grid.param$var[lengths(grid.param$var)>0]
+    
+    ## ** create d2mu and d2Omega
+    if(length(grid.param$mean)>0){
+        grid.tempo <- lapply(grid.param$mean, function(x){
+            if(NROW(x)>0){
+                colnames(x) <- c("x","y")
+            }
+            return(x)
+        })
+        collapseGrid <- do.call(rbind, grid.tempo)
+        name.tempo <- as.character(unique(collapseGrid[[1]]))
+        d2mu <- lapply(name.tempo, function(x){
+            iIndex <- which(collapseGrid[[1]]==x)
+            v <- vector(mode = "list", length(iIndex))
+            names(v) <- collapseGrid[[2]][iIndex]
+            return(v)
+        })
+        names(d2mu) <- name.tempo
+    }else{
+        d2mu <- list()
+    }
+
+    if(length(grid.param$var)>0){
+        grid.tempo <- lapply(grid.param$var, function(x){
+            if(NROW(x)>0){
+                colnames(x) <- c("x","y")
+            }
+            return(x)
+        })
+        collapseGrid <- do.call(rbind, grid.tempo)
+        name.tempo <- as.character(unique(collapseGrid[[1]]))
+        d2Omega <- lapply(name.tempo, function(x){
+            iIndex <- which(collapseGrid[[1]]==x)
+            v <- vector(mode = "list", length(iIndex))
+            names(v) <- collapseGrid[[2]][iIndex]
+            return(v)
+        })
+        names(d2Omega) <- name.tempo
+    }else{
+        d2Omega <- list()
+    }
+
+    ## ** update grid.dmoment
+    if(NROW(object$grid.dmoment$mean)>0){
+        object$grid.dmoment$mean$d2.12 <- FALSE
+        object$grid.dmoment$mean$d2.21 <- FALSE
+    }
+    
+    if(length(grid.param$mean)>0){
+        for(iType in names(grid.param$mean)){  ## iType <- names(grid.param$mean)[1]
+            for(iRow in 1:NROW(grid.param$mean[[iType]])){ ## iRow <- 1
+                iName1 <- grid.param$mean[[iType]][iRow,1]
+                iName2 <- grid.param$mean[[iType]][iRow,2]
+                iIndex1 <- which((object$grid.dmoment$mean$Var1==iName1)*(object$grid.dmoment$mean$Var2==iName2)==1)
+                iIndex2 <- which((object$grid.dmoment$mean$Var1==iName2)*(object$grid.dmoment$mean$Var2==iName1)==1)
+                if(length(iIndex1)>0){
+                    object$grid.dmoment$mean[iIndex1,"d2.12"] <- TRUE
+                }else if(length(iIndex2)>0){
+                    object$grid.dmoment$mean[iIndex2,"d2.21"] <- TRUE
+                }
+            }
+        }
+    }
+
+    if(NROW(object$grid.dmoment$var)>0){
+        object$grid.dmoment$var$d2.12 <- FALSE
+        object$grid.dmoment$var$d2.21 <- FALSE
+    }
+    if(length(grid.param$var)>0){
+        for(iType in names(grid.param$var)){  ## iType <- names(grid.param$var)[1]
+            for(iRow in 1:NROW(grid.param$var[[iType]])){ ## iRow <- 1
+                iName1 <- grid.param$var[[iType]][iRow,1]
+                iName2 <- grid.param$var[[iType]][iRow,2]
+                iIndex1 <- which((object$grid.dmoment$var$Var1==iName1)*(object$grid.dmoment$var$Var2==iName2)==1)
+                iIndex2 <- which((object$grid.dmoment$var$Var1==iName2)*(object$grid.dmoment$var$Var2==iName1)==1)
+                if(length(iIndex1)>0){
+                    object$grid.dmoment$var[iIndex1,"d2.12"] <- TRUE
+                }else if(length(iIndex2)>0){
+                    object$grid.dmoment$var[iIndex2,"d2.21"] <- TRUE
+                }
+            }
+        }
+    }
+
+    ## ** Parameters in dInformation/JJK
+    grid.3varD1 <- .duplicatedGrid(expand.grid(X = object$Uparam.var, Y = object$Uparam.var, Z = object$Uparam.var, stringsAsFactors = FALSE))
+    
+    grid.2meanD1.1varD1 <- .duplicatedGrid(expand.grid(X = object$Uparam.mean, Y = object$Uparam.mean, Z = object$Uparam.var, stringsAsFactors = FALSE))
+
+    grid.2meanD2.1meanD1 <- .expand.grid2(X = object$Uparam.mean, Y = object$Uparam.mean, Z = object$Uparam.mean,
+                                          d2 = object$grid.dmoment$mean)
+    grid.2varD2.1varD1 <- .expand.grid2(X = object$Uparam.var, Y = object$Uparam.var, Z = object$Uparam.var,
+                                        d2 = object$grid.dmoment$var)
+
+    ## ** Export    
+    return(c(object,
+             list(d2mu.dparam = d2mu,
+                  d2Omega.dparam = d2Omega,
+                  grid.d2moment = grid.param,
+                  grid.3varD1 = grid.3varD1,
+                  grid.2meanD1.1varD1 = grid.2meanD1.1varD1,
+                  grid.2meanD2.1meanD1 = grid.2meanD2.1meanD1,
+                  grid.2varD2.1varD1 = grid.2varD2.1varD1))
+           )
+}
+
+
+
+## * helpers
+## ** .combinationDF
+.combinationDF <- function(data,
+                           detail1, detail2,
+                           name1, name2){
+
+    detail <- NULL # [for CRAN check] avoid a NOTE about the undefined variable used in subset()
+    
+    if(any(detail1 %in% data$detail) && any(detail2 %in% data$detail) ){
+        ls.args <- list(subset(data, subset = detail %in% detail1, select = "param", drop = TRUE),
+                        subset(data, subset = detail %in% detail2, select = "param", drop = TRUE))
+        names(ls.args) <- c(name1,name2)
+    
+        return(do.call(.combination, args = ls.args))
+        
+    }else{
+        
+        return(numeric(0))
+        
+    }
+}
+
+## ** .expand.grid2
+.expand.grid2 <- function(X,Y,Z,d2){
+    if(NROW(d2)==0 || sum(d2$d2.12+d2$d2.21)==0){return(NULL)}
+
+    ## find available derivatives
+    index.12 <- which(d2[,"d2.12"])
+    index.21 <- which(d2[,"d2.21"])
+
+    available.d2 <- rbind(data.frame(Var1 = d2[index.12,1], Var2 = d2[index.12,2], stringsAsFactors = FALSE),
+                          data.frame(Var1 = d2[index.21,2], Var2 = d2[index.21,1], stringsAsFactors = FALSE))
+    name.available.d2 <- as.character(interaction(available.d2))
+    n.available.d2 <- length(name.available.d2)
+
+    unavailable.d2 <- data.frame(Var1 = available.d2[,2], Var2 = available.d2[,1], stringsAsFactors = FALSE)
+    name2position <- rbind(data.frame(name = name.available.d2, position = 1:n.available.d2, stringsAsFactors = FALSE),
+                           data.frame(name = as.character(interaction(unavailable.d2)), position = 1:n.available.d2, stringsAsFactors = FALSE))
+    unavailable.d2 <- unavailable.d2[as.character(interaction(unavailable.d2)) %in% name.available.d2 == FALSE,,drop=FALSE]
+    
+    ## generate grid (combination between X and YZ)
+    grid <- expand.grid(X = X, Y = Y, Z = Z, stringsAsFactors = FALSE)
+
+    ## find second order derivative
+    name.gridXY <- as.character(interaction(grid[,c("X","Y")]))
+    d2XY <- available.d2[name2position$position[match(name.gridXY, name2position$name)],]
+    names(d2XY) <- c("d2XY.Var1","d2XY.Var2")
+        
+    name.gridXZ <- as.character(interaction(grid[,c("X","Z")]))
+    d2XZ <- available.d2[name2position$position[match(name.gridXZ, name2position$name)],]
+    names(d2XZ) <- c("d2XZ.Var1","d2XZ.Var2")
+
+    name.gridYZ <- as.character(interaction(grid[,c("Y","Z")]))
+    d2YZ <- available.d2[name2position$position[match(name.gridYZ, name2position$name)],]
+    names(d2YZ) <- c("d2YZ.Var1","d2YZ.Var2")
+    
+    grid <- cbind(grid,d2XY,d2XZ,d2YZ)
+    rownames(grid) <- NULL
+    
+    ## find duplicated
+    grid <- cbind(.duplicatedGrid(grid[,c("X","Y","Z")]), grid[,-(1:3)])
+    grid$d2XY <- !is.na(grid$d2XY.Var1)
+    grid$d2XZ <- !is.na(grid$d2XZ.Var1)
+    grid$d2YZ <- !is.na(grid$d2YZ.Var1)
+    return(grid[grid$d2XY+grid$d2XZ+grid$d2YZ>0,,drop=FALSE])
+}
+
+## ** .duplicatedGrid
+.duplicatedGrid <- function(grid){
+    if(NROW(grid)==0){
+        return(NULL)
+    }else if(NROW(grid)==1){
+        grid$duplicatedXY <- FALSE
+        grid$duplicatedXZ <- FALSE
+        grid$duplicatedYZ <- FALSE
+        grid$duplicatedXYZ <- FALSE
+        return(grid)
+    }
+    grid <- as.data.frame(grid)
+    
+    ## normalize grid
+    labels <- unique(unlist(grid))
+    n.labels <- length(labels)
+    grid.num <- cbind(apply(grid, 2, match, labels), index =  1:NROW(grid))
+    p <- NCOL(grid)
+    
+    ## warper
+    warper <- function(M, M.max, p){
+        newlevel <- rep(NA, M.max)
+        newlevel[1] <- 1
+        for(iX in 2:M.max){
+            newlevel[iX] <- p * newlevel[iX-1] + 1
+        }
+        M[] <- newlevel[unlist(M)]
+        return(duplicated(rowSums(M)))
+    }
+    
+    ## find duplicated
+    duplicatedXY <- do.call(rbind,by(grid.num[,c(1:2,4),drop=FALSE], INDICES = grid.num[,3], FUN = function(iGrid){
+        return(cbind(wrapper(iGrid[,1:2], M.max = n.labels, p = p),iGrid[,3]))
+    }))
+    grid$duplicatedXY[duplicatedXY[,2]] <- as.logical(duplicatedXY[,1])
+
+    duplicatedXZ <- do.call(rbind,by(grid.num[,c(1,3,4),drop=FALSE], INDICES = grid.num[,2], FUN = function(iGrid){
+        return(cbind(wrapper(iGrid[,1:2], M.max = n.labels, p = p),iGrid[,3]))
+    }))
+    grid$duplicatedXZ[duplicatedXZ[,2]] <- as.logical(duplicatedXZ[,1])
+
+    duplicatedYZ <- do.call(rbind,by(grid.num[,c(2:4),drop=FALSE], INDICES = grid.num[,1], FUN = function(iGrid){
+        return(cbind(wrapper(iGrid[,1:2], M.max = n.labels, p = p),iGrid[,3]))
+    }))
+    grid$duplicatedYZ[duplicatedYZ[,2]] <- as.logical(duplicatedYZ[,1])
+
+    
+    grid$duplicatedXYZ <- wrapper(grid.num[,1:3,drop=FALSE], M.max = n.labels, p = p)
+
+    return(grid)
+
+}
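+
+## e.g. (hypothetical call)
+##   .duplicatedGrid(expand.grid(X = c("a","b"), Y = c("a","b"), Z = "c", stringsAsFactors = FALSE))
+## flags, within each level of the remaining column, rows whose other two entries
+## form the same unordered pair (duplicatedXY/XZ/YZ) and rows whose three entries
+## form the same unordered triplet (duplicatedXYZ).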
+
diff --git a/R/sCorrect-sscCoxSnell.R b/R/sCorrect-sscCoxSnell.R
new file mode 100644
index 0000000..e738de5
--- /dev/null
+++ b/R/sCorrect-sscCoxSnell.R
@@ -0,0 +1,329 @@
+### sCorrect-sscCoxSnell.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: aug  2 2019 (10:20) 
+## Version: 
+## Last-Updated: Jan 11 2022 (17:36) 
+##           By: Brice Ozenne
+##     Update #: 177
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * .init_sscCoxSnell
+.init_sscCoxSnell <- function(object,...){
+    return(object)
+}
+
+## * .sscCoxSnell
+.sscCoxSnell <- function(object, ssc){
+    param <- ssc$param0
+    name.param <- names(param)
+    
+    ## ** compute JJK
+    ## object$sCorrect$skeleton$grid.2varD2.1varD1 (no-op: the grids are re-read inside .calcJJK)
+    JJK <- .calcJJK(object)
+
+    ## print(range(.old_calcJJK(object)-JJK))
+    ## print(sum(.old_calcJJK(object))-sum(JJK))
+
+    ## ** least squares
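+    ## Rationale (Cox & Snell-type first-order bias correction): the approximate
+    ## bias b of the ML estimator solves information %*% b = Y where
+    ## Y[p] = 1/2 * sum_{q,r} JJK[q,r,p] * vcov.param[q,r];
+    ## b is obtained below as a least-squares solution and subtracted from param.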
+    Y <- (1/2) * sapply(name.param, function(iP){sum(JJK[,,iP] * object$sCorrect$vcov.param)})
+    X <- object$sCorrect$information
+    ## information(e.lvm2)
+
+    ## dd <- as.data.frame(cbind(Y = Y,X))
+    ## names(dd) <- c("Y",paste0("X",1:9))
+    ## e.lm <- lm(Y ~ -1+X1+X2+X3+X4+X5+X6+X7+X8+X9, data = dd)
+    
+    e.lm <- stats::lm.fit(y = Y, x = X)
+    newparam <- param - e.lm$coefficients
+    ## print(e.lm$coefficient)
+    
+    ## ** export
+    attr(newparam,"JJK") <- JJK
+    attr(newparam,"lm") <- e.lm
+    return(newparam)
+
+}
+
+
+## * .calcJJK
+.calcJJK <- function(object){
+
+    ## ** extract information
+    dmu <- object$sCorrect$dmoment$dmu
+    d2mu <- object$sCorrect$d2moment$d2mu
+    dOmega <- object$sCorrect$dmoment$dOmega
+    d2Omega <- object$sCorrect$d2moment$d2Omega
+
+    missing.pattern <- object$sCorrect$missing$pattern
+    name.pattern <- object$sCorrect$missing$name.pattern
+    unique.pattern <- object$sCorrect$missing$unique.pattern
+    n.pattern <- length(name.pattern)
+    OmegaM1 <- object$sCorrect$moment$OmegaM1.missing.pattern
+    
+    name.param <- names(object$sCorrect$param)
+    n.param <- length(name.param)
+    n.cluster <- object$sCorrect$cluster$n.cluster
+    
+    grid.2meanD1.1varD1 <- object$sCorrect$skeleton$grid.2meanD1.1varD1
+    grid.2meanD2.1meanD1 <- object$sCorrect$skeleton$grid.2meanD2.1meanD1
+    grid.2varD2.1varD1 <- object$sCorrect$skeleton$grid.2varD2.1varD1
+    n.grid.2meanD1.1varD1 <- NROW(grid.2meanD1.1varD1)
+    n.grid.2meanD2.1meanD1 <- NROW(grid.2meanD2.1meanD1)
+    n.grid.2varD2.1varD1 <- NROW(grid.2varD2.1varD1)
+
+    ## ** prepare output    
+    JJK <-  array(0, dim = c(n.param,n.param,n.param),
+                  dimnames = list(name.param,name.param,name.param))
+
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        idmu <- .subsetList(dmu, indexRow = iIndex, indexCol = iY)
+        idOmega <- .subsetList(dOmega, indexRow = iY, indexCol = iY)
+        id2mu <- .subsetList2(d2mu, indexRow = iIndex, indexCol = iY)
+        id2Omega <- .subsetList2(d2Omega, indexRow = iY, indexCol = iY)
+
+        ## *** 1 second derivative and 1 first derivative regarding the variance
+        if(n.grid.2varD2.1varD1>0){
+            for(iGrid in 1:n.grid.2varD2.1varD1){ # iGrid <- 1
+                iName1 <- grid.2varD2.1varD1[iGrid,"X"]
+                iName2 <- grid.2varD2.1varD1[iGrid,"Y"]
+                iName3 <- grid.2varD2.1varD1[iGrid,"Z"]
+
+                ## term 1
+                if(grid.2varD2.1varD1[iGrid,"d2XY"]){
+                    d2.Var1 <- grid.2varD2.1varD1[iGrid,"d2XY.Var1"]
+                    d2.Var2 <- grid.2varD2.1varD1[iGrid,"d2XY.Var2"]
+                    iDiag <- diag(iOmegaM1 %*% id2Omega[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 %*% idOmega[[iName3]])
+                    JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - 1/2 * sum(iDiag * n.cluster)
+                    ## cat("a: ",iName1," ",iName2," ",iName3,"\n")
+                }
+
+                ## term 2
+                if(grid.2varD2.1varD1[iGrid,"d2XZ"]){
+                    d2.Var1 <- grid.2varD2.1varD1[iGrid,"d2XZ.Var1"]
+                    d2.Var2 <- grid.2varD2.1varD1[iGrid,"d2XZ.Var2"]
+                    iDiag <- diag(iOmegaM1 %*% id2Omega[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 %*% idOmega[[iName2]])
+                    JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - 1/2 * sum(iDiag * n.cluster)
+                    ## cat("b: ",iName1," ",iName2," ",iName3,"\n")
+                }
+
+                ## term 3
+                if(grid.2varD2.1varD1[iGrid,"d2YZ"]){
+                    d2.Var1 <- grid.2varD2.1varD1[iGrid,"d2YZ.Var1"]
+                    d2.Var2 <- grid.2varD2.1varD1[iGrid,"d2YZ.Var2"]
+                    iDiag <- diag(iOmegaM1 %*% id2Omega[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 %*% idOmega[[iName1]])
+                    JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + 1/2 * sum(iDiag * n.cluster)
+                    ## cat(iGrid,") c: ",iName1," ",iName2," ",iName3," = ", 1/2 * sum(iDiag * n.cluster),"\n")
+                }
+            }
+        }
+
+        ## *** 1 second derivative and 1 first derivative regarding the mean
+        if(n.grid.2meanD2.1meanD1>0){
+            for(iGrid in 1:n.grid.2meanD2.1meanD1){ # iGrid <- 1
+                iName1 <- grid.2meanD2.1meanD1[iGrid,"X"]
+                iName2 <- grid.2meanD2.1meanD1[iGrid,"Y"]
+                iName3 <- grid.2meanD2.1meanD1[iGrid,"Z"]
+
+                ## term 4
+                if(grid.2meanD2.1meanD1[iGrid,"d2XY"]){
+                    d2.Var1 <- grid.2meanD2.1meanD1[iGrid,"d2XY.Var1"]
+                    d2.Var2 <- grid.2meanD2.1meanD1[iGrid,"d2XY.Var2"]
+                    JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - sum(id2mu[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 * idmu[[iName3]])
+                }
+
+                ## term 5
+                if(grid.2meanD2.1meanD1[iGrid,"d2XZ"]){
+                    d2.Var1 <- grid.2meanD2.1meanD1[iGrid,"d2XZ.Var1"]
+                    d2.Var2 <- grid.2meanD2.1meanD1[iGrid,"d2XZ.Var2"]
+                    JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - sum(id2mu[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 * idmu[[iName2]])
+                }
+
+                ## term 6
+                if(grid.2meanD2.1meanD1[iGrid,"d2YZ"]){
+                    d2.Var1 <- grid.2meanD2.1meanD1[iGrid,"d2YZ.Var1"]
+                    d2.Var2 <- grid.2meanD2.1meanD1[iGrid,"d2YZ.Var2"]
+                    JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + sum(id2mu[[d2.Var1]][[d2.Var2]] %*% iOmegaM1 * idmu[[iName1]])
+                }
+            }
+        }
+        
+        ## *** 2 first derivatives regarding the mean and 1 regarding the variance
+        if(n.grid.2meanD1.1varD1>0){
+            for(iGrid in 1:n.grid.2meanD1.1varD1){ # iGrid <- 1
+
+                ## term 7
+                iName1 <- grid.2meanD1.1varD1[iGrid,"Z"]
+                iName2 <- grid.2meanD1.1varD1[iGrid,"X"]
+                iName3 <- grid.2meanD1.1varD1[iGrid,"Y"]
+                value <- sum(idmu[[iName2]] %*% iOmegaM1 %*% idOmega[[iName1]] %*% iOmegaM1 * idmu[[iName3]])
+                JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + value
+
+                ## term 8 
+                iName1 <- grid.2meanD1.1varD1[iGrid,"X"]
+                iName2 <- grid.2meanD1.1varD1[iGrid,"Z"]
+                iName3 <- grid.2meanD1.1varD1[iGrid,"Y"]
+                JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - value
+                
+                ## term 9
+                iName1 <- grid.2meanD1.1varD1[iGrid,"X"]
+                iName2 <- grid.2meanD1.1varD1[iGrid,"Y"]
+                iName3 <- grid.2meanD1.1varD1[iGrid,"Z"]
+                JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - value
+            }
+        }
+        
+
+    }
+
+    ## sum(abs(JJK)>0)
+    return(JJK)
+}
+
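+## Note: .calcJJK only visits the parameter triplets listed in the precomputed
+## grids (i.e. with at least one non-zero derivative), whereas .old_calcJJK below
+## loops over all n.param^3 triplets and tests each derivative for NULL; both
+## should return the same array (see the commented checks in .sscCoxSnell).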
+
+## * .old_calcJJK
+.old_calcJJK <- function(object){
+
+    ## ** extract information
+    dmu <- object$sCorrect$dmoment$dmu
+    d2mu <- object$sCorrect$d2moment$d2mu
+    dOmega <- object$sCorrect$dmoment$dOmega
+    d2Omega <- object$sCorrect$d2moment$d2Omega
+
+    missing.pattern <- object$sCorrect$missing$pattern
+    name.pattern <- object$sCorrect$missing$name.pattern
+    unique.pattern <- object$sCorrect$missing$unique.pattern
+    n.pattern <- length(name.pattern)
+    OmegaM1 <- object$sCorrect$moment$OmegaM1.missing.pattern
+    
+    name.param <- names(object$sCorrect$param)
+    n.param <- length(name.param)
+    n.cluster <- object$sCorrect$cluster$n.cluster
+    
+    grid.2meanD1.1varD1 <- object$sCorrect$skeleton$grid.2meanD1.1varD1
+    grid.2meanD2.1meanD1 <- object$sCorrect$skeleton$grid.2meanD2.1meanD1
+    grid.2varD2.1varD1 <- object$sCorrect$skeleton$grid.2varD2.1varD1
+    n.grid.2meanD1.1varD1 <- NROW(grid.2meanD1.1varD1)
+    n.grid.2meanD2.1meanD1 <- NROW(grid.2meanD2.1meanD1)
+    n.grid.2varD2.1varD1 <- NROW(grid.2varD2.1varD1)
+
+    ## ** prepare output    
+    JJK <-  array(0, dim = c(n.param,n.param,n.param),
+                  dimnames = list(name.param,name.param,name.param))
+
+    ## ** loop over missing data pattern
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iPattern <- name.pattern[iP]
+        iIndex <- missing.pattern[[iPattern]]
+        iY <- which(unique.pattern[iP,]==1)
+
+        iOmegaM1 <- OmegaM1[[iPattern]]
+        idmu <- .subsetList(dmu, indexRow = iIndex, indexCol = iY)
+        idOmega <- .subsetList(dOmega, indexRow = iY, indexCol = iY)
+        id2mu <- .subsetList2(d2mu, indexRow = iIndex, indexCol = iY)
+        id2Omega <- .subsetList2(d2Omega, indexRow = iY, indexCol = iY)
+
+        for(iParam1 in 1:n.param){ ## iParam1 <- 1
+            for(iParam2 in 1:n.param){ ## iParam2 <- 1
+                for(iParam3 in 1:n.param){ ## iParam3 <- 1
+
+                    iName1 <- name.param[iParam1]
+                    iName2 <- name.param[iParam2]
+                    iName3 <- name.param[iParam3]
+                    
+                    ## *** 1 second derivative and 1 first derivative regarding the variance
+
+                    ## term 1
+                    if(!is.null(idOmega[[iName3]]) && !is.null(id2Omega[[iName1]][[iName2]])){
+                        iDiag <- diag(iOmegaM1 %*% id2Omega[[iName1]][[iName2]] %*% iOmegaM1 %*% idOmega[[iName3]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - 1/2 * sum(iDiag * n.cluster)
+                    }else if(!is.null(idOmega[[iName3]]) && !is.null(id2Omega[[iName2]][[iName1]])){
+                        iDiag <- diag(iOmegaM1 %*% id2Omega[[iName2]][[iName1]] %*% iOmegaM1 %*% idOmega[[iName3]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - 1/2 * sum(iDiag * n.cluster)
+                    }
+
+                    ## term 2
+                    if(!is.null(idOmega[[iName2]]) && !is.null(id2Omega[[iName1]][[iName3]])){
+                        iDiag <- diag(iOmegaM1 %*% id2Omega[[iName1]][[iName3]] %*% iOmegaM1 %*% idOmega[[iName2]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - 1/2 * sum(iDiag * n.cluster)
+                    }else if(!is.null(idOmega[[iName2]]) && !is.null(id2Omega[[iName3]][[iName1]])){
+                        iDiag <- diag(iOmegaM1 %*% id2Omega[[iName3]][[iName1]] %*% iOmegaM1 %*% idOmega[[iName2]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - 1/2 * sum(iDiag * n.cluster)
+                    }
+
+                    ## term 3
+                    if(!is.null(idOmega[[iName1]]) && !is.null(id2Omega[[iName2]][[iName3]])){
+                        iDiag <- diag(iOmegaM1 %*% id2Omega[[iName2]][[iName3]] %*% iOmegaM1 %*% idOmega[[iName1]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + 1/2 * sum(iDiag * n.cluster)
+                        ## cat("c: ",iName1," ",iName2," ",iName3," = ", 1/2 * sum(iDiag * n.cluster),"\n")
+                    }else if(!is.null(idOmega[[iName1]]) && !is.null(id2Omega[[iName3]][[iName2]])){
+                        iDiag <- diag(iOmegaM1 %*% id2Omega[[iName3]][[iName2]] %*% iOmegaM1 %*% idOmega[[iName1]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + 1/2 * sum(iDiag * n.cluster)
+                        ## cat("c: ",iName1," ",iName2," ",iName3," = ", 1/2 * sum(iDiag * n.cluster),"\n")
+                    }
+        
+                    ## *** 1 second derivative and 1 first derivative regarding the mean
+
+                    ## term 4
+                    if(!is.null(idmu[[iName3]]) && !is.null(id2mu[[iName1]][[iName2]])){
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - sum(id2mu[[iName1]][[iName2]] %*% iOmegaM1 * idmu[[iName3]])
+                    }else if(!is.null(idmu[[iName3]]) && !is.null(id2mu[[iName2]][[iName1]])){
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - sum(id2mu[[iName2]][[iName1]] %*% iOmegaM1 * idmu[[iName3]])
+                    }
+
+                    ## term 5
+                    if(!is.null(idmu[[iName2]]) && !is.null(id2mu[[iName1]][[iName3]])){
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - sum(id2mu[[iName1]][[iName3]] %*% iOmegaM1 * idmu[[iName2]])
+                    }else if(!is.null(idmu[[iName2]]) && !is.null(id2mu[[iName3]][[iName1]])){
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - sum(id2mu[[iName3]][[iName1]] %*% iOmegaM1 * idmu[[iName2]])
+                    }
+
+                    ## term 6
+                    if(!is.null(idmu[[iName1]]) && !is.null(id2mu[[iName2]][[iName3]])){
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + sum(id2mu[[iName2]][[iName3]] %*% iOmegaM1 * idmu[[iName1]])
+                    }else if(!is.null(idmu[[iName1]]) && !is.null(id2mu[[iName3]][[iName2]])){
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + sum(id2mu[[iName3]][[iName2]] %*% iOmegaM1 * idmu[[iName1]])
+                    }
+
+                    ## *** 2 first derivatives regarding the mean and 1 regarding the variance
+
+                    ## term 7
+                    if(!is.null(idmu[[iName2]]) && !is.null(idOmega[[iName1]]) && !is.null(idmu[[iName3]])){
+                        value <- sum(idmu[[iName2]] %*% iOmegaM1 %*% idOmega[[iName1]] %*% iOmegaM1 * idmu[[iName3]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] + value
+                    }
+
+                    ## term 8 
+                    if(!is.null(idmu[[iName1]]) && !is.null(idOmega[[iName2]]) && !is.null(idmu[[iName3]])){
+                        value <- sum(idmu[[iName1]] %*% iOmegaM1 %*% idOmega[[iName2]] %*% iOmegaM1 * idmu[[iName3]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - value
+                    }
+                
+                    ## term 9
+                    if(!is.null(idmu[[iName2]]) && !is.null(idOmega[[iName3]]) && !is.null(idmu[[iName1]])){
+                        value <- sum(idmu[[iName2]] %*% iOmegaM1 %*% idOmega[[iName3]] %*% iOmegaM1 * idmu[[iName1]])
+                        JJK[iName1,iName2,iName3] <- JJK[iName1,iName2,iName3] - value
+                    }
+                }
+            }
+        }        
+
+    }
+
+    return(JJK)
+}
+######################################################################
+### sCorrect-sscCoxSnell.R ends here
diff --git a/R/sCorrect-sscResiduals.R b/R/sCorrect-sscResiduals.R
new file mode 100644
index 0000000..9fe0c9a
--- /dev/null
+++ b/R/sCorrect-sscResiduals.R
@@ -0,0 +1,277 @@
+### sCorrect-sscResiduals.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: feb 16 2018 (16:38) 
+## Version: 
+## Last-Updated: jan 17 2022 (14:06) 
+##           By: Brice Ozenne
+##     Update #: 1226
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * .init_sscResiduals
+.init_sscResiduals <- function(object){
+
+    out <- list()
+
+    ## ** extract info
+    endogenous <- object$sCorrect$endogenous
+    n.endogenous <- length(endogenous)
+    latent <- object$sCorrect$latent
+    n.latent <- length(latent)
+    
+    type <- object$sCorrect$skeleton$type
+
+    index.var <- which(type$detail %in% c("Sigma_var","Sigma_cov","Psi_var","Psi_cov"))
+    index.param <- which(!is.na(type$param))
+    type.param <- type[intersect(index.var,index.param),,drop=FALSE]
+    
+    
+    Omega <- object$sCorrect$moment$Omega
+    ## name.var <- object$sCorrect$name.param[object$sCorrect$name.param %in% unique(type.param$param)] ## make sure to keep the same order as in the original vector of parameters
+    name.var <- unique(type.param$param)
+    n.var <- length(name.var)
+
+    ## ** subset residual variance-covariance
+    index.upper.tri <- data.frame(index = which(upper.tri(Omega, diag = TRUE)),
+                                  which(upper.tri(Omega, diag = TRUE), arr.ind = TRUE)
+                                  )
+    name.rhs <- paste(endogenous[index.upper.tri[,"row"]],
+                      lava.options()$symbols[2],
+                      endogenous[index.upper.tri[,"col"]],
+                      sep = "")
+    n.rhs <- length(name.rhs)
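+    ## with lava's default symbols, name.rhs enumerates the upper triangle of
+    ## Omega, e.g. for endogenous variables Y1,Y2: "Y1~~Y1", "Y1~~Y2", "Y2~~Y2"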
+
+    ## ** fixed part of the variance-covariance matrix
+    index.value <- which(!is.na(type$value))
+    index.var2 <- which(type$detail %in% c("Lambda","B","Sigma_var","Sigma_cov","Psi_var","Psi_cov"))
+    type.var.constrain <- type[intersect(index.value, index.var2),,drop=FALSE]
+
+    Omega.constrain <- matrix(0, nrow = n.endogenous, ncol = n.endogenous, 
+                              dimnames = list(endogenous,endogenous))
+
+    if(NROW(type.var.constrain)>0){
+        ## extract parameters with value fixed by the user
+        value <- object$sCorrect$skeleton$value
+
+        ## add fixed Sigma value
+        if(any(c("Sigma_var","Sigma_cov") %in% type.var.constrain$detail)){
+            addSigma <- value$Sigma
+            addSigma[is.na(addSigma)] <- 0
+            Omega.constrain <- Omega.constrain + addSigma
+        }
+        if(any(c("Psi_var","Psi_cov") %in% type.var.constrain$detail)){
+            Psi.constrain <- value$Psi
+            Lambda.constrain <- value$Lambda
+            if(any(is.na(value$B))){
+                stop("Current implementation cannot handle constraints in Psi when there are B parameters.\n")
+            }
+            if(any(is.na(value$Lambda))){
+                stop("Current implementation cannot handle constraints in Psi when there are lambda parameters.\n")
+            }
+            if("B" %in% names(value)){
+                iIB.constrain <- solve(diag(1, nrow = n.latent, ncol = n.latent) - value$B)
+            }else{
+                iIB.constrain <- diag(1, nrow = n.latent, ncol = n.latent)
+            }
+
+            addPsi <- t(Lambda.constrain) %*% t(iIB.constrain) %*% Psi.constrain %*% iIB.constrain %*% Lambda.constrain
+            addPsi[is.na(addPsi)] <- 0
+            Omega.constrain <- Omega.constrain + addPsi
+        }
+
+    }
+
+    ## ** Design matrix
+    A <- matrix(0, nrow = n.rhs, ncol = n.var,
+                dimnames = list(name.rhs, name.var))
+
+    ## *** Sigma_var and Sigma_cov
+    if(any(type.param$detail %in% c("Sigma_var","Sigma_cov"))){
+        type.Sigma <- type.param[type.param$detail %in% c("Sigma_var","Sigma_cov"),,drop=FALSE]
+        for(iRow in 1:NROW(type.Sigma)){ ## iRow <- 1 
+            A[paste0(type.Sigma[iRow,"Y"],"~~",type.Sigma[iRow,"X"]),type.Sigma[iRow,"param"]] <- 1
+        }
+    }
+    attr(A,"name") <- name.var
+
+    ## *** Psi_var and Psi_cov
+    if(any(type.param$detail %in% c("Psi_var","Psi_cov"))){
+        type.Psi <- type.param[type.param$detail %in% c("Psi_var","Psi_cov"),,drop=FALSE]
+        index.Psi <- cbind(row = match(type.Psi$X, latent),
+                           col = match(type.Psi$Y, latent))
+        rownames(index.Psi) <- type.Psi$param
+    }else{
+        index.Psi <- NULL
+    }
+    
+    return(list(type = "residuals",
+                param0 = object$sCorrect$param,
+                Omega0 = object$sCorrect$moment$Omega,
+                residuals0 = object$sCorrect$residuals,
+                index.upper.tri = index.upper.tri,
+                name.rhs = name.rhs,
+                name.var = name.var,
+                A = A,
+                Omega.constrain = Omega.constrain,
+                index.Psi = index.Psi
+                ))
+}
+
+## * .sscResiduals
+#' @title Compute Bias-Corrected Quantities
+#' @description Compute the bias-corrected residual variance-covariance matrix
+#' and the bias-corrected information matrix.
+#' Also provides the leverage values and the corrected sample size when \code{adjust.n} is set to \code{TRUE}.
+#' @name estimate2
+#' 
+#' @keywords internal
+.sscResiduals <- function(object, ssc, algorithm = "2"){
+    algorithm <- match.arg(as.character(algorithm), choices = c("1","2"))
+
+    ## ** initial values (i.e. non bias corrected)
+    Omega0 <- ssc$Omega0 
+    Omega.constrain <- ssc$Omega.constrain
+    param0 <- ssc$param0
+    residuals0 <- ssc$residuals0
+
+    ## ** current values
+    Omega <- object$sCorrect$moment$Omega 
+    epsilon <- object$sCorrect$residuals
+    leverage <- object$sCorrect$leverage
+    dmu <- object$sCorrect$dmoment$dmu
+    dOmega <- object$sCorrect$dmoment$dOmega
+    vcov.param <- object$sCorrect$vcov.param
+    
+    endogenous <- object$sCorrect$endogenous
+    n.endogenous <- length(endogenous)
+    n.cluster <- object$sCorrect$cluster$n.cluster
+    param.mean <- object$sCorrect$skeleton$Uparam.mean
+    param.var <- object$sCorrect$skeleton$Uparam.var
+    param.hybrid <- intersect(param.mean,param.var)
+    missing.pattern <- object$sCorrect$missing$pattern
+    name.pattern <- object$sCorrect$missing$name.pattern
+    n.pattern <- length(name.pattern)
+    unique.pattern <- object$sCorrect$missing$unique.pattern
+
+    if(length(param.mean)==0){
+        stop("No mean parameter: no small sample correction is needed. \n",
+             "Consider setting \'ssc\' to NA. \n")
+    }
+    ## ** Step (i-ii) compute individual and average bias
+    dmu <- aperm(abind::abind(dmu[param.mean], along = 3), perm = c(3,2,1))
+    vcov.muparam <- vcov.param[param.mean,param.mean,drop=FALSE]
+    Psi <- matrix(0, nrow = n.endogenous, ncol = n.endogenous,
+                  dimnames = list(endogenous, endogenous))
+    n.Psi <- matrix(0, nrow = n.endogenous, ncol = n.endogenous,
+                    dimnames = list(endogenous, endogenous))
+
+    ## ls.Psi <- vector(mode = "list", length = n.cluster)
+    for(iP in 1:n.pattern){ ## iP <- 1
+        iY <- which(unique.pattern[iP,]>0)
+        
+        for(iC in missing.pattern[[iP]]){ ## iC <- 1
+            ## individual bias
+            if(length(param.mean)==1){
+                iPsi <- vcov.muparam[1,1] * tcrossprod(dmu[,iY,iC])
+            }else{
+                iPsi <- t(dmu[,iY,iC])  %*% vcov.muparam %*% dmu[,iY,iC]
+            }
+
+            ## ls.Psi[[iC]] <- iPsi
+            ## cumulated bias            
+            Psi[iY,iY] <- Psi[iY,iY] + iPsi
+            n.Psi[iY,iY] <- n.Psi[iY,iY] + 1
+        }
+    }
+    ## take the average
+    Psi[n.Psi>0] <- Psi[n.Psi>0]/n.Psi[n.Psi>0]
+    
+    ## ** Step (iii): compute corrected residuals and effective sample size
+    ## done in estimate2 via moments2
+    
+    ## ** Step (iv): bias-corrected residual covariance matrix
+    Omega.adj <- Omega0 + Psi
+
+    ## ** Step (v): bias-corrected variance parameters
+    A <- ssc$A
+
+    ## *** right hand side of the equation
+    index.upper.tri <- ssc$index.upper.tri[,"index"]
+    eq.rhs <- stats::setNames((Omega.adj-Omega.constrain)[index.upper.tri],
+                              ssc$name.rhs)
+
+    ## *** left hand side of the equation
+    index.Psi <- ssc$index.Psi
+
+    if(NROW(index.Psi)>0){
+        Z <- object$sCorrect$moment$iIB %*% object$sCorrect$moment$Lambda
+        tZ <- t(Z)
+        n.index.Psi <- NROW(index.Psi)
+    
+        ## A = t(Z) Psi Z + Sigma
+        ## (t(Z) Psi Z)_{ij} = \sum_{k,l} Z_{k,i} Psi_{k,l} Z_{l,j}
+        ## (t(Z) Psi Z)_{ij} regarding param_(k,l) = Z_{k,i} Z_{l,j}
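+        ## e.g. with a single latent variable, Z reduces to the row of factor
+        ## loadings and the column of A for the Psi variance parameter contains
+        ## the products lambda_i*lambda_j over the vectorized upper triangle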
+        for(iPsi in 1:n.index.Psi){ ## iPsi <- 3
+            iNamePsi <- rownames(index.Psi)[iPsi]
+            iRowPsi <- index.Psi[iPsi,"row"]
+            iColPsi <- index.Psi[iPsi,"col"]
+            A[,iNamePsi] <- A[,iNamePsi] + (tZ[,index.Psi[iPsi,"row"]] %o% Z[index.Psi[iPsi,"col"],])[index.upper.tri]
+            if(iRowPsi!=iColPsi){
+                A[,iNamePsi] <- A[,iNamePsi] + (tZ[,index.Psi[iPsi,"col"]] %o% Z[index.Psi[iPsi,"row"],])[index.upper.tri]
+            }
+        }
+        
+    }
+
+    ## *** solve equation
+    ## microbenchmark::microbenchmark(svd = {asvd <- svd(A) ; asvd$v %*% diag(1/asvd$d) %*% t(asvd$u) %*% eq.rhs;},
+    ## qr = qr.coef(qr(A), eq.rhs),
+    ## Rcpp = OLS_cpp(A, eq.rhs),
+    ## RcppTry = try(OLS_cpp(A, eq.rhs)[,1], silent = TRUE),
+    ## Rcpp2 = OLS2_cpp(A, eq.rhs),
+    ## OLS1 = solve(crossprod(A), crossprod(A, eq.rhs)),
+    ## OLS2 = solve(t(A) %*% A) %*% t(A) %*% eq.rhs,
+    ## OLS_stats = stats::lsfit(x = A, y = eq.rhs),
+    ## OLS_LINPACK = .Call(stats:::C_Cdqrls, x = A, y = eq.rhs, tolerance = 1e-7, FALSE)$coefficients, times = 500)
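+    ## solve the (possibly overdetermined) system A %*% coef = eq.rhs in the
+    ## least-squares sense: "svd" applies v %*% diag(1/d) %*% t(u) to the
+    ## right-hand side while "ols" calls the package's C++ routine OLS_cpp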
+    if(lava.options()$method.estimate2=="svd"){
+        asvd <- svd(A)
+        iSolution <- try((asvd$v %*% diag(1/asvd$d) %*% t(asvd$u) %*% eq.rhs)[,1], silent = TRUE)
+    }else if(lava.options()$method.estimate2=="ols"){
+        iSolution <- try(OLS_cpp(A, eq.rhs)[,1], silent = TRUE)
+    }else{
+        stop("unknown OLS methods \n")
+    }
+    
+    if(inherits(iSolution, "try-error")){
+        if(abs(det(t(A) %*% A)) <  1e-10){            
+            stop("Singular matrix: cannot update the estimates \n")
+        }else{
+            stop(iSolution)
+        }
+    }
+    names(iSolution) <- attr(A,"name")
+
+    ## *** update parameters
+    ## param0[ssc$name.var] - iSolution
+    param0[names(iSolution)] <- iSolution
+
+    ## ** Step (vi-vii): update derivatives and information matrix
+    ## done in estimate2 via moments2
+        
+    ## ** Export
+    attr(param0,"Omega") <- Omega.adj 
+    attr(param0,"Psi") <- Psi
+    return(param0)
+}
+
+
+
+##----------------------------------------------------------------------
+### sCorrect-sscResiduals.R ends here
diff --git a/R/sCorrect-summary.glht2.R b/R/sCorrect-summary.glht2.R
new file mode 100644
index 0000000..b45d44d
--- /dev/null
+++ b/R/sCorrect-summary.glht2.R
@@ -0,0 +1,191 @@
+### sCorrect-summary.glht2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: maj  2 2018 (09:20) 
+## Version: 
+## Last-Updated: jan 24 2022 (11:11) 
+##           By: Brice Ozenne
+##     Update #: 228
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+#' @title Outcome of Linear Hypothesis Testing 
+#' @description Estimates, p-values, and confidence intervals for linear hypothesis testing, possibly adjusted for multiple comparisons.
+#' 
+#' @param object a \code{glht2} object.
+#' @param confint [logical] should confidence intervals be output
+#' @param conf.level [numeric 0-1] level of the confidence intervals.
+#' @param transform [function] function to backtransform the estimates, standard errors, null hypothesis, and the associated confidence intervals
+#' (e.g. \code{exp} if the outcomes have been log-transformed).
+#' @param seed [integer] value that will be set before adjustment for multiple comparisons to ensure reproducible results.
+#' Can also be \code{NULL}: in such a case no seed is set.
+#' @param rowname.rhs [logical] when naming the hypotheses, add the right-hand side (i.e. "X1-X2=0" instead of "X1-X2").
+#' @param ... argument passed to \code{multcomp:::summary.glht}, e.g. argument \code{test} to choose the type of adjustment for multiple comparisons.
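+#' @examples
+#' ## hedged sketch, kept as comments (assumes a fitted lvm 'e.lvm'
+#' ## and uses the glht2/multcomp interface documented above):
+#' ## e.glht2 <- glht2(e.lvm, linfct = c("Y~X1","Y~X2"))
+#' ## summary(e.glht2, test = multcomp::adjusted("bonferroni"))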
+#' 
+
+## * summary.glht2
+#' @export
+summary.glht2 <- function(object, confint = TRUE, conf.level = 0.95, transform = NULL, seed = NULL, rowname.rhs = TRUE, ...){
+    if(!is.null(seed)){
+        old.seed <- get0(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
+        on.exit( assign(".Random.seed", old.seed, envir = .GlobalEnv, inherits = FALSE) )
+        set.seed(seed)
+    }
+    
+    keep.class <- class(object)
+    object$test <- NULL
+    object$confint <- NULL
+    class(object) <- setdiff(keep.class, "glht2")
+    keep.df <- object$df
+    test.df <- any( (keep.df>0) * (!is.infinite(keep.df)) == 1 )
+    object$df <- round(stats::median(object$df))
+    output <- summary(object, ...)
+    ## restore df when possible
+    method.adjust <- output$test$type
+    if(NROW(object$linfct)==1){method.adjust <- "none"}
+    if(test.df && method.adjust %in% c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none","univariate")){
+        output$df <- keep.df
+        output$test$pvalues <- stats::p.adjust(2*(1-stats::pt(abs(output$test$tstat), df = keep.df)), method = method.adjust)
+    }
+    
+    name.hypo <- rownames(output$linfct)
+    n.hypo <- length(name.hypo)
+    if(confint && method.adjust %in% c("univariate","none","bonferroni","single-step")){
+        if(method.adjust %in% c("none","univariate","bonferroni")){
+            alpha <- switch(method.adjust,
+                            "none" = 1-conf.level,
+                            "univariate" = 1-conf.level,
+                            "bonferroni" = (1-conf.level)/n.hypo)
+            if(test.df){
+                q <- stats::qt(1-alpha/2, df = output$df)
+            }else{
+                q <- stats::qnorm(1-alpha/2)
+            }
+            output$confint <- data.frame(matrix(NA, ncol = 3, nrow = n.hypo,
+                                                dimnames = list(name.hypo, c("Estimate","lwr","upr"))))
+            output$confint$Estimate <- as.double(output$test$coef)
+            output$confint$lwr <- as.double(output$test$coef - q * output$test$sigma)
+            output$confint$upr <- as.double(output$test$coef + q * output$test$sigma)
+            ## range(confint(output, level = 1-alpha, calpha = univariate_calpha())$confint-output$confint)
+        }else if(method.adjust == "single-step"){
+            output <- confint(output, level = conf.level, calpha = multcomp::adjusted_calpha())
+        }else{
+            output$confint <- matrix(NA, nrow = n.hypo, ncol = 3,
+                                     dimnames = list(name.hypo, c("Estimate","lwr","upr")))
+        }
+    }
+    if(rowname.rhs){
+        table2.rownames <- paste0(name.hypo, " == ", output$rhs)
+    }else{
+        table2.rownames <- name.hypo
+    }
+    output$table2 <- data.frame(matrix(NA, nrow = n.hypo, ncol = 7,
+                                       dimnames = list(table2.rownames,
+                                                       c("estimate","se","df","lower","upper","statistic","p.value"))
+                                       ), stringsAsFactors = FALSE)
+    output$table2$estimate <- output$test$coefficients
+    output$table2$se <- output$test$sigma
+    output$table2$df <- output$df
+    output$table2$df[output$table2$df==0] <- Inf
+    output$table2$lower <- output$confint[,"lwr"]
+    output$table2$upper <- output$confint[,"upr"]
+    output$table2$statistic <- output$test$tstat
+    output$table2$p.value <- output$test$pvalues
+    output$seed <- seed
+    
+    ## ** transformation
+    output$transform <- transform
+    output$table2 <- transformSummaryTable(output$table2,
+                                           transform = transform)
+
+    ## ** export    
+    class(output) <- append(c("summary.glht2","summary.glht"),keep.class)
+    return(output)
+}
+
+## * print.summary.glht2
+#' @export
+print.summary.glht2 <- function(x,
+                                digits = max(3L, getOption("digits") - 2L),
+                                digits.p.value = max(3L, getOption("digits") - 2L),
+                                columns = c("estimate","se","df","lower","upper","statistic","p.value"),
+                                ...){
+    
+    columns <- match.arg(columns, choices = c("estimate","se","df","lower","upper","statistic","p.value"), several.ok = TRUE)
+    call <- if(isS4(x$model)){x$model@call}else{x$model$call}
+    alternative <- x$alternative
+    type <- x$test$type
+    txt.type <- switch(type,
+                       "univariate" = "(CIs/p-values not adjusted for multiple comparisons)", 
+                       "none" = "(CIs/p-values not adjusted for multiple comparisons)", 
+                       "single-step" = paste0("(CIs/p-values adjusted for multiple comparisons -- single step max-test)"), 
+                       "free" = paste0("(CIs/p-values adjusted for multiple comparisons -- step down max-test)"), 
+                       "Westfall" = paste0("(CIs/p-values adjusted for multiple comparisons -- step down max-test with logical restrictions)"), 
+                       paste0("(CIs/p-values adjusted for multiple comparisons -- ", type, " method)")
+                       )
+    txt.robust <- switch(as.character(x$robust),
+                         "TRUE" = "Robust",
+                         "FALSE" = "Model-based"
+                         )
+
+    ## txt.correction <- switch(as.character(x$ssc),
+    ##                          "Cox" = " corrected for small sample bias (Cox correction)",
+    ##                          "residuals" = " corrected for small sample bias (residual correction)",
+    ##                          "NA" = ""
+    ##                          )
+    
+    txt.alternative <- switch(alternative,
+                              "less" = "one sided tests - inferiority",
+                              "greater" = "one sided tests - superiority",
+                              "two.sided" = "two sided tests")
+
+    ## display
+    cat("\n\t", "Simultaneous Tests for General Linear Hypotheses\n\n")
+    if (!is.null(type)) {
+        cat("Multiple Comparisons of Means (",txt.alternative,") \n\n", sep = "")
+    }
+    if (!is.null(call)) {
+        cat("Fit: ")
+        print(call)
+        cat("Standard errors: ",txt.robust,"\n",sep="")
+        cat("\n")
+    }
+    cat("Linear Hypotheses:\n")
+    stats::printCoefmat(x$table2[,columns[columns %in% names(x$table2)],drop=FALSE], digits = digits,
+                        has.Pvalue = "p.value" %in% columns,
+                        P.values = "p.value" %in% columns,
+                        eps.Pvalue = 10^{-digits.p.value})
+
+    if(NROW(x$table2)>1){
+        cat(txt.type,"\n")
+    }
+    error <- attr(x$test$pvalues,"error")
+    if(!is.null(error) && error > 1e-12 && "p.value" %in% columns){
+        txt.error <- paste0("Error when computing the adjusted p-value by numerical integration: ", signif(error, digits = digits))
+        if(!is.null(x$seed)){
+            txt.error <- paste0(txt.error," (seed ",x$seed,")")
+        }
+        cat(txt.error,"\n")
+    }
+
+    
+    if(!is.null(x$global)){
+        cat("\nGlobal test: p.value=",format.pval(x$global["p.value"], digits = digits, eps = 10^(-digits.p.value)),
+            " (statistic=",round(x$global["statistic"], digits = digits),
+            ", df=",round(x$global["df"], digits = digits),")\n",sep="")
+    }
+    ## if(nchar(txt.correction)>0){cat("(",txt.correction,")\n",sep="")}
+    cat("\n")
+    return(invisible(x))
+}
+
+
+######################################################################
+### sCorrect-summary.glht2.R ends here
diff --git a/R/sCorrect-summary2.R b/R/sCorrect-summary2.R
new file mode 100644
index 0000000..b333ed7
--- /dev/null
+++ b/R/sCorrect-summary2.R
@@ -0,0 +1,181 @@
+
+### sCorrect-summary2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 10 2017 (10:57) 
+## Version: 
+## Last-Updated: jan 18 2022 (09:48) 
+##           By: Brice Ozenne
+##     Update #: 552
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation - summary2
+#' @title Latent Variable Model Summary After Small Sample Correction
+#' @description Summarize a fitted latent variable model.
+#' Similar to \code{stats::summary} with small sample correction.
+#' @name summary2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param digit [integer > 0] the number of decimal places to use when displaying the summary.
+#' @param robust [logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param df [character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}). 
+#' Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... arguments passed to lower level methods.
+#' 
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#'
+#' @details \code{summary2} is the same as \code{summary}
+#' except that it first computes the small sample correction (but does not store it).
+#' So if \code{summary2} is to be called several times,
+#' it is more efficient to pre-compute the quantities for the small sample correction
+#' using \code{sCorrect} and then call \code{summary2}.
+#'
+#' \code{summary2} returns an object with an element \code{table2} containing the estimates, standard errors, degrees of freedom,
+#' upper and lower limits of the confidence intervals, test statistics, and p-values.
+#' 
+#' @examples
+#' #### simulate data ####
+#' m <- lvm(Y~X1+X2)
+#' set.seed(10)
+#' d <- lava::sim(m, 2e1)
+#'
+#' #### latent variable models ####
+#' e.lvm <- estimate(m, data = d)
+#' summary(e.lvm)$coef
+#' 
+#' summary2(e.lvm)
+#' summary2(e.lvm, ssc = "none")
+#' 
+#' @concept small sample inference
+#' @export
+`summary2` <-
+  function(object, robust, cluster, digit, ...) UseMethod("summary2")
+
+## * summary2.lvmfit
+#' @rdname summary2
+#' @export
+summary2.lvmfit <- function(object, robust = FALSE, cluster = NULL, digit = max(5, getOption("digits")), ssc = lava.options()$ssc, df = lava.options()$df, ...){
+
+    return(summary(estimate2(object, ssc = ssc, df = df, dVcov.robust = robust, ...), robust = robust, cluster = NULL, digit = digit))
+
+}
+
+## * summary2.lvmfit2
+#' @rdname summary2
+#' @export
+summary2.lvmfit2 <- function(object, robust = FALSE, cluster = NULL, digit = max(5, getOption("digits")), ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+
+    ## ** table with se, df, confint, p-value for the corrected parameters
+    tableS.all <- model.tables(object, robust = robust, cluster = cluster, as.lava = TRUE)
+    name.param <- rownames(tableS.all)
+    n.param <- length(name.param)
+
+    ## ** get and normalize lava summary
+    object0 <- object
+    class(object0) <- setdiff(class(object0),c("lvmfit2"))
+    object.summary <- summary(object0, digits = digit)
+    
+    previous.summary <- object.summary$coef
+    object.summary$coef <- tableS.all[name.param,c("estimate","se","statistic","df","p.value"),drop=FALSE]
+
+
+    ## find digit
+    vec.char <- setdiff(object.summary$coefmat[,"Estimate"],"")
+    digit <- max(c(nchar(gsub(".","",vec.char,fixed = TRUE)))-1,1)
+
+    ## ** update summary
+    ## *** vcov
+    object.summary$vcov <- attr(object$dVcov, "vcov.param")[name.param,name.param]    
+
+    ## *** coef
+    lava.rownames <- rownames(previous.summary)
+    
+    ## *** coefmat
+    name.label0 <- trimws(rownames(CoefMat(object0, labels = 0, level = 9)), which = "both")
+    index.titleVariance <- which(name.label0=="Residual Variances:")
+    if(length(index.titleVariance)>0){
+        ## rename variance parameters from Y to Y~~Y
+        index.vcov <- (index.titleVariance+1):length(name.label0)
+        index.var <- setdiff(index.vcov,grep("~~",name.label0,fixed=TRUE)) ## exclude covariance parameters that are already correctly named
+        name.label0[index.var] <- paste0(name.label0[index.var],lava.options()$symbols[2],name.label0[index.var])
+    }
+    table.coefmat <- object.summary$coefmat
+    if(object$sCorrect$df=="satterthwaite"){
+        colnames(table.coefmat)[3:5] <- c("t-value","P-value","df")
+    }else{
+        colnames(table.coefmat)[3:5] <- c("Z-value","P-value","df")
+    }
+
+    ## mimic lava:::CoefMat (called by lava:::summary.lvmfit)
+    table.coef <- object.summary$coef
+    e2add <- format(round(table.coef[,"estimate"], max(1, digit - 1)), digits = digit - 1)
+    e2add <- gsub(" NA","",e2add)
+    sd2add <- format(round(table.coef[,"se"], max(1, digit - 1)), digits = digit - 1)
+    sd2add <- gsub(" NA","",sd2add)
+    df2add <- as.character(round(table.coef[,"df"],2))    
+    df2add[is.na(df2add)] <- ""
+    t2add <- format(round(table.coef[,"statistic"], max(1, digit - 1)), digits = digit - 1)
+    t2add <- gsub(" NA","",t2add)
+
+    p2add <- formatC(table.coef[,"p.value"], digits = digit - 1, format = "g",  preserve.width = "common", flag = "")
+    p2add <- gsub(" NA","",p2add)
+    p2add[table.coef[,"p.value"] < 1e-12] <- "  <1e-12"
+
+    M2add <- cbind(e2add,sd2add,t2add,p2add,df2add)
+    table.coefmat[,"df"] <- ""
+    table.coefmat[match(rownames(table.coef), name.label0),] <- M2add
+
+    table.coefmat[object.summary$coefmat[,4]=="",4] <- ""
+    object.summary$coefmat <- table.coefmat
+
+    ## ** Export
+    if(robust){
+        colnames(object.summary$coefmat)[2] <- "robust SE"
+        colnames(object.summary$coef)[2] <- "robust SE"
+    }
+
+    ## ** gather all results in one table
+    object.summary$table2 <- data.frame(matrix(NA, nrow = n.param, ncol = 7,
+                                               dimnames = list(name.param,
+                                                               c("estimate","se","df","lower","upper","statistic","p.value"))
+                                               ), stringsAsFactors = FALSE)
+
+    object.summary$table2$estimate <- tableS.all[name.param,"estimate"]
+    object.summary$table2$se <- tableS.all[name.param,"se"]
+    object.summary$table2$df <- tableS.all[name.param,"df"]
+    object.summary$table2$lower <- object.summary$table2$estimate + object.summary$table2$se * stats::qt(p=0.025, df = object.summary$table2$df)
+    object.summary$table2$upper <- object.summary$table2$estimate + object.summary$table2$se * stats::qt(p=0.975, df = object.summary$table2$df)
+    object.summary$table2$statistic <- tableS.all[name.param,"statistic"]
+    object.summary$table2$p.value <- tableS.all[name.param,"p.value"]
+
+    ## ** export
+    return(object.summary)
+    
+}
+
+## * summary.lvmfit2
+#' @rdname summary2
+#' @export
+summary.lvmfit2 <- summary2.lvmfit2
+
+
+##----------------------------------------------------------------------
+### sCorrect-summary2.R ends here
+
diff --git a/R/sCorrect-updateMoment.R b/R/sCorrect-updateMoment.R
new file mode 100644
index 0000000..e715b7f
--- /dev/null
+++ b/R/sCorrect-updateMoment.R
@@ -0,0 +1,352 @@
+### sCorrect-updateMoment.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: dec 10 2019 (09:58) 
+## Version: 
+## Last-Updated: jan 17 2022 (14:57) 
+##           By: Brice Ozenne
+##     Update #: 232
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * updateMoment
+updateMoment <- function(skeleton, value, toUpdate,
+                         name.pattern, unique.pattern,
+                         param, Omega, endogenous, latent, n.cluster){
+    if(lava.options()$debug){cat("updateMoment \n")}
+    ## remove attributes used by moments2
+    attr(Omega,"Omega.residuals") <- NULL
+    attr(Omega,"Omega.leverage") <- NULL
+    attr(Omega,"dOmega.leverage") <- NULL
+
+    n.endogenous <- length(endogenous)
+    n.latent <- length(latent)
+    
+    ## ** Update with the current values
+    name.update  <- names(toUpdate[toUpdate==TRUE])
+    if(length(name.update)>0){
+        for(iUpdate in name.update){ ## iUpdate <- "Sigma"
+            if(iUpdate == "SigmaValue"){
+                index.update <- which(!is.na(skeleton$SigmaParam))
+                skeleton$SigmaValue[index.update] <- param[skeleton$SigmaParam[index.update]]
+                value$Sigma <- apply(skeleton$SigmaValue, MARGIN = 1:2, FUN = prod)
+                attr(value$Sigma,"detail") <- skeleton$SigmaValue
+            }else{
+                index.update <- which(!is.na(skeleton[[iUpdate]]))
+                value[[iUpdate]][index.update] <- param[skeleton[[iUpdate]][index.update]]
+            }
+        }
+    }
+
+    ## ** Pre-compute relevant quantities
+    if(n.latent>0){
+        ## alpha + X\Gamma
+        value$alpha.XGamma <- matrix(0, nrow = n.cluster, ncol = n.latent,
+                                     dimnames = list(NULL, latent))
+        if("alpha" %in% names(value)){
+            value$alpha.XGamma <- value$alpha.XGamma + skeleton$Xalpha %o% value$alpha
+        }
+        if("Gamma" %in% names(value)){
+            value$alpha.XGamma <- value$alpha.XGamma + do.call(cbind,lapply(latent, function(iL){skeleton$XGamma[[iL]] %*% value$Gamma[,iL]}))
+        }
+
+        ## (I-B)^{-1}
+        if("B" %in% names(value)){
+            value$iIB <- solve(diag(1, nrow = n.latent, ncol = n.latent) - value$B)
+        }else{
+            value$iIB <- diag(1, nrow = n.latent, ncol = n.latent)
+            dimnames(value$iIB) <- list(latent,latent)
+        }
+
+        ## (alpha + X\Gamma) (I-B)^{-1}
+        value$alpha.XGamma.iIB <- value$alpha.XGamma %*% value$iIB
+        
+        ## (I-B)^{-1} \Lambda
+        value$iIB.Lambda <-  value$iIB %*% value$Lambda
+        value$tLambda.tiIB <-  t(value$iIB.Lambda)
+
+        ## \Psi (I-B)^{-1}
+        value$Psi.iIB <- value$Psi %*% value$iIB
+
+        ## (I-B)^{-t} \Psi (I-B)^{-1}
+        value$tiIB.Psi.iIB <-  t(value$iIB) %*% value$Psi
+
+        ## \Lambda^t (I-B)^{-t} \Psi (I-B)^{-1}
+        value$tLambda.tiIB.Psi.iIB <- t(value$iIB.Lambda) %*% value$Psi.iIB
+
+        ## (I-B)^{-t} \Psi (I-B)^{-1} \Lambda
+        value$tiIB.Psi.iIB.Lambda <- t(value$tLambda.tiIB.Psi.iIB) 
+    }
+
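+    ## For reference, the code below assembles the standard LVM implied moments:
+    ##   mu    = nu + X K + (alpha + X Gamma) (I-B)^{-1} Lambda
+    ##   Omega = Lambda^t (I-B)^{-t} Psi (I-B)^{-1} Lambda + Sigma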
+    ## ** Compute mean
+    value$mu <- matrix(0, nrow = n.cluster, ncol = n.endogenous,
+                       dimnames = list(NULL,endogenous))
+
+    if("nu" %in% names(value)){
+        value$mu <- value$mu + sweep(skeleton$Xnu, MARGIN = 2, FUN = "*", STATS = value$nu)
+    }
+    if("K" %in% names(value)){
+        value$mu <- value$mu + do.call(cbind,lapply(endogenous, function(iE){skeleton$XK[[iE]] %*% value$K[,iE]})) ## iE <- endogenous[1]
+    }
+    if(n.latent>0){
+        value$mu <- value$mu + value$alpha.XGamma %*% value$iIB.Lambda
+    }
+
+    ## ** Compute variance
+    Omega.param <- matrix(0, nrow = n.endogenous, ncol = n.endogenous, 
+                          dimnames = list(endogenous,endogenous))
+    if("Sigma" %in% names(value)){
+        Omega.param <- Omega.param + value$Sigma
+    }
+    if("Psi" %in% names(value)){
+        Omega.param <- Omega.param + value$tLambda.tiIB.Psi.iIB %*% value$Lambda
+    }
+
+    if(!is.null(Omega)){
+        value$Omega <- Omega        
+        attr(value$Omega,"discrepancy") <- Omega-Omega.param
+    }else{
+        value$Omega <- Omega.param
+    }
+
+    value$Omega.missing.pattern <- lapply(1:length(name.pattern), function(iM){ ## iM <- 1
+        iIndex <- which(unique.pattern[iM,]==1)
+        return(value$Omega[iIndex,iIndex,drop=FALSE])
+    })
+    names(value$Omega.missing.pattern) <- name.pattern
+    value$OmegaM1.missing.pattern <- lapply(value$Omega.missing.pattern, solve)
+
+    ## ** Export
+    return(value)
+}
+
+## * updateDMoment
+updateDMoment <- function(moment, skeleton, param){
+    if(lava.options()$debug){cat("updateDMoment \n")}
+
+    ## ** import information
+    dmu <- skeleton$dmu.dparam
+    dOmega <- skeleton$dOmega.dparam
+
+    iIB.Lambda <- moment$iIB.Lambda
+    tLambda.tiIB <- moment$tLambda.tiIB
+    alpha.XGamma.iIB <- moment$alpha.XGamma.iIB
+    tiIB.Psi.iIB.Lambda <- moment$tiIB.Psi.iIB.Lambda
+    tLambda.tiIB.Psi.iIB <- moment$tLambda.tiIB.Psi.iIB
+    Sigma <- moment$Sigma
+    attr(Sigma,"detail") <- NULL
+
+    ## ** Compute partial derivative regarding the mean
+    ## NOTE: no "nu", "K", or "Gamma" as the partial derivative is independent of the parameter values
+    ##       and can therefore be computed once for all
+    
+    if("alpha" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$alpha)
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dmu[[iParam]] <- skeleton$dmat.dparam$alpha[[iParam]] %*% iIB.Lambda
+        }
+    }
+
+    if("Gamma" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$Gamma)   
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dmu[[iParam]] <- skeleton$dmat.dparam$Gamma[[iParam]] %*% iIB.Lambda
+        }
+    }
+
+    if("Lambda" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$Lambda)
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dmu[[iParam]] <- alpha.XGamma.iIB %*% skeleton$dmat.dparam$Lambda[[iParam]]
+        }
+    }
+
+    if("B" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$B)
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dmu[[iParam]] <- alpha.XGamma.iIB %*% skeleton$dmat.dparam$B[[iParam]] %*% iIB.Lambda
+        }
+    }
+    
+    ## ** Compute partial derivative regarding the variance
+    if("Lambda" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$Lambda)
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dOmega[[iParam]] <- t(skeleton$dmat.dparam$Lambda[[iParam]]) %*% tiIB.Psi.iIB.Lambda + tLambda.tiIB.Psi.iIB %*% skeleton$dmat.dparam$Lambda[[iParam]]
+        }
+    }
+
+    if("B" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$B)
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dOmega[[iParam]] <- t(iIB.Lambda) %*% t(skeleton$dmat.dparam$B[[iParam]]) %*% tiIB.Psi.iIB.Lambda + tLambda.tiIB.Psi.iIB %*% skeleton$dmat.dparam$B[[iParam]] %*% iIB.Lambda
+        }
+    }
+
+    if("Psi" %in% names(skeleton$dmat.dparam)){
+        iName.param <- names(skeleton$dmat.dparam$Psi)
+        for(iParam in iName.param){ ## iParam <- iName.param[1]
+            dOmega[[iParam]] <- tLambda.tiIB %*% skeleton$dmat.dparam$Psi[[iParam]] %*% iIB.Lambda
+        }
+    }
+
+    ## ** Export
+    return(list(dmu = dmu, dOmega = dOmega))
+
+}
+
+
+## * updateD2Moment
+updateD2Moment <- function(moment, skeleton, param){
+    if(lava.options()$debug){cat("updateD2Moment \n")}
+
+    ## ** Import information
+    d2mu <- skeleton$d2mu.dparam
+    d2Omega <- skeleton$d2Omega.dparam
+
+    dalpha <- skeleton$dmat.dparam$alpha
+    dLambda <- skeleton$dmat.dparam$Lambda
+    dGamma <- skeleton$dmat.dparam$Gamma
+    dB <- skeleton$dmat.dparam$B
+    dPsi <- skeleton$dmat.dparam$Psi
+
+    Psi <- moment$Psi
+    Lambda <- moment$Lambda
+    iIB <- moment$iIB
+    Psi.iIB <- moment$Psi.iIB
+    iIB.Lambda <- moment$iIB.Lambda
+    tLambda.tiIB <- moment$tLambda.tiIB
+    alpha.XGamma.iIB <- moment$alpha.XGamma.iIB
+    tiIB.Psi.iIB <- moment$tiIB.Psi.iIB
+    tiIB.Psi.iIB.Lambda <- moment$tiIB.Psi.iIB.Lambda
+    tLambda.tiIB.Psi.iIB <- moment$tLambda.tiIB.Psi.iIB
+    
+    grid.mean <- skeleton$grid.d2moment$mean
+    grid.var <- skeleton$grid.d2moment$var
+    names.grid.mean <- names(grid.mean)
+    names.grid.var <- names(grid.var)
+    
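+    ## All cross-derivatives below follow from the product rule together with the
+    ## matrix identity d[(I-B)^{-1}] = (I-B)^{-1} dB (I-B)^{-1}, where dB is the
+    ## single-entry derivative of B with respect to one free parameter.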
+    ## ** Compute partial derivative regarding the mean
+    ## NOTE: no "nu", "K", or "Gamma" as the partial derivative is 0
+
+    if("alpha.B" %in% names.grid.mean){
+        for(iP in 1:NROW(grid.mean$alpha.B)){ # iP <- 1
+            iName1 <- grid.mean$alpha.B[iP,"alpha"]
+            iName2 <- grid.mean$alpha.B[iP,"B"]
+            d2mu[[iName1]][[iName2]] <- dalpha[[iName1]] %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
+        }
+    }
+    
+    if("alpha.Lambda" %in% names.grid.mean){
+        for(iP in 1:NROW(grid.mean$alpha.Lambda)){ # iP <- 1
+            iName1 <- grid.mean$alpha.Lambda[iP,"alpha"]
+            iName2 <- grid.mean$alpha.Lambda[iP,"Lambda"]
+            d2mu[[iName1]][[iName2]] <- dalpha[[iName1]] %*% iIB %*% dLambda[[iName2]]
+        }
+    }
+
+    if("Gamma.B" %in% names.grid.mean){
+        for(iP in 1:NROW(grid.mean$Gamma.B)){ # iP <- 1
+                iName1 <- grid.mean$Gamma.B[iP,"Gamma"]
+                iName2 <- grid.mean$Gamma.B[iP,"B"]
+                d2mu[[iName1]][[iName2]] <- dGamma[[iName1]] %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
+        }
+    }
+
+    if("Gamma.Lambda" %in% names.grid.mean){
+        for(iP in 1:NROW(grid.mean$Gamma.Lambda)){ # iP <- 1
+                iName1 <- grid.mean$Gamma.Lambda[iP,"Gamma"]
+                iName2 <- grid.mean$Gamma.Lambda[iP,"Lambda"]                
+                d2mu[[iName1]][[iName2]] <- dGamma[[iName1]] %*% iIB %*% dLambda[[iName2]]
+        }
+    }
+
+    if("Lambda.B" %in% names.grid.mean){
+        for(iP in 1:NROW(grid.mean$Lambda.B)){ # iP <- 1
+                iName1 <- grid.mean$Lambda.B[iP,"Lambda"]
+                iName2 <- grid.mean$Lambda.B[iP,"B"]
+                d2mu[[iName1]][[iName2]] <- alpha.XGamma.iIB %*% dB[[iName2]] %*% iIB %*% dLambda[[iName1]]
+        }
+    }
+
+    if("B.B" %in% names.grid.mean){
+        for(iP in 1:NROW(grid.mean$B.B)){ # iP <- 1
+            iName1 <- grid.mean$B.B[iP,"B1"]
+            iName2 <- grid.mean$B.B[iP,"B2"]
+
+            term1 <- alpha.XGamma.iIB %*% dB[[iName2]] %*% iIB %*% dB[[iName1]] %*% iIB.Lambda
+            term2 <- alpha.XGamma.iIB %*% dB[[iName1]] %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
+            d2mu[[iName1]][[iName2]] <- term1 + term2
+        }
+    }
+
+    ## ** Compute partial derivative regarding the variance
+    if("Psi.Lambda" %in% names.grid.var){
+        for(iP in 1:NROW(grid.var$Psi.Lambda)){ # iP <- 1
+            iName1 <- grid.var$Psi.Lambda[iP,"Psi"]
+            iName2 <- grid.var$Psi.Lambda[iP,"Lambda"]
+
+            term1 <- t(dLambda[[iName2]]) %*% t(iIB) %*% dPsi[[iName1]] %*% iIB.Lambda                
+            d2Omega[[iName1]][[iName2]] <- term1 + t(term1)
+        }
+    }
+    
+    if("Psi.B" %in% names.grid.var){
+        for(iP in 1:NROW(grid.var$Psi.B)){ # iP <- 1
+            iName1 <- grid.var$Psi.B[iP,"Psi"]
+            iName2 <- grid.var$Psi.B[iP,"B"]
+
+            term1 <- t(iIB.Lambda) %*% t(dB[[iName2]]) %*% t(iIB) %*% dPsi[[iName1]] %*% iIB.Lambda
+            d2Omega[[iName1]][[iName2]] <- term1 + t(term1)
+        }
+    }
+    
+    if("Lambda.B" %in% names.grid.var){
+        for(iP in 1:NROW(grid.var$Lambda.B)){ # iP <- 1
+            iName1 <- grid.var$Lambda.B[iP,"Lambda"]
+            iName2 <- grid.var$Lambda.B[iP,"B"]
+
+            term1 <- t(dLambda[[iName1]]) %*% t(iIB) %*% t(dB[[iName2]]) %*% t(iIB) %*% Psi %*% iIB.Lambda
+            term2 <- t(dLambda[[iName1]]) %*% t(iIB) %*% Psi %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
+            ## term2 <- tLambda.tiIB.Psi.iIB %*% dB[[iName2]] %*% iIB %*% dLambda[[iName1]]
+            d2Omega[[iName1]][[iName2]] <- term1 + t(term1) + term2 + t(term2)
+        }
+    }
+
+    if("Lambda.Lambda" %in% names.grid.var){
+        for(iP in 1:NROW(grid.var$Lambda.Lambda)){ # iP <- 1
+            iName1 <- grid.var$Lambda.Lambda[iP,"Lambda1"]
+            iName2 <- grid.var$Lambda.Lambda[iP,"Lambda2"]
+                
+            term1 <- t(dLambda[[iName1]]) %*% t(iIB) %*% Psi.iIB %*% dLambda[[iName2]]
+            d2Omega[[iName1]][[iName2]] <- term1 + t(term1)
+        }
+    }
+
+    if("B.B" %in% names.grid.var){
+        for(iP in 1:NROW(grid.var$B.B)){ # iP <- 1
+            iName1 <- grid.var$B.B[iP,"B1"]
+            iName2 <- grid.var$B.B[iP,"B2"]
+
+            term1 <- t(iIB.Lambda) %*% t(dB[[iName2]]) %*% t(iIB) %*% t(dB[[iName1]]) %*% t(iIB) %*% Psi.iIB %*% Lambda
+            term2 <- t(iIB.Lambda) %*% t(dB[[iName1]]) %*% t(iIB) %*% t(dB[[iName2]]) %*% t(iIB) %*% Psi.iIB %*% Lambda
+            term3 <- t(iIB.Lambda) %*% t(dB[[iName1]]) %*% t(iIB) %*% Psi.iIB %*% dB[[iName2]] %*% iIB %*% Lambda
+            d2Omega[[iName1]][[iName2]] <- term1 + t(term1) + term2 + t(term2) + term3 + t(term3)
+        }
+    }
+
+    ## ** Export
+    return(list(d2mu = d2mu, d2Omega = d2Omega))
+
+}
+
+
+
+######################################################################
+### sCorrect-updateMoment.R ends here
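
For readers tracing the algebra above: with mu = (alpha + X Gamma) %*% solve(I - B) %*% Lambda, the cross-derivatives in updateD2Moment follow from the identity d[(I-B)^-1]/dB = (I-B)^-1 %*% dB %*% (I-B)^-1. The sketch below is illustration only (toy matrices, not package objects) and checks the alpha.B case numerically; it assumes the suggested package numDeriv is available.

    ## Toy check: d2mu / dalpha dB = dalpha %*% iIB %*% dB %*% iIB %*% Lambda
    library(numDeriv)
    set.seed(1)
    q <- 2; p <- 3
    Lambda <- matrix(rnorm(q * p), q, p)          # latent -> endogenous loadings
    mu <- function(theta){                        # theta = c(alpha, vec(B))
        alpha <- matrix(theta[1:q], 1, q)
        B <- matrix(theta[q + 1:(q^2)], q, q)
        alpha %*% solve(diag(q) - B) %*% Lambda   # conditional mean, no covariates
    }
    theta0 <- c(rnorm(q), rep(0, q^2))            # evaluate at B = 0, so iIB = I
    dalpha1 <- matrix(c(1, 0), 1, q)              # perturbation of alpha_1
    dB11 <- matrix(0, q, q); dB11[1, 1] <- 1      # perturbation of B[1,1]
    analytic <- dalpha1 %*% dB11 %*% Lambda       # the formula above with iIB = I
    H <- hessian(function(x) mu(x)[1, 1], theta0) # numerical 2nd derivatives of mu[1]
    c(H[1, q + 1], analytic[1, 1])                # the two values agree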
diff --git a/R/sCorrect-vcov2.R b/R/sCorrect-vcov2.R
new file mode 100644
index 0000000..89a30e3
--- /dev/null
+++ b/R/sCorrect-vcov2.R
@@ -0,0 +1,117 @@
+### sCorrect-vcov2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: dec 11 2019 (13:55) 
+## Version: 
+## Last-Updated: jan 18 2022 (09:37) 
+##           By: Brice Ozenne
+##     Update #: 101
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * vcov2 (documentation)
+#' @title  Variance-Covariance With Small Sample Correction
+#' @description  Extract the variance-covariance matrix from a latent variable model.
+#' Similar to \code{stats::vcov} but with small sample correction.
+#' @name vcov2
+#'
+#' @param object a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).
+#' @param robust [logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.
+#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
+#' @param as.lava [logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.
+#' @param ssc [character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+#' correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+#' Only relevant when using a \code{lvmfit} object. 
+#' @param ... additional argument passed to \code{estimate2} when using a \code{lvmfit} object. 
+#' 
+#' @details When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extract the variance-covariance matrix.
+#'
+#' @seealso \code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+#'
+#' @return A matrix with as many rows and columns as the number of coefficients.
+#' 
+#' @examples
+#' #### simulate data ####
+#' n <- 5e1
+#' p <- 3
+#' X.name <- paste0("X",1:p)
+#' link.lvm <- paste0("Y~",X.name)
+#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
+#'
+#' m <- lvm(formula.lvm)
+#' distribution(m,~Id) <- Sequence.lvm(0)
+#' set.seed(10)
+#' d <- lava::sim(m,n)
+#'
+#' #### linear models ####
+#' e.lm <- lm(formula.lvm,data=d)
+#'
+#' #### latent variable models ####
+#' e.lvm <- estimate(lvm(formula.lvm),data=d)
+#' vcov0 <- vcov(e.lvm)
+#' vcovSSC <- vcov2(e.lvm)
+#' 
+#' vcovSSC/vcov0
+#' vcovSSC[1:4,1:4]/vcov(e.lm)
+#'
+#' @concept extractor
+#' @keywords smallSampleCorrection
+#' @export
+`vcov2` <-
+    function(object, robust, cluster, as.lava, ...) UseMethod("vcov2")
+
+## * vcov2.lvmfit
+#' @rdname vcov2
+#' @export
+vcov2.lvmfit <- function(object, robust = FALSE, cluster = NULL, as.lava = TRUE, ssc = lava.options()$ssc, ...){
+
+    return(vcov(estimate2(object, ssc = ssc, ...), robust = robust, cluster = cluster, as.lava = as.lava))
+
+}
+
+## * vcov2.lvmfit2
+#' @rdname vcov2
+#' @export
+vcov2.lvmfit2 <- function(object, robust = FALSE, cluster = NULL, as.lava = TRUE, ...){
+
+    dots <- list(...)
+    if(length(dots)>0){
+        warning("Argument(s) \'",paste(names(dots),collapse="\' \'"),"\' not used by ",match.call()[1],". \n")
+    }
+    
+
+    if(robust){
+        out <- crossprod(iid(object, cluster = cluster, as.lava = as.lava))
+    }else{
+        if(!is.null(cluster)){
+            warning("Argument \'cluster\' disregarded when argument \'robust\' is FALSE. \n")
+        }
+        out <- object$sCorrect$vcov.param
+    }
+
+    out <- out[names(object$sCorrect$skeleton$originalLink2param),
+               names(object$sCorrect$skeleton$originalLink2param),
+               drop=FALSE]
+    if(as.lava==FALSE){
+        dimnames(out) <- list(as.character(object$sCorrect$skeleton$originalLink2param),
+                              as.character(object$sCorrect$skeleton$originalLink2param))
+    }
+
+    return(out)
+}
+
+## * vcov.lvmfit2
+#' @rdname vcov2
+#' @export
+vcov.lvmfit2 <- vcov2.lvmfit2
+
+
+
+######################################################################
+### sCorrect-vcov2.R ends here
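
A usage sketch (reusing d and e.lvm from the examples above, not additional package functionality): with robust = TRUE the returned matrix is the cross-product of the influence functions from iid, while the default robust = FALSE returns the small-sample-corrected model-based matrix.

    V.model  <- vcov2(e.lvm)                  # model-based, small sample corrected
    V.robust <- vcov2(e.lvm, robust = TRUE)   # crossprod of the iid decomposition
    range(V.robust / V.model)                 # ratio between the two estimators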
diff --git a/R/sCorrect.R b/R/sCorrect.R
index 3197674..178e307 100644
--- a/R/sCorrect.R
+++ b/R/sCorrect.R
@@ -1,11 +1,11 @@
 ### sCorrect.R --- 
 ##----------------------------------------------------------------------
 ## Author: Brice Ozenne
-## Created: jan  3 2018 (14:29) 
+## Created: Jan 22 2022 (13:50) 
 ## Version: 
-## Last-Updated: jul 31 2020 (10:44) 
+## Last-Updated: Jan 22 2022 (14:05) 
 ##           By: Brice Ozenne
-##     Update #: 1529
+##     Update #: 12
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -15,930 +15,36 @@
 ## 
 ### Code:
 
-## * Documentation - sCorrect
-#' @title  Satterthwaite Correction and Small Sample Correction
-#' @description Correct the bias of the ML estimate of the variance and compute the first derivative of the information matrix.
+#' @title Deprecated Method For Small Sample Correction
+#' @description Deprecated method for small sample correction, now replaced by the \code{\link{estimate2}} method.
 #' @name sCorrect
-#'
-#' @param object,x a \code{gls}, \code{lme}, or \code{lvm} object.
-#' @param param [numeric vector, optional] the values of the parameters at which to perform the correction.
-#' @param data [data.frame, optional] the dataset relative to which the correction should be performed.
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' Only required for \code{gls} models with no correlation argument.
-#' @param value [logical] value for the arguments \code{adjust.Omega} and \code{adjust.n}.
-#' @param df [logical] should the degrees of freedom of the Wald statistic be computed using the Satterthwaite correction?
-#' Otherwise the degrees of freedom are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.
-#' @param adjust.Omega [logical] should the standard errors of the coefficients be corrected for small sample bias?
-#' @param adjust.n [logical] should the correction for the degree of freedom be performed?
-#' @param tol [numeric >0] the minimum absolute difference between two estimations of the small sample bias.
-#' Below this value, the algorithm used to estimate the bias stops.
-#' @param n.iter [integer >0] the maximum number of iterations used to estimate the small sample bias of the residual variance-covariance matrix. 
-#' @param numeric.derivative [logical] should a numerical derivative be used to compute the first derivative of the information matrix?
-#' Otherwise an analytic formula is used.
-#' @param trace [logical] should the execution of the function be traced.
-#' @param ... [internal] only used by the generic method or by the <- methods.
-#'
-#' @details The argument \code{value} is equivalent to the argument \code{bias.correct} of the function \code{summary2}.
 #' 
-#' @concept small sample inference
-#' @concept derivative of the score equation
-#' @examples
-#' n <- 5e1
-#' p <- 3
-#' X.name <- paste0("X",1:p)
-#' link.lvm <- paste0("Y~",X.name)
-#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
-#' 
-#' m <- lvm(formula.lvm)
-#' distribution(m,~Id) <- Sequence.lvm(0)
-#' set.seed(10)
-#' d <- lava::sim(m,n)
+#' @param object,x a \code{lvmfit} object.
+#' @param value not used.
+#' @param ... not used.
 #'
-#' ## linear model
-#' e.lm <- lm(formula.lvm,data=d)
-#' system.time(
-#' sCorrect(e.lm) <- TRUE ## i.e. bias.correct = TRUE
-#')
-#' 
-#' ## gls model
-#' library(nlme)
-#' e.gls <- gls(formula.lvm, data = d, method = "ML")
-#' sCorrect(e.gls, cluster = 1:NROW(d)) <- TRUE ## i.e. bias.correct = TRUE
-#' summary2(e.gls)
-#'
-#' ## latent variable model
-#' e.lvm <- estimate(lvm(formula.lvm),data=d)
-#' sCorrect(e.lvm) <- TRUE ## i.e. bias.correct = TRUE
-#' summary2(e.lvm)
-#' 
-#' @export
-`sCorrect` <-
-    function(object, adjust.Omega, adjust.n,
-             df, numeric.derivative,
-             param, data,
-             tol, n.iter, trace, ...) UseMethod("sCorrect")
-
-
-## * sCorrect.lm
-#' @rdname sCorrect
-#' @export
-sCorrect.lm <- function(object, adjust.Omega = TRUE, adjust.n = TRUE,
-                        df = TRUE, numeric.derivative = FALSE,
-                        param = NULL, data = NULL,
-                        tol = 1e-5, n.iter = 20, trace = 0, ...){
-    
-### ** Extract quantities from object
-    name.endogenous <- all.vars(stats::update(formula(object), ".~1"))
-
-    if(is.null(param)){
-        param <- .coef2(object)
-        param["sigma2"] <- mean(residuals(object)^2)
-        model.param <- param
-    }else{
-        model.param <- .coef2(object)
-        if(any(names(param) %in% names(model.param) == FALSE)){
-            stop("Argument \'param\' have appropriate names: \"",
-                 paste(setdiff(names(param),names(model.param)), collapse = "\" \""),
-                 "\" \n")
-        }
-        model.param[names(param)] <- param
-    }
-    name.param <- names(model.param)
-    name.meanparam <- attr(model.param,"mean.coef")
-    name.varparam <- attr(model.param,"var.coef")
-
-    if(is.null(data)){
-      data <- model.frame(object)
-    }
-
-### ** Number of samples
-    test.NNA <- sum(is.na(data[[name.endogenous]]))==0
-    if(any(test.NNA==FALSE)){ ## complete case analysis
-        if(trace>0){
-            cat("* Exclude missing values and recompute moments and residuals ")
-        }        
-        data <- data[which(test.NNA),,drop=FALSE]
-        if(trace>0){
-            cat("- done \n")
-        }        
-    }
-    
-    n.cluster <- NROW(data)
-    
-### ** Compute conditional moments
-    if(trace>0){
-        cat("Compute conditional moments")
-    }
-    object$conditionalMoment <- conditionalMoment(object, data = data, param = model.param,
-                                                  name.endogenous = name.endogenous,
-                                                  first.order = TRUE, second.order = FALSE)
-    if(trace>0){
-        cat(" - done \n")
-    }
-    
-    ### ** Compute residuals
-    if(trace>0){
-        cat("* Extract residuals ")
-    }
-    object.residuals <- data[[name.endogenous]] - object$conditionalMoment$mu    
-    dimnames(object.residuals) <- list(NULL, name.endogenous)
-    if(trace>0){
-        cat("- done \n")
-    }
-
-### ** args
-    args <- list(adjust.Omega = adjust.Omega,
-                 adjust.n = adjust.n,
-                 df = df,
-                 numeric.derivative = numeric.derivative,
-                 tol = tol, n.iter = n.iter)
-
-    if(df && numeric.derivative){
-        argsNumDeriv <- list(data=data)
-    }else{
-        argsNumDeriv <- list()
-    }
-    
-### ** correction
-    if(df == FALSE){
-        derivative <- "none"
-    }else if(numeric.derivative){
-        derivative <- "numeric"
-    }else{
-        derivative <- "analytic"
-    }
-
-    out <- .sCorrect(object,
-                     param = model.param,
-                     epsilon = object.residuals,
-                     name.param = name.param,
-                     name.endogenous = name.endogenous,
-                     n.cluster = n.cluster,
-                     index.Omega = NULL,
-                     derivative = derivative,
-                     args = args,
-                     argsNumDeriv = argsNumDeriv,
-                     trace = trace,
-                     ...)
-    
-    ## ** export
-    return(out)    
-}
-
-## * sCorrect.lm2
-#' @rdname sCorrect
-#' @export
-sCorrect.lm2 <- function(object, ...){
-    class(object) <- setdiff(class(object),"lm2")
-    return(sCorrect(object, ...))    
-}
-## * sCorrect.gls
-#' @rdname sCorrect
-#' @export
-sCorrect.gls <- function(object, adjust.Omega = TRUE, adjust.n = TRUE,
-                         df = TRUE, numeric.derivative = FALSE, 
-                         param = NULL, data = NULL,
-                         tol = 1e-5, n.iter = 20, trace = 0,
-                         cluster, ...){
-### ** limitations
-    if(object$method!="ML"){
-        if(adjust.Omega==TRUE || adjust.n == TRUE){
-            warning("Small sample corrections were derived for ML not for REML\n")
-        }else if(df){
-            warning("The Satterthwaite approximation ignores that fact that the model was fitted using REML\n")
-        }
-    }
-    
-    ## check valid class for corStruct and varStruct: see .getVarCov2
-### ** Extract quantities from the model
-
-    ## *** data
-    if(is.null(data)){
-        data <- extractData(object, design.matrix = FALSE, as.data.frame = TRUE,
-                            envir = parent.env(environment()))
-        
-        if(length(object$na.action)>0){ ## remove rows corresponding to missing values
-            data <- data[setdiff(1:NROW(data),object$na.action),,drop=FALSE]
-        }
-    }
-    
-    ## *** endogenous variable
-    formula.object <- .getFormula2(object)
-    name.Y <- all.vars(stats::update(formula.object, ".~1"))
-    
-    ## *** parameters
-    model.param <- .coef2(object)
-    if(!is.null(param)){        
-        if(any(names(param) %in% names(model.param) == FALSE)){
-            stop("Argument \'param\' have appropriate names: \"",
-                 paste(setdiff(names(param),names(model.param)), collapse = "\" \""),
-                 "\" \n")
-        }
-        model.param[names(param)] <- param
-    }
-    name.param <- names(model.param)
-    name.meanparam <- attr(model.param,"mean.coef")
-    name.varparam <- c(attr(model.param,"var.coef"),
-                       attr(model.param,"cor.coef"),
-                       attr(model.param,"ran.coef"))
 
-    ## *** group
-    if(trace>0){
-        cat("* Reconstruct iid sample ")
-    }
-    res.cluster <- .getCluster2(object,
-                                data = data,
-                                cluster = cluster)
-    n.cluster <- res.cluster$n.cluster
-    cluster <- res.cluster$cluster
-    if(length(cluster) != NROW(data)){
-        if(length(object$na.action)>0){
-            stop("Number of rows of \'data\' does not match length of cluster \n",
-                 "Consider removing the rows of \'data\' containing NA before fitting the model \n")
-        }else{
-            stop("Number of rows of data does not match length of cluster \n")
-        }
-    }
-    if(trace>0){
-        cat("- done \n")
-    }
-
-    ## data before re-ordering
-    args <- list(adjust.Omega = adjust.Omega,
-                 adjust.n = adjust.n,                     
-                 df = df,
-                 numeric.derivative = numeric.derivative,
-                 tol = tol, n.iter = n.iter,
-                 cluster = cluster) ## for score2
-
-    if(df && numeric.derivative){
-        argsNumDeriv <- list(data = data)
-    }else{
-        argsNumDeriv <- list()
-    }
-    
-    ## *** repetition relative to each observation
-    if(trace>0){
-        cat("* Relate observations to endogenous variables ")
-    }
-    res.index <- .getIndexOmega2(object,
-                                 param = model.param,
-                                 attr.param = attributes(model.param),
-                                 name.Y = name.Y,
-                                 cluster = cluster,
-                                 levels.cluster = res.cluster$levels.cluster,
-                                 data = data)
-    name.endogenous <- res.index$name.endogenous
-    n.endogenous <- res.index$n.endogenous
-    index.Omega <- res.index$index.Omega
-    if(trace>0){
-        cat("- done \n")
-    }
-    
-    ## *** sort data by group
-    vec.endogenous <- rep(NA, length(cluster))
-    for(iC in 1:n.cluster){ ## iC <- 1        
-        ## vec.endogenous[cluster==res.cluster$levels.cluster[iC]] <- index.Omega[[res.cluster$levels.cluster[iC]]]
-        vec.endogenous[cluster==iC] <- index.Omega[[iC]]
-    }
-    order.obs <- order(cluster,vec.endogenous)
-    if(is.unsorted(order.obs)==TRUE){
-        test.reorder <- TRUE
-        data <- data[order.obs,,drop=FALSE]
-
-        cluster <- cluster[order.obs]
-        vec.endogenous <- vec.endogenous[order.obs]
-        res.cluster$levels.cluster <- unique(cluster)        
-    }else{
-        test.reorder <- FALSE
-    }
-    ## for vector format to matrix format (for residuals and fitted values)
-    vec.OmegaMat <- cluster + (vec.endogenous-1)*n.cluster
-    ## M.check <- matrix(NA, nrow = n.cluster, ncol = n.endogenous)
-    ## M.check[vec.endogenous + (cluster-1)*n.endogenous] <- data[["G"]]
-    ## M.check[cluster + (vec.endogenous-1)*n.cluster] <- data[["G"]]
-
-### ** Compute conditional moments and derivatives
-    if(trace>0){
-        cat("* Compute conditional moments ")
-    }
-    object$conditionalMoment <- conditionalMoment(object,
-                                                  data = data,
-                                                  formula = formula.object,
-                                                  param = model.param,
-                                                  attr.param = attributes(model.param)[-1],
-                                                  ref.group = res.index$ref.group,
-                                                  first.order = TRUE,
-                                                  second.order = FALSE,
-                                                  n.cluster = n.cluster,
-                                                  cluster = cluster,
-                                                  name.endogenous = name.endogenous,
-                                                  n.endogenous = n.endogenous,
-                                                  index.Omega = index.Omega,
-                                                  vec.OmegaMat = vec.OmegaMat)
-    if(trace>0){
-        cat("- done \n")
-    }
-   
-### ** Compute observed residuals
-    if(trace>0){
-        cat("* Extract residuals ")
-    }
-    epsilon <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
-                      dimnames = list(NULL, name.endogenous))
-    epsilon[vec.OmegaMat] <- data[[name.Y]]
-    epsilon <- epsilon -  object$conditionalMoment$mu
-    
-    if(trace>0){
-        cat("- done \n")
-    }
-    ## stats::residuals(object)-na.omit(as.double(t(epsilon)))
-    ## epsilon + object$conditionalMoment$mu
-    ## data
-    ## as.double(stats::residuals(object))
-    ## ** Check missing value
-    if(all(!is.na(epsilon))){
-        index.Omega <- NULL
-    }
-    
-    ## ** correction
-    if(df == FALSE){
-        derivative <- "none"
-    }else if(numeric.derivative){
-        derivative <- "numeric"
-    }else{
-        derivative <- "analytic"
-    }
-    out <- .sCorrect(object,
-                     param = model.param,
-                     epsilon = epsilon,
-                     name.param = name.param,
-                     name.endogenous = name.endogenous,
-                     n.cluster = n.cluster,
-                     index.Omega = index.Omega,
-                     derivative = derivative,
-                     args = args,
-                     argsNumDeriv = argsNumDeriv,
-                     trace = trace,
-                     ...)
-    
-    ## ** export
-    ## *** restore original order
-    if(test.reorder==TRUE){
-        restaure.order <- order(order.obs[!duplicated(cluster)])
-        out$score <- out$score[restaure.order,,drop=FALSE]
-        out$residuals <- out$residuals[restaure.order,,drop=FALSE]
-        out$leverage <- out$leverage[restaure.order,,drop=FALSE]
-    }    
-    ##
-    return(out)          
- 
-}
-
-## * sCorrect.gls2
-#' @rdname sCorrect
-#' @export
-sCorrect.gls2 <- function(object, ...){
-    class(object) <- setdiff(class(object),"gls2")
-    return(sCorrect(object, ...))    
-}
-## * sCorrect.lme
-#' @rdname sCorrect
-#' @export
-sCorrect.lme <- sCorrect.gls
-## * sCorrect.lme2
-#' @rdname sCorrect
-#' @export
-sCorrect.lme2 <- function(object, ...){
-    class(object) <- setdiff(class(object),"lme2")
-    return(sCorrect(object, ...))    
-}
-
-## * sCorrect.lvmfit
 #' @rdname sCorrect
 #' @export
-sCorrect.lvmfit <- function(object, adjust.Omega = TRUE, adjust.n = TRUE,
-                            df = TRUE, numeric.derivative = FALSE, 
-                            param = NULL, data = NULL,
-                            tol = 1e-5, n.iter = 20, trace = 0,
-                            ...){
-
-### ** Check valid lvm object
-    if("multigroupfit" %in% class(object)){
-        stop("sCorrect does not handle multigroup models \n")
-    }
-    ## if(!is.null(object$cluster)){
-    ##     stop("sCorrect does not handle lvmfit object with cluster \n")
-    ## }
-    if(!is.logical(numeric.derivative)){
-        stop("Argument \'numeric.derivative\' must be logical \n")
-    }
-    
-    if(length(object$model$attributes$ordinal)>0){
-        name.t <- names(object$model$attributes$ordinal)
-        stop("sCorrect does not handle ordinal variables \n",
-             "ordinal variable(s): \"",paste(name.t, collapse = "\" \""),"\"\n")
-    }
-    
-    if(length(object$model$attributes$transform)>0){
-        name.t <- names(object$model$attributes$transform)
-        stop("sCorrect does not handle transformed variables \n",
-             "transformed variable(s): \"",paste(name.t, collapse = "\" \""),"\"\n")
-    }
-    
-### ** Extract quantities from object
-    name.endogenous <- endogenous(object)
-
-    model.param <- lava::pars(object)
-    if(!is.null(param)){
-        if(any(names(param) %in% names(model.param) == FALSE)){
-            stop("Argument \'param\' have appropriate names: \"",
-                 paste(setdiff(names(param),names(model.param)), collapse = "\" \""),
-                 "\" \n")
-        }
-        model.param[names(param)] <- param
-    }
-
-    if(is.null(data)){
-        data <- as.data.frame(object$data$model.frame)
-    }
-
-    name.param <- names(model.param)
-
-    name.latent <- latent(object)
-    n.latent <- length(name.latent)
-
-### ** number of samples
-    test.NNA <- rowSums(is.na(data[,name.endogenous,drop=FALSE]))==0    
-    if(any(test.NNA==FALSE) && !inherits(object,"lvm.missing")){ ## complete case analysis
-        if(trace>0){
-            cat("* Exclude missing values and recompute moments and residuals ")
-        }        
-        data <- data[which(test.NNA),,drop=FALSE]
-        if(trace>0){
-            cat("- done \n")
-        }        
-    }
-    
-    n.cluster <- NROW(data)
-
-### ** Compute conditional moments and derivatives
-    if(trace>0){
-        cat("* Compute conditional moments and their derivative ")
-    }
-    object$conditionalMoment <- conditionalMoment(object, data = data, param = model.param,
-                                                  first.order = TRUE, second.order = FALSE, usefit = TRUE)
-
-    if(df == TRUE && (numeric.derivative == FALSE)){
-        object$conditionalMoment$d2Moment.init <- skeletonDtheta2(lava::Model(object),
-                                                                  data = data,
-                                                                  df.param.all = object$conditionalMoment$df.param,
-                                                                  param2originalLink = object$conditionalMoment$param2originalLink,
-                                                                  name.latent = name.latent)
-    }
-    if(trace>0){
-        cat("- done \n")
-    }
-
-#### ** Compute residuals
-    if(trace>0){
-        cat("* Extract residuals ")
-    }
-    ## predict(object) - object$conditionalMoment$mu
-    epsilon <- as.matrix(data[, name.endogenous,drop=FALSE] - object$conditionalMoment$mu)
-    ## residuals(object) - epsilon
-    if(trace>0){
-        cat("- done \n")
-    }
-
-### ** Identify missing values
-    if(any(test.NNA==FALSE) && inherits(object,"lvm.missing")){ ## full information
-        if(trace>0){
-            cat("* Identify missing values ")
-        }
-        index.Omega <- lapply(1:n.cluster,function(iC){which(!is.na(epsilon[iC,]))})
-        if(trace>0){
-            cat("- done \n")
-        }        
-    }else{
-        index.Omega <- NULL
-    }
-
-### ** args
-    args <- list(adjust.Omega = adjust.Omega,
-                 adjust.n = adjust.n,                     
-                 df = df,
-                 numeric.derivative = numeric.derivative,
-                 tol = tol, n.iter = n.iter)
-    if(df && numeric.derivative){
-        argsNumDeriv <- list(data = data)
-    }else{
-        argsNumDeriv <- list()
-    }
-    
-### ** correction
-    if(df == FALSE){
-        derivative <- "none"
-    }else if(numeric.derivative){
-        derivative <- "numeric"
-    }else{
-        derivative <- "analytic"
-    }
-
-    out <- .sCorrect(object,
-                     param = model.param,
-                     epsilon = epsilon,
-                     name.param = name.param,
-                     name.endogenous = name.endogenous,
-                     n.cluster = n.cluster,
-                     index.Omega = index.Omega,
-                     derivative = derivative,
-                     args = args,
-                     argsNumDeriv = argsNumDeriv,
-                     trace = trace,
-                     ...)
-
-    ## ** export
-    return(out)       
-}
+`sCorrect` <-
+    function(object, ...) UseMethod("sCorrect")
 
-## * sCorrect.lvmfit2
 #' @rdname sCorrect
 #' @export
-sCorrect.lvmfit2 <- function(object, ...){
-    class(object) <- setdiff(class(object),"lvmfit2")
-    return(sCorrect(object, ...))    
-}
-## * .sCorrect
-.sCorrect <- function(object, param, epsilon, 
-                      name.param, name.endogenous, 
-                      n.cluster, index.Omega,
-                      derivative, args, argsNumDeriv, trace){
-
-    n.param <- length(param)
-    if(!is.null(index.Omega)){
-        n.endogenous.cluster <- lapply(index.Omega,length)        
-    }else{
-        n.endogenous.cluster <- NULL
-    }
-    name.3deriv <- object$conditionalMoment$name.3deriv
-    
-    ## ** check names of the mean and variance parameters
-    name.meanparam <- names(object$conditionalMoment$dmu)
-    name.meanparam <- as.character(sort(factor(name.meanparam, levels = name.param)))
-    if(any(is.na(name.meanparam))){
-        stop("An element in name.meanparam is not in name.param. \n")
-    }
-    if(length(name.meanparam)>0 && !identical(sort(name.meanparam),sort(names(object$conditionalMoment$dmu)))){
-        stop("Mismatch first derivative of the conditional mean and name.meanparam \n")
-    }
-
-    name.varparam <- names(object$conditionalMoment$dOmega)
-    name.varparam <- as.character(sort(factor(name.varparam, levels = name.param)))
-    if(any(is.na(name.varparam))){
-        stop("An element in name.varparam is not in name.param. \n")
-    }
-    if(length(name.varparam)>0 && !identical(sort(name.varparam),sort(names(object$conditionalMoment$dOmega)))){
-        stop("Mismatch first derivative of the conditional variance and name.varparam \n")
-    }
-    if(length(name.varparam)==0){
-        args$adjust.n <- FALSE
-        args$adjust.Omega <- FALSE
-    }
-    
-    ## ** corrected ML estimates
-    object  <- .estimate2(object = object,
-                          epsilon = epsilon,
-                          n.cluster = n.cluster,
-                          name.param = name.param,
-                          name.meanparam = name.meanparam,
-                          name.varparam = name.varparam,
-                          name.endogenous = name.endogenous,
-                          index.Omega = index.Omega, ## mode2
-                          adjust.Omega = args$adjust.Omega,
-                          adjust.n = args$adjust.n,
-                          tol = args$tol, n.iter = args$n.iter,
-                          trace = trace)
-    Omega <- object$conditionalMoment$Omega
-    if(!is.null(index.Omega)){
-        OmegaM1 <- lapply(1:n.cluster, function(iC){
-            return(solve(Omega[index.Omega[[iC]],index.Omega[[iC]]]))
-        })    
-    }else{
-        OmegaM1 <- chol2inv(chol(Omega))
-    }
-    
-    ## ** corrected score
-    if(trace>0){
-        if(args$adjust.n == FALSE && args$adjust.Omega == FALSE){
-            cat("* Compute score ")
-        }else{
-            cat("* Compute corrected score ")
-        }
-    }
-    object$dVcov$score <- .score2(epsilon = object$dVcov$residuals,
-                                  Omega = Omega,
-                                  OmegaM1 = OmegaM1,
-                                  dmu = object$conditionalMoment$dmu,
-                                  dOmega = object$conditionalMoment$dOmega,
-                                  name.param = name.param,
-                                  name.meanparam = name.meanparam,
-                                  name.varparam = name.varparam,
-                                  index.Omega = index.Omega, ## mode2
-                                  n.cluster = n.cluster,
-                                  indiv = TRUE)
-    if(trace>0){
-        cat("- done \n")
-    }
-
-
-    ## ** Hessian and first derivative of the expected information matrix    
-    if(args$df == FALSE || length(name.3deriv)==0){
-        object$dVcov$dVcov.param <- NULL
-    }else if(derivative == "none"){
-        object$dVcov$dVcov.param <- NA
-    }else if(derivative == "numeric"){
-        if(trace>0){
-            cat("Compute first derivative of the information matrix using numerical differentiation ")
-        }
-        test.package <- try(requireNamespace("numDeriv"), silent = TRUE)
-        if(inherits(test.package,"try-error")){
-            stop("There is no package \'numDeriv\' \n",
-                 "This package is necessary when argument \'numeric.derivative\' is TRUE \n")
-        }
-        if(args$adjust.Omega || args$adjust.n){
-            warning("The numerical derivative of the information matrix is computed ignoring the small sample correction \n")
-        }
-
-        args.tempo <- args
-        args.tempo$data <- argsNumDeriv$data
-        args.tempo$df <- FALSE
-        args.tempo$adjust.n <- FALSE
-        args.tempo$adjust.Omega <- FALSE
-
-        ## *** objective functions
-        calcVcov <- function(iParam){ # x <- p.obj
-            pp <- param
-            pp[names(iParam)] <- iParam
-            vcov.param <- do.call(sCorrect,
-                                  args = c(list(object, param = pp), args.tempo))$vcov.param
-            return(as.double(vcov.param))
-            ## return(solve(vcov.param))
-        }
-
-        calcRvcov <- function(iParam){
-            pp <- param
-            pp[names(iParam)] <- iParam
-            iObject <- do.call(sCorrect,
-                               args = c(list(object, param = pp), args.tempo))
-            ## rvcov.param <- crossprod(iObject$score %*% iObject$vcov.param)
-            rvcov.param <- crossprod(iObject$score %*% object$dVcov$vcov.param)
-            ## rvcov.param <- crossprod(iObject$score)
-            return(as.double(rvcov.param))
-        }
-
-        calcScore <- function(iParam){
-            pp <- param
-            pp[names(iParam)] <- iParam
-            score <- do.call(sCorrect,
-                             args = c(list(object, param = pp), args.tempo))$score
-            return(as.double(score))
-        }
-
-        ## *** numerical derivative
-        jac.param <- param[name.3deriv]
-        res.numDeriv <- numDeriv::jacobian(calcVcov, x = jac.param, method = "Richardson")
-        object$dVcov$dVcov.param <- array(res.numDeriv,
-                                          dim = c(n.param,n.param,length(name.3deriv)),
-                                          dimnames = list(name.param, name.param, name.3deriv))
-        ## jac.param <- param
-        ## res.numDeriv <- numDeriv::jacobian(calcRvcov, x = jac.param, method = "Richardson")
-        ## object$dVcov$dRvcov.param <- array(res.numDeriv, 
-        ##                                    dim = c(n.param,n.param,n.param),
-        ##                                    dimnames = list(name.param, name.param, name.param))
-        ## ## browser()
-        ## ## round(e2$sCorrect$dRvcov.param - object$dVcov$dRvcov.param, 10)
-        ## jac.param <- param
-        ## res.numDeriv <- numDeriv::jacobian(calcScore, x = jac.param, method = "Richardson")
-        ## object$dVcov$hessian <- aperm(array(res.numDeriv,
-        ##                                     dim = c(n.cluster,n.param,n.param),
-        ##                                     dimnames = list(NULL, name.param, name.param)),
-        ##                               perm = 3:1)
-        
-        if(trace>0){
-            cat("- done \n")
-        }
-        
-    }else if(derivative == "analytic"){
-        if(trace>0){
-            cat("* Compute first derivative of the information matrix using analytic formula ")
-        }
-
-        ## update conditional moments
-        resD2 <- skeletonDtheta2(object)
-
-        ## identify parameters with second order derivatives
-        if(NROW(object$dVcov$opt$grid.meanparam)>0){
-            object$dVcov$opt$grid.meanparam <- .matchTableList(table = object$dVcov$opt$grid.meanparam,
-                                                               list = resD2$d2mu)
-        }
-        if(NROW(object$dVcov$opt$grid.varparam)>0){
-            object$dVcov$opt$grid.varparam <- .matchTableList(table = object$dVcov$opt$grid.varparam,
-                                                              list = resD2$d2Omega)
-        }
-        
-        ## Hessian
-        object$dVcov$hessian <- .hessian2(dmu = object$conditionalMoment$dmu,
-                                          d2mu = resD2$d2mu,
-                                          dOmega = object$conditionalMoment$dOmega,
-                                          d2Omega = resD2$d2Omega,
-                                          Omega = Omega,
-                                          n.corrected = object$dVcov$n.corrected,
-                                          index.Omega = index.Omega,
-                                          leverage = object$dVcov$leverage,
-                                          n.cluster = n.cluster,
-                                          grid.meanparam = object$dVcov$opt$grid.meanparam,
-                                          n.grid.meanparam = NROW(object$dVcov$opt$grid.meanparam),
-                                          grid.varparam = object$dVcov$opt$grid.varparam,
-                                          n.grid.varparam = NROW(object$dVcov$opt$grid.varparam),
-                                          name.param = name.param,
-                                          n.param = n.param,
-                                          residuals = object$dVcov$residuals)
-
-        ## First derivative of the information matrix
-        dInfo.dtheta <- .dInformation2(dmu = object$conditionalMoment$dmu,
-                                       d2mu = resD2$d2mu,
-                                       dOmega = object$conditionalMoment$dOmega,
-                                       d2Omega = resD2$d2Omega,
-                                       Omega = Omega,
-                                       OmegaM1 = OmegaM1,
-                                       n.corrected = object$dVcov$n.corrected,
-                                       n.cluster = n.cluster,
-                                       index.Omega = index.Omega,
-                                       leverage = object$dVcov$leverage,
-                                       name.param  = name.param,
-                                       name.3deriv = name.3deriv)
-
-        ## First derivative of the variance covariance matrix of the parameters
-        p3 <- dim(dInfo.dtheta)[3]
-
-        object$dVcov$dVcov.param <- array(NA, dim = dim(dInfo.dtheta), dimnames = dimnames(dInfo.dtheta))
-        for(iP in 1:p3){
-            object$dVcov$dVcov.param[,,iP] <- - object$dVcov$vcov.param %*% dInfo.dtheta[,,iP] %*% object$dVcov$vcov.param
-            ## object$dVcov$dVcov.param[,,iP] <- dInfo.dtheta[,,iP]
-        }
-        object$dVcov$dRvcov.param <- array(NA, dim = c(n.param,n.param,n.param), dimnames = list(name.param,name.param,name.param))
-        for(iP in 1:n.param){ ## iP <- 1
-            ## if(name.param[iP] %in% name.3deriv){
-            ##     term1 <- object$dVcov$dVcov.param[,,name.param[iP]] %*% crossprod(object$dVcov$score) %*% object$dVcov$vcov.param
-            ## }else{
-            ##     term1 <- matrix(0, nrow = n.param, ncol = n.param)
-            ## }
-            ## term2 <- object$dVcov$vcov.param %*% object$dVcov$hessian[iP,,] %*% object$dVcov$score %*% object$dVcov$vcov.param
-            ## object$dVcov$dRvcov.param[,,iP] <- term1 + t(term1) + term2 + t(term2)
-
-            term2 <- object$dVcov$vcov.param %*% object$dVcov$hessian[iP,,] %*% object$dVcov$score %*% object$dVcov$vcov.param
-            Reduce("+",lapply(1:NROW(object$dVcov$score), function(iObs){object$dVcov$hessian[iP,,iObs] %*% t(object$dVcov$score[iObs,])}))
-            object$dVcov$dRvcov.param[,,iP] <- term2 + t(term2)
-        }
-
-        if(trace>0){
-            cat("- done \n")
-        }
-    }
-       
-    ## ** export
-    object$dVcov$args <- args
-    return(object$dVcov)
+`sCorrect.default` <- function(object, ...){
+    .Defunct("estimate2", package = "lavaSearch2", msg = "Function sCorrect has been removed from lavaSearch2 version 2.0.0 or higher and replace by the estimate2 method. \n")
 }
 
-## * sCorrect<-
 #' @rdname sCorrect
 #' @export
 `sCorrect<-` <-
   function(x, ..., value) UseMethod("sCorrect<-")
 
-## * sCorrect<-.lm
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.lm` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    class(x) <- append("lm2",class(x))
-    return(x)
-}    
-## * sCorrect<-.lm2
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.lm2` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    return(x)
-}    
-## * sCorrect<-.gls
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.gls` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    class(x) <- append("gls2",class(x))
-    return(x)
-}    
-## * sCorrect<-.gls2
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.gls2` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    return(x)
-}    
-## * sCorrect<-.lme
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.lme` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    class(x) <- append("lme2",class(x))
-    return(x)
-}    
-## * sCorrect<-.lme2
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.lme2` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    return(x)
-}    
-## * sCorrect<-.lvmfit
-#' @rdname sCorrect
-#' @export
-`sCorrect<-.lvmfit` <- function(x, ..., value){
-    dots <- list(...)
-    safeMode <- dots$safeMode
-    dots[["safeMode"]] <- NULL
-    
-    if(identical(safeMode,TRUE)){
-        x$sCorrect <-  try(do.call(sCorrect,
-                                   args = c(list(x, adjust.Omega = value, adjust.n = value),
-                                            dots) ), silent = TRUE)
-        if(value == TRUE && inherits(x$sCorrect,"try-error")){
-            warn <- x$sCorrect
-            x$sCorrect <- do.call(sCorrect,
-                                  args = c(list(x, adjust.Omega = value, adjust.n = FALSE),
-                                           dots) )
-            attr(x$sCorrect,"warning") <- warn
-            warning("sCorrect failed and has been re-run setting the argument \'adjust.n\' to FALSE \n",
-                    "see the attribute \"warning\" of object$sCorrect for the error message \n")
-        }
-    }else{
-        x$sCorrect <-  do.call(sCorrect,
-                               args = c(list(x, adjust.Omega = value, adjust.n = value),
-                                        dots) )
-    }
-    class(x) <- append("lvmfit2",class(x))
-
-    return(x)
-}    
-
-## * sCorrect<-.lvmfit2
 #' @rdname sCorrect
 #' @export
-`sCorrect<-.lvmfit2` <- function(x, ..., value){
-    x$sCorrect <- sCorrect(x, ..., adjust.Omega = value, adjust.n = value)
-    return(x)
-}
-
-## * .matchTableList
-.matchTableList <- function(table, list){
-
-    table$index <- 1:NROW(table)
-    table$deriv12 <- FALSE
-    table$deriv21 <- FALSE
-
-    if(length(list)>0){
-        name1 <- names(list)
-        name2 <- lapply(list, names)
-
-        df.pair <- as.data.frame(do.call(rbind, lapply(1:length(list), function(iParam){
-            cbind(Var1 = name1[iParam], Var2 = name2[[iParam]])
-        })))
-
-        df.merge12 <- merge(table, df.pair, by.x = c("Var1","Var2"), by.y = c("Var1","Var2"))
-        if(NROW(df.merge12)>0){
-            table[df.merge12$index,"deriv12"] <- TRUE
-        }
-        df.merge21 <- merge(table, df.pair, by.x = c("Var1","Var2"), by.y = c("Var2","Var1"))
-        if(NROW(df.merge21)>0){
-            table[df.merge21$index,"deriv21"] <- TRUE
-        }
-    }
-    table$deriv <- (table$deriv12 + table$deriv21) > 0
-    
-    return(table)        
+`sCorrect<-.default` <- function(x, ..., value){
+    .Defunct("estimate2", package = "lavaSearch2", msg = "Function sCorrect has been removed from lavaSearch2 version 2.0.0 or higher and replace by the estimate2 method. \n")
 }
 
 ##----------------------------------------------------------------------
 ### sCorrect.R ends here
-
-
-
-
-
-
-
-
-
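
Migration sketch for downstream code (the replacement call follows the .Defunct message above; e.lvm stands for any fitted lvmfit object, as in the earlier examples):

    ## lavaSearch2 <= 1.5.x (now defunct):
    ##   sCorrect(e.lvm) <- TRUE
    ##   summary2(e.lvm)
    ## lavaSearch2 >= 2.0.0:
    e2.lvm <- estimate2(e.lvm)   # lvmfit2 object storing the correction
    vcov(e2.lvm)                 # corrected variance-covariance, via vcov.lvmfit2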
diff --git a/R/sampleRepeated.R b/R/sampleRepeated.R
new file mode 100644
index 0000000..9a70788
--- /dev/null
+++ b/R/sampleRepeated.R
@@ -0,0 +1,84 @@
+### sampleRepeated.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 18 2019 (15:37) 
+## Version: 
+## Last-Updated: Jan 11 2022 (17:39) 
+##           By: Brice Ozenne
+##     Update #: 20
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * Documentation
+#' @title Simulate Repeated Measurements Over Time
+#' @description Simulate repeated measurements over time (one factor model).
+#' @name sampleRepeated
+#' 
+#' @param n [integer] sample size.
+#' @param n.Xcont [integer] number of continuous covariates acting on the latent variable.
+#' @param n.Xcat [integer] number of categorical covariates acting on the latent variable.
+#' @param n.rep [integer] number of measurement of the response variable.
+#' @param format [character] should the dataset be returned in the \code{"long"} format or in the \code{"wide"} format.
+#'
+#' @return a \code{data.frame} object.
+#'
+#' @examples
+#' 
+#' sampleRepeated(10, format = "wide")
+#' sampleRepeated(10, format = "long")
+
+## * sampleRepeated
+#' @rdname sampleRepeated
+#' @export
+sampleRepeated <- function(n, n.Xcont = 2, n.Xcat = 2, n.rep = 5, format = "long"){
+
+    ## ** check arguments
+    format <- match.arg(format, choices = c("wide","long"))
+    
+    ## ** define lvm
+    m <- lava::lvm()
+    idvars <- "id"
+    distribution(m, ~id) <- function(n, ...){return(1:n)}    
+    for(iY in 1:n.rep){ ## iY <- 1
+        regression(m) <- stats::as.formula(paste0("Y",iY,"~eta"))
+    }
+    if(n.Xcont>0){
+        for(iXcont in 1:n.Xcont){ ## iXcont <- 1
+            regression(m) <- stats::as.formula(paste0("eta~X",iXcont))
+        }
+        idvars <- c(idvars,paste0("X",1:n.Xcont))
+    }
+    
+    if(n.Xcat>0){
+        for(iXcat in 1:n.Xcat){ ## iXcat <- 1
+            regression(m) <- stats::as.formula(paste0("eta~Z",iXcat))
+            categorical(m, labels=c("a","b","c")) <- paste0("Z",iXcat)
+        }
+        idvars <- c(idvars,paste0("Z",1:n.Xcat))
+    }    
+    latent(m) <- ~eta
+
+    dW <- lava::sim(m, n = n, latent = FALSE)
+    if(format == "wide"){
+        return(dW)
+    }else{
+        dL <- stats::reshape(dW,
+                             direction = "long",
+                             idvar = idvars,
+                             varying = list(paste0("Y",1:n.rep)),
+                             v.names = "Y",
+                             timevar = "time")
+        rownames(dL) <- NULL
+        return(dL)
+    }
+    
+}
+
+######################################################################
+### sampleRepeated.R ends here
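
A short follow-up sketch (illustration only; the model syntax mirrors the generating lvm above, restricted to the continuous covariates for brevity):

    set.seed(10)
    dW <- sampleRepeated(100, n.rep = 3, format = "wide")
    m <- lava::lvm(c(Y1, Y2, Y3) ~ eta, eta ~ X1 + X2)
    lava::latent(m) <- ~eta
    lava::estimate(m, data = dW)   # refit the one-factor structure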
diff --git a/R/score2.R b/R/score2.R
deleted file mode 100644
index 033768a..0000000
--- a/R/score2.R
+++ /dev/null
@@ -1,189 +0,0 @@
-### score2.R --- 
-#----------------------------------------------------------------------
-## author: Brice Ozenne
-## created: okt 12 2017 (16:43) 
-## Version: 
-## last-updated: jul 31 2020 (10:44) 
-##           By: Brice Ozenne
-##     Update #: 2263
-#----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-#----------------------------------------------------------------------
-## 
-### Code:
-
-## * Documentation - score2
-#' @title  Extract the Individual Score
-#' @description  Extract the individual score from a Gaussian linear model.
-#' @name score2
-#'
-#' @param object a linear model or a latent variable model
-#' @param param [optional] the fitted parameters.
-#' @param data [optional] the data set.
-#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias? Only relevant if the \code{sCorrect} function has not yet been applied to the object.
-#' @param ... arguments to be passed to \code{sCorrect}.
-#'
-#' @details If argument \code{param} or \code{data} is not null, then the small sample size correction is recomputed to correct the influence function.
-#'
-#' @seealso \code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
-#'
-#' @return A matrix containing the score relative to each sample (in rows)
-#' and each model coefficient (in columns).
-#' 
-#' @examples
-#' n <- 5e1
-#' p <- 3
-#' X.name <- paste0("X",1:p)
-#' link.lvm <- paste0("Y~",X.name)
-#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
-#'
-#' m <- lvm(formula.lvm)
-#' distribution(m,~Id) <- Sequence.lvm(0)
-#' set.seed(10)
-#' d <- lava::sim(m,n)
-#'
-#' ## linear model
-#' e.lm <- lm(formula.lvm,data=d)
-#' score.tempo <- score2(e.lm, bias.correct = FALSE)
-#' colMeans(score.tempo)
-#'
-#' ## latent variable model
-#' e.lvm <- estimate(lvm(formula.lvm),data=d)
-#' score.tempo <- score2(e.lvm, bias.correct = FALSE)
-#' range(score.tempo-score(e.lvm, indiv = TRUE))
-#'
-#' @concept small sample inference
-#' @export
-`score2` <-
-  function(object, ...) UseMethod("score2")
-
-## * score2.lm
-#' @rdname score2
-#' @export
-score2.lm <- function(object, param = NULL, data = NULL, bias.correct = TRUE, ...){
-    sCorrect(object, param = param, data = data, df = FALSE, ...) <- bias.correct
-
-    ### ** export
-    return(object$sCorrect$score)
-}
-
-## * score2.gls
-#' @rdname score2
-#' @export
-score2.gls <- score2.lm
-
-## * score2.lme
-#' @rdname score2
-#' @export
-score2.lme <- score2.lm
-
-## * score2.lvmfit
-#' @rdname score2
-#' @export
-score2.lvmfit <- score2.lm
-
-## * score2.lm2
-#' @rdname score2
-#' @export
-score2.lm2 <- function(object, param = NULL, data = NULL, ...){
-
-    ### ** compute the score
-    if(!is.null(param) || !is.null(data)){
-        args <- object$sCorrect$args
-        args$df <- FALSE
-        object$sCorrect <- do.call(sCorrect,
-                                   args = c(list(object, param = param, data = data),
-                                            args))
-    }
-
-    ### ** export
-    return(object$sCorrect$score)
-
-}
-
-## * score2.gls2
-#' @rdname score2
-#' @export
-score2.gls2 <- score2.lm2
-
-## * score2.lme2
-#' @rdname score2
-#' @export
-score2.lme2 <- score2.lm2
-
-## * score2.lvmfit
-#' @rdname score2
-#' @export
-score2.lvmfit2 <- score2.lm2
-
-## * .score2
-#' @title Compute the Corrected Score.
-#' @description Compute the corrected score when there is no missing value.
-#' @name score2-internal
-#' 
-#' @param n.cluster [integer >0] the number of observations.
-#' 
-#' @keywords internal
-.score2 <- function(epsilon, Omega, OmegaM1, dmu, dOmega,                    
-                    name.param, name.meanparam, name.varparam,
-                    index.Omega, n.cluster, indiv){
-
-### ** Prepare
-    test.global <- is.null(index.Omega)
-    out.score <- matrix(0, nrow = n.cluster, ncol = length(name.param),
-                        dimnames = list(NULL,name.param))
-            
-### ** global
-    if(test.global){
-        epsilon.OmegaM1 <- epsilon %*% OmegaM1
-
-        ## *** Compute score relative to the mean coefficients
-        for(iP in name.meanparam){ # iP <- 1
-            out.score[,iP] <- out.score[,iP] + rowSums(dmu[[iP]] * epsilon.OmegaM1)
-        }
-        
-        ## *** Compute score relative to the variance-covariance coefficients
-        for(iP in name.varparam){ # iP <- 1
-            term2 <- - 1/2 * tr(OmegaM1 %*% dOmega[[iP]])            
-            term3 <- 1/2 * rowSums(epsilon.OmegaM1 %*% dOmega[[iP]] * epsilon.OmegaM1)
-            out.score[,iP] <- out.score[,iP] + as.double(term2) + term3
-        }        
-    }
-
-
-### ** individual specific
-    if(!test.global){
-
-        for(iC in 1:n.cluster){
-            iIndex <- index.Omega[[iC]]
-            iEpsilon.OmegaM1 <- OmegaM1[[iC]] %*% cbind(epsilon[iC,iIndex])
-
-
-            ## *** Compute score relative to the mean coefficients
-            for(iP in name.meanparam){ # iP <- name.meanparam[1]
-                out.score[iC,iP] <- out.score[iC,iP] + dmu[[iP]][iC,iIndex] %*% iEpsilon.OmegaM1
-            }
-
-            ## *** Compute score relative to the variance-covariance coefficients
-            for(iP in name.varparam){ # iP <- name.varparam[1]
-                term2 <- - 1/2 * tr(OmegaM1[[iC]] %*% dOmega[[iP]][iIndex,iIndex,drop=FALSE])
-                term3 <- 1/2 * sum(iEpsilon.OmegaM1 * dOmega[[iP]][iIndex,iIndex,drop=FALSE] %*% iEpsilon.OmegaM1)
-                out.score[iC,iP] <- out.score[iC,iP] + as.double(term2) + term3 
-            }
-        }
-        
-    }
-
-    ### ** export
-    if(indiv==FALSE){
-        out.score <- colSums(out.score)
-    }
-    return(out.score)
-}
-
-
-#----------------------------------------------------------------------
-### score2.R ends here
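
For reference, the variance part of the removed .score2 implements the Gaussian score identity dl_i/dtheta = -1/2 tr(Omega^-1 dOmega) + 1/2 epsilon_i' Omega^-1 dOmega Omega^-1 epsilon_i. A univariate sanity check (toy values, not package code; here Omega = sigma2 so dOmega = 1):

    library(numDeriv)
    set.seed(10)
    eps <- rnorm(5); sigma2 <- 2
    analytic <- -1/(2 * sigma2) + eps^2/(2 * sigma2^2)   # score formula
    numeric <- sapply(eps, function(e){                  # numerical check
        grad(function(s2) dnorm(e, sd = sqrt(s2), log = TRUE), sigma2)
    })
    range(analytic - numeric)                            # ~ 0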
diff --git a/R/skeleton.R b/R/skeleton.R
deleted file mode 100644
index 4de5118..0000000
--- a/R/skeleton.R
+++ /dev/null
@@ -1,1325 +0,0 @@
-### skeleton.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: nov  8 2017 (10:35) 
-## Version: 
-## Last-Updated: feb  8 2019 (11:48) 
-##           By: Brice Ozenne
-##     Update #: 1025
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * Documentation - skeleton
-#' @title Pre-computation for the Score
-#' @description Pre-compute quantities that are necessary to compute the score of a lvm model.
-#' @name skeleton
-#' 
-#' @param object a \code{lvm} object.
-#' @param df.param.all [data.frame] output of \code{\link{coefType}} containing the type of each coefficient.
-#' @param param2originalLink [named character vector] matching between the name of the coefficient in lava and their label.
-#' @param B,alpha.XGamma,Lambda,Psi [matrix] pre-computed matrix.
-#' @param OD [list] the pre-computed quantities for the second derivatives. 
-#' @param as.lava [logical] should the name of the links be used to name the coefficient?
-#' Otherwise uses the labels (when defined) of each coefficient.
-#' @param name.endogenous [character vector] name of the endogenous variables
-#' @param name.latent [character vector] name of the latent variables
-#' @param p [numeric vector, optional] vector of coefficients at which to evaluate the score.
-#' @param data [data.frame, optional] data set.
-#' @param ... [internal] only used by the generic method.
-#' 
-#' @details
-#' When the user specifies names for the coefficients (e.g. Y1[mu:sigma]) or uses constraints (Y1~beta*X1), \code{as.lava=FALSE} will use the names specified by the user (e.g. mu, sigma, beta) while \code{as.lava=TRUE} will use the name of the first link defining the coefficient.
-#'
-#' @examples
-#' \dontrun{
-#' skeleton <- lavaSearch2::skeleton
-#' skeleton.lvm <- lavaSearch2::skeleton.lvm
-#' skeleton.lvmfit <- lavaSearch2::skeleton.lvmfit
-#' 
-#' ## without constraints
-#' m <- lvm(Y1~X1+X2+eta,Y2~X3+eta,Y3~eta)
-#' latent(m) <- ~eta
-#' 
-#' e <- estimate(m, lava::sim(m,1e2))
-#' M.data <- as.matrix(model.frame(e))
-#'
-#' skeleton(e$model, as.lava = TRUE,
-#'          name.endogenous = endogenous(e), n.endogenous = 3,
-#'          name.latent = latent(e), 
-#'          update.value = FALSE)
-#' skeleton(e, data = M.data, p = pars(e), as.lava = TRUE,
-#'          name.endogenous = endogenous(e), n.endogenous = 3,
-#'          name.latent = latent(e), 
-#'          update.value = TRUE)
-#'
-#' ## with constraints
-#' m <- lvm(Y[mu:sigma] ~ beta*X1+X2)
-#' e <- estimate(m, lava::sim(m,1e2))
-#' M.data <- as.matrix(model.frame(e))
-#'
-#' skeleton(e$model, as.lava = TRUE,
-#'          name.endogenous = "Y", n.endogenous = 1,
-#'          name.latent = NULL, 
-#'          update.value = FALSE)$skeleton
-#' 
-#' skeleton(e, data = M.data, p = pars(e), as.lava = FALSE,
-#'          name.endogenous = "Y", n.endogenous = 1,
-#'          name.latent = NULL, 
-#'          update.value = FALSE)$skeleton
-#' 
-#'}
-#' @concept small sample inference
-#' @concept derivative of the score equation
-#' @keywords internal
-`skeleton` <-
-    function(object, ...) UseMethod("skeleton")
-
-
-## * skeleton.lvm
-#' @rdname skeleton
-skeleton.lvm <- function(object, as.lava,
-                         name.endogenous, name.latent,
-                         ...){
-    detail <- Y <- NULL ## [:for CRAN check] subset
-
-    n.endogenous <- length(name.endogenous)
-    n.latent <- length(name.latent)
-    
-### ** prepare
-    df.param.all  <- coefType(object, as.lava = FALSE)
-    if(as.lava){
-        param2originalLink <- subset(df.param.all, subset = !is.na(lava), select = c("originalLink", "param"))
-        param2originalLink <- stats::setNames(param2originalLink$originalLink, param2originalLink$param)
-    }else{
-        param2originalLink <- subset(df.param.all, subset = !is.na(lava), select = "param", drop = TRUE)
-        param2originalLink <- stats::setNames(param2originalLink, param2originalLink)
-    }
-    df.param.detail <- subset(df.param.all, subset = !is.na(detail)) ## important: cannot subset on the lava column because we need to keep track of the constrained parameters
-    
-    skeleton <- list()
-    value <- list()
-    skeleton$type <- setNames(df.param.all[!is.na(df.param.all$lava),"detail"], df.param.all[!is.na(df.param.all$lava),"name"])
-    skeleton$toUpdate <- stats::setNames(c(rep(FALSE,8),TRUE,TRUE,TRUE,TRUE),
-                                         c("nu","K","Lambda","Sigma","alpha","Gamma","B","Psi",
-                                           "extra","mu","Omega","param"))
-   
-### ** Measurement model
-    
-    ## *** nu
-    df.param.nu <-  subset(df.param.detail, subset = detail=="nu", select = c("value", "param", "Y", "name"))
-    df.param.nu <- df.param.nu[order(df.param.nu$name),]
-    value$nu <- stats::setNames(df.param.nu$value,df.param.nu$Y)
-    skeleton$nu <- stats::setNames(param2originalLink[df.param.nu$param],df.param.nu$Y)
-    skeleton$toUpdate["nu"] <- any(is.na(value$nu))
-    
-    ## *** X K
-    df.param.K <- subset(df.param.detail, subset = detail == "K", select = c("value", "param", "X", "Y"))
-    df.param.K <- df.param.K[order(df.param.K$Y),]
-
-    if(NROW(df.param.K)>0){
-        value$K <- stats::setNames(lapply(1:n.endogenous, function(iEndogenous){ # iEndogenous <- 1
-            subset(df.param.K, subset = Y == name.endogenous[iEndogenous], select = "value", drop = TRUE)
-        }), name.endogenous)
-            
-        skeleton$K <- stats::setNames(lapply(1:n.endogenous, function(iEndogenous){
-            param2originalLink[subset(df.param.K, subset = Y == name.endogenous[iEndogenous], select = "param", drop = TRUE)]
-        }), name.endogenous)
-    
-        skeleton$XK <- stats::setNames(lapply(1:n.endogenous, function(iEndogenous){
-            subset(df.param.K, subset = Y == name.endogenous[iEndogenous], select = "X", drop = TRUE)
-        }), name.endogenous)
-        
-        skeleton$toUpdate["K"] <- any(unlist(lapply(value$K,is.na)))
-    }
-    
-    ## *** Lambda
-    if(n.latent>0){
-        ## define matrix
-        value$Lambda <- matrix(0,nrow = n.latent, ncol = n.endogenous,
-                               dimnames = list(name.latent,name.endogenous))
-        skeleton$Lambda <- matrix(as.character(NA),nrow = n.latent, ncol = n.endogenous,
-                                  dimnames = list(name.latent,name.endogenous))
-        ## update according to the model
-        df.param.Lambda <- subset(df.param.detail, subset = detail == "Lambda", select = c("X","Y","param","value","name"))
-        df.param.Lambda$index <- match(df.param.Lambda$X, name.latent) + n.latent * (match(df.param.Lambda$Y, name.endogenous)-1)
-        df.param.Lambda <- df.param.Lambda[order(df.param.Lambda$name),]
-
-        ## store in the Lambda matrix the names of the coefficients and their pre-computed values
-        dfNA.tempo <- subset(df.param.Lambda, subset = is.na(value))
-        skeleton$Lambda[dfNA.tempo$index] <- stats::setNames(param2originalLink[dfNA.tempo$param],dfNA.tempo$Y)
-        dfNNA.tempo <- subset(df.param.Lambda, subset = !is.na(value))
-        value$Lambda[dfNNA.tempo$index] <- stats::setNames(dfNNA.tempo$value,dfNNA.tempo$Y)
-        value$Lambda[!is.na(skeleton$Lambda)] <- NA
-
-        skeleton$toUpdate["Lambda"] <- any(is.na(value$Lambda))
-    }
-
-    ## *** Sigma    
-    ## define matrix
-    value$Sigma <- matrix(0,nrow = n.endogenous, ncol = n.endogenous,
-                          dimnames = list(name.endogenous,name.endogenous))
-    skeleton$Sigma <- matrix(as.character(NA),nrow = n.endogenous, ncol = n.endogenous,
-                             dimnames = list(name.endogenous,name.endogenous))
-    
-    ## update according to the model
-    df.param.Sigma <- subset(df.param.detail,
-                             subset = detail %in% c("Sigma_var","Sigma_cov"),
-                             select = c("X","Y","param","value","name"))
-    df.param.Sigma$index <- match(df.param.Sigma$X, name.endogenous) + n.endogenous*(match(df.param.Sigma$Y, name.endogenous)-1)
-
-    dfNA.tempo <- subset(df.param.Sigma, subset = is.na(value))
-    skeleton$Sigma[dfNA.tempo$index] <- param2originalLink[dfNA.tempo$param]
-    dfNNA.tempo <- subset(df.param.Sigma, subset = !is.na(value))
-    value$Sigma[dfNNA.tempo$index] <- dfNNA.tempo$value
-
-    ## symmetrize
-    skeleton$Sigma <- symmetrize(skeleton$Sigma, update.upper = TRUE)
-    value$Sigma <- symmetrize(value$Sigma, update.upper = TRUE)
-    value$Sigma[!is.na(skeleton$Sigma)] <- NA
-
-    skeleton$toUpdate["Sigma"] <- any(is.na(value$Sigma))
-
-### ** Structural model
-    if(n.latent>0){
-        ## *** alpha 
-        df.param.alpha <-  subset(df.param.detail,
-                                  subset = detail=="alpha",
-                                  select = c("value","param","Y"))
-        value$alpha <- stats::setNames(df.param.alpha$value,df.param.alpha$Y)
-        skeleton$alpha <- param2originalLink[stats::setNames(df.param.alpha$param,df.param.alpha$Y)]
-
-        skeleton$toUpdate["alpha"] <- any(is.na(value$alpha))
-        
-        ## *** X Gamma
-        df.param.Gamma <- subset(df.param.detail,
-                                 subset = detail=="Gamma",
-                                 select = c("value","param","X","Y"))
-        
-        if(NROW(df.param.Gamma)>0){
-            
-            value$Gamma <- stats::setNames(lapply(1:n.latent, function(iLatent){
-                subset(df.param.Gamma, subset = Y==name.latent[iLatent], select = "value", drop = TRUE)
-            }), name.latent)
-            
-            skeleton$Gamma <- stats::setNames(lapply(1:n.latent, function(iLatent){ # iLatent <- 1
-                param2originalLink[subset(df.param.Gamma, subset = Y==name.latent[iLatent], select = "param", drop = TRUE)]
-            }), name.latent)
-    
-            skeleton$XGamma <- stats::setNames(lapply(1:n.latent, function(iLatent){
-                subset(df.param.Gamma, subset = Y==name.latent[iLatent], select = "X", drop = TRUE)
-            }), name.latent)
-
-            skeleton$toUpdate["Gamma"] <- any(unlist(lapply(value$Gamma,is.na)))
-        }
-        
-        ## *** B
-        ## define matrix
-        value$B <- matrix(0,nrow = n.latent, ncol = n.latent,
-                          dimnames = list(name.latent,name.latent))
-        skeleton$B <- matrix(as.character(NA),nrow = n.latent, ncol = n.latent,
-                             dimnames = list(name.latent,name.latent))
-
-        if(any("B" %in% df.param.all$detail)){
-            ## update according to the model
-            df.param.B <- subset(df.param.detail,
-                                 subset = detail == "B",
-                                 select = c("X", "Y", "param", "value", "name"))
-            df.param.B$index <- match(df.param.B$X, name.latent) + n.latent*(match(df.param.B$Y, name.latent)-1)
-            dfNA.tempo <- subset(df.param.B, subset = is.na(value))            
-            skeleton$B[dfNA.tempo$index] <- param2originalLink[dfNA.tempo$param]
-            dfNNA.tempo <- subset(df.param.B, subset = !is.na(value))
-            value$B[dfNNA.tempo$index] <- dfNNA.tempo$value
-            value$B[!is.na(skeleton$B)] <- NA
-
-            skeleton$toUpdate["B"] <- any(is.na(value$B))
-        }
-    
-        ## *** Psi    
-        ## define matrix
-        value$Psi <- matrix(0,nrow = n.latent, ncol = n.latent,
-                            dimnames = list(name.latent,name.latent))
-        skeleton$Psi <- matrix(as.character(NA),nrow = n.latent, ncol = n.latent,
-                               dimnames = list(name.latent,name.latent))
-
-        ## update according to the model
-        df.param.Psi <- subset(df.param.all,
-                               subset = detail %in% c("Psi_var","Psi_cov"),
-                               select = c("X", "Y", "param", "value", "name"))
-                               
-        df.param.Psi$index <- match(df.param.Psi$X, name.latent) + n.latent*(match(df.param.Psi$Y, name.latent)-1)
-
-        dfNA.tempo <- subset(df.param.Psi, subset = is.na(value))      
-        skeleton$Psi[dfNA.tempo$index] <- param2originalLink[dfNA.tempo$param]
-        dfNNA.tempo <- subset(df.param.Psi, subset = !is.na(value))
-        value$Psi[dfNNA.tempo$index] <- dfNNA.tempo$value
-
-        ## symmetrize
-        skeleton$Psi <- symmetrize(skeleton$Psi, update.upper = TRUE)
-        value$Psi <- symmetrize(value$Psi, update.upper = TRUE)
-        value$Psi[!is.na(skeleton$Psi)] <- NA
-
-        skeleton$toUpdate["Psi"] <- any(is.na(value$Psi))
-    }
-
-### ** prepare matrix for updating the variance parameter according to the adjusted Omega
-    index.matrix <- data.frame(index = which(upper.tri(skeleton$Sigma, diag = TRUE)),
-                               which(upper.tri(skeleton$Sigma, diag = TRUE), arr.ind = TRUE)
-                               )
-    index.keep <- intersect(which(df.param.all$detail %in% c("Sigma_var","Sigma_cov","Psi_var","Psi_cov")),
-                            which(!is.na(df.param.all$lava)))
-    name.var <- df.param.all[index.keep,"name"]
-
-    name.rhs <- paste(name.endogenous[index.matrix[,"row"]],
-                      lava.options()$symbols[2],
-                      name.endogenous[index.matrix[,"col"]],
-                      sep = "")
-    n.rhs <- length(name.rhs)
-    
-    A <- matrix(0, nrow = n.rhs, ncol = length(name.var),
-                dimnames = list(name.rhs, name.var))
-    vec.Sigma <- skeleton$Sigma[index.matrix$index]
-    for(i in which(!is.na(vec.Sigma))){
-        A[i, vec.Sigma[i]] <- 1
-    }
-
-    if(n.latent>0){
-        index.Psi <- rbind(cbind(index = which(value$Psi!=0),
-                                 which(value$Psi!=0, arr.ind = TRUE)),
-                           cbind(index = which(is.na(value$Psi)),
-                                 which(is.na(value$Psi), arr.ind = TRUE))
-                           )
-    }else{
-        index.Psi <- NULL        
-    }
-
-    toUpdate2 <- c(nu = FALSE,
-                   K = FALSE,
-                   Lambda = FALSE,
-                   Sigma = TRUE,
-                   alpha = FALSE,
-                   Gamma = FALSE,
-                   B = FALSE,
-                   Psi = TRUE,
-                   extra = TRUE,
-                   mu = FALSE,
-                   Omega = FALSE,
-                   param = FALSE)
-
-    index.LambdaB <- names(skeleton$type)[which(skeleton$type %in% c("Lambda","B"))]
-    adjustMoment <- list(index.matrix = index.matrix,
-                         index.Psi = index.Psi,
-                         index.LambdaB = index.LambdaB,
-                         toUpdate = toUpdate2,
-                         A = A,
-                         name.endogenous = name.endogenous,
-                         name.latent = name.latent,
-                         name.var = name.var,
-                         n.rhs = n.rhs)
-
-
-### ** export
-    return(list(skeleton = skeleton,
-                value = value,
-                df.param = df.param.all,
-                adjustMoment = adjustMoment, 
-                param2originalLink = param2originalLink)
-           )
-}
-
-
-## * skeleton.lvmfit
-#' @rdname skeleton
-skeleton.lvmfit <- function(object, param, data,
-                            name.endogenous, name.latent,
-                            ...){
-    
-    n.endogenous <- length(name.endogenous)
-    n.latent <- length(name.latent)
-    n.data <- NROW(data)
-    
-    skeleton <- object$conditionalMoment$skeleton
-    toUpdate <- skeleton$toUpdate
-    value <- object$conditionalMoment$value
-    
-### ** Update skeleton with the current values
-    ## *** nu
-    if(toUpdate["nu"]){
-        index.update <- which(!is.na(skeleton$nu))
-        value$nu[index.update] <- param[skeleton$nu[index.update]]
-    }
-    
-    ## *** K
-    if(toUpdate["K"]){
-        for(iY in 1:n.endogenous){ # iY <- 3
-            if(length(skeleton$K[[iY]])>0){
-                index.update <- which(!is.na(skeleton$K[[iY]]))
-                value$K[[iY]][index.update] <- param[skeleton$K[[iY]][index.update]]
-            }
-        }
-    }
-
-    ## *** Lambda
-    if(toUpdate["Lambda"]){
-        index.update <- which(!is.na(skeleton$Lambda))
-        value$Lambda[index.update] <- param[skeleton$Lambda[index.update]]
-    }
-    
-    ## *** Sigma
-    if(toUpdate["Sigma"]){
-        index.update <- which(!is.na(skeleton$Sigma))
-        value$Sigma[index.update] <- param[skeleton$Sigma[index.update]]
-    }
-
-    ## *** mu
-    if(toUpdate["mu"]){ ## linear predictor (measurement model without latent variable)   
-        value$nu.XK <- matrix(0, nrow = n.data, ncol = n.endogenous, byrow = TRUE,
-                                       dimnames = list(NULL,name.endogenous))
-        for(iY in 1:n.endogenous){ # iY <- 1
-            iY2 <- name.endogenous[iY]
-            if(length(value$K[[iY2]])>0){
-                value$nu.XK[,iY2] <- value$nu[iY2] + data[,skeleton$XK[[iY2]],drop=FALSE] %*% value$K[[iY2]]
-            }else{
-                value$nu.XK[,iY2] <- value$nu[iY2]
-            }
-        }
-    }
-        
-### ** Structural model
-    if(n.latent>0){
-        ## *** alpha
-        if(toUpdate["alpha"]){
-            index.update <- which(!is.na(skeleton$alpha))
-            value$alpha[index.update] <- param[skeleton$alpha[index.update]]
-        }
-        
-        ## *** Gamma
-        if(toUpdate["Gamma"]){
-            for(iLatent in 1:n.latent){
-                if(length(skeleton$Gamma[[iLatent]])>0){
-                    index.update <- which(!is.na(skeleton$Gamma[[iLatent]]))
-                    value$Gamma[[iLatent]][index.update] <- param[skeleton$Gamma[[iLatent]][index.update]]
-                }
-            }
-        }
-        
-        ## *** B
-        if(toUpdate["B"] && length(skeleton$B)>0){
-            index.update <- which(!is.na(skeleton$B))
-            value$B[index.update] <- param[skeleton$B[index.update]]
-        }
-        
-        ## *** Psi
-        if(toUpdate["Psi"] && length(skeleton$Psi)>0){
-            index.update <- which(!is.na(skeleton$Psi))
-            value$Psi[index.update] <- param[skeleton$Psi[index.update]]
-        }
-        
-        ## *** mu
-        if(toUpdate["mu"]){ ## linear predictor (latent variable)            
-            value$alpha.XGamma <- matrix(0,nrow = n.data, ncol = n.latent, byrow = TRUE,
-                                         dimnames = list(NULL,name.latent))
-        
-            for(iLatent in 1:n.latent){
-                iLatent2 <- name.latent[iLatent]
-                if(length(value$Gamma[[iLatent2]])>0){
-                    value$alpha.XGamma[,iLatent2] <- value$alpha[iLatent2] + data[,skeleton$XGamma[[iLatent2]],drop=FALSE] %*% value$Gamma[[iLatent2]]
-                }else{
-                    value$alpha.XGamma[,iLatent2] <- value$alpha[iLatent2]
-                }
-            }
-            value$iIB <- solve(diag(1,n.latent,n.latent)-value$B)            
-            value$alpha.XGamma.iIB <- value$alpha.XGamma %*% value$iIB
-        }
-        
-        ## *** extra
-        if(toUpdate["extra"]){
-            value$iIB.Lambda <-  value$iIB %*% value$Lambda    
-            value$Psi.iIB <- value$Psi %*% value$iIB
-            value$tLambda.tiIB.Psi.iIB <- t(value$iIB.Lambda) %*% value$Psi.iIB
-        }
-    }
-
-### ** Export
-    return(value)
-}
-
-
-## * skeletonDtheta
-#' @rdname skeleton
-`skeletonDtheta` <-
-    function(object, ...) UseMethod("skeletonDtheta")
-## * skeletonDtheta.gls
-#' @rdname skeleton
-skeletonDtheta.gls <- function(object, class.cor, class.var, X, 
-                               sigma2.base0, Msigma2.base0, M.corcoef, ref.group,
-                               index.lower.tri, indexArr.lower.tri,
-                               name.endogenous, n.endogenous, cluster, n.cluster,
-                               var.coef, name.varcoef, name.otherVar, n.varcoef,
-                               cor.coef, name.corcoef, n.corcoef,
-                               index.Omega, update.mean, update.variance,
-                               ...){
-
-    out <- list()
-    ## ** mean
-    if(update.mean == TRUE){
-        name.X <- colnames(X)
-        out$dmu <- lapply(name.X, function(iCoef){ # iCoef <- name.X[1]
-            dmu.tempo <- matrix(NA, nrow = n.cluster, ncol = n.endogenous,
-                                dimnames = list(NULL, name.endogenous))
-            for(iC in 1:n.cluster){ ## iC <- 5
-                dmu.tempo[iC,index.Omega[[iC]]] <- X[cluster==iC,iCoef]
-            }
-        
-            return(dmu.tempo)
-        })
-        names(out$dmu) <- name.X
-    }
-    
-    ## ** variance
-    if(update.variance == TRUE){
-        out$dOmega <- vector(mode = "list", length = n.corcoef + n.varcoef)
-        names(out$dOmega) <- c(name.corcoef, name.varcoef)
-
-        ## *** dispersion coefficient
-        out$dOmega[["sigma2"]] <- diag(sigma2.base0, nrow = n.endogenous, ncol = n.endogenous)
-   
-        if("NULL" %in% class.cor == FALSE){
-            out$dOmega[["sigma2"]][index.lower.tri] <- Msigma2.base0[index.lower.tri] * cor.coef[M.corcoef[index.lower.tri]]
-            out$dOmega[["sigma2"]] <- symmetrize(out$dOmega[["sigma2"]])      
-        }
-        dimnames(out$dOmega[["sigma2"]]) <-  list(name.endogenous, name.endogenous)
-
-        ## *** multiplicative factors
-        if("NULL" %in% class.var == FALSE){
-
-            for(iVar in name.otherVar){ # iVar <- name.otherVar
-                iTest.endogenous <- ref.group %in% iVar
-                out$dOmega[[iVar]] <- var.coef["sigma2"]*diag(iTest.endogenous,
-                                                              nrow = n.endogenous, ncol = n.endogenous)
-
-                if("NULL" %in% class.cor == FALSE){
-                    index.iVar <- which(rowSums(indexArr.lower.tri==which(iTest.endogenous))>0)
-
-                    ##  d sqrt(x) / d x = 1/(2 sqrt(x)) = sqrt(x) / (2*x)
-                    out$dOmega[[iVar]][index.lower.tri[index.iVar]] <- var.coef["sigma2"]*out$dOmega[["sigma2"]][index.lower.tri[index.iVar]]/(2*var.coef[iVar])
-                    out$dOmega[[iVar]] <- symmetrize(out$dOmega[[iVar]])
-                }
-            
-                dimnames(out$dOmega[[iVar]]) <- list(name.endogenous, name.endogenous)            
-            }
-        }
-    
-        ## ** correlation
-        if("NULL" %in% class.cor == FALSE){
-            for(iVar in name.corcoef){
-                out$dOmega[[iVar]] <- Msigma2.base0 * var.coef["sigma2"] * (M.corcoef==iVar)
-            }
-        }
-    }
-    
-    ### ** export
-    return(out)
-}
-
-## * skeletonDtheta.lme
-#' @rdname skeleton
-skeletonDtheta.lme <- function(object, name.endogenous, n.endogenous,
-                               name.rancoef, ...){
-
-    out <- skeletonDtheta.gls(object,
-                              name.endogenous = name.endogenous,
-                              n.endogenous = n.endogenous,
-                              ...)
-
-    out$dOmega[[name.rancoef]] <- matrix(1, nrow = n.endogenous, ncol = n.endogenous,
-                                         dimnames = list(name.endogenous,name.endogenous)
-                                         )
-
-    return(out)
-}
-
-## * skeletonDtheta.lvm
-#' @rdname skeleton
-skeletonDtheta.lvm <- function(object, data,
-                               df.param.all, param2originalLink,
-                               name.endogenous, name.latent, ...){
-
-    factitious <- marginal <- param <- value <- X <- Y <- NULL ## [:for CRAN check] subset
-
-    n.endogenous <- length(name.endogenous)
-    n.latent <- length(name.latent)
-
-    df.param <- subset(df.param.all, subset = is.na(value) & marginal == FALSE & factitious == FALSE)
-    Utype.by.detail <- tapply(df.param$detail, df.param$param, function(x){length(unique(x))})
-    if(any(Utype.by.detail>1)){
-        stop("cannot constrain two coefficients of different types to be equal \n")
-    }
-    name.param <- subset(df.param, subset = !duplicated(param), select = param, drop = TRUE)
-    n.param <- length(name.param)
-
-    name.originalLink <- as.character(param2originalLink)
-
-### ** prepare
-    n.data <- NROW(data)
-    name.data <- colnames(data)
-    
-    mean.param <- c("nu","K","alpha","Gamma","Lambda","B")
-    vcov.param <- c("Sigma_var","Sigma_cov","Psi_var","Psi_cov","Lambda","B")    
-    dmu <- list()
-    dOmega <- list()
-    dLambda <- list()
-    dB <- list()
-    dPsi <- list()
-
-    toUpdate <- stats::setNames(vector(mode = "logical", n.param),name.originalLink)
-    
-    ### ** Compute derivative or prepare for the derivative
-    for(iName in name.param){ # iName <- name.param[1]
-
-        iName2 <- as.character(param2originalLink[iName])
-        iType <- unique(subset(df.param, subset = (param == iName), select = "detail", drop = TRUE))
-        iY <- subset(df.param, subset = param %in% iName, select = Y, drop = TRUE)
-        iX <- subset(df.param, subset = param %in% iName, select = X, drop = TRUE)
-
-        ## *** derivative regarding the mean        
-        if(iType %in% mean.param){            
-            if(iType=="nu"){
-                dmu[[iName2]] <- matrix(as.numeric(name.endogenous %in% iY),
-                                        nrow = n.data, ncol = n.endogenous, byrow = TRUE,
-                                        dimnames = list(NULL, name.endogenous))
-                toUpdate[iName2] <- FALSE
-            }else if(iType=="K"){
-                dmu[[iName2]] <- matrix(0, nrow = n.data, ncol = n.endogenous, byrow = TRUE,
-                                        dimnames = list(NULL, name.endogenous))
-                for(Y.tempo in unique(iY)){                    
-                    dmu[[iName2]][,Y.tempo] <- rowSums(data[,iX[iY == Y.tempo],drop=FALSE])
-                }
-                toUpdate[iName2] <- FALSE
-            }else if(iType=="alpha"){
-                dmu[[iName2]] <- matrix(as.numeric(name.latent %in% unique(iY)), nrow = n.data, ncol = n.latent, byrow = TRUE,
-                                        dimnames = list(NULL, name.latent))                
-                toUpdate[iName2] <- TRUE
-            }else if(iType=="Gamma"){
-                dmu[[iName2]] <- matrix(0, nrow = n.data, ncol = n.latent, byrow = TRUE,
-                                        dimnames = list(NULL, name.latent))
-                for(Y.tempo in unique(iY)){ # Y.tempo <- "eta"
-                    dmu[[iName2]][,Y.tempo] <- rowSums(data[,iX[iY == Y.tempo],drop=FALSE])
-                }
-                toUpdate[iName2] <- TRUE
-            }
-        }
-        
-        ## *** derivative regarding the residual variance covariance
-        if(iType %in% vcov.param){
-            
-            if(iType=="Sigma_var"){
-                dOmega[[iName2]] <- matrix(0,
-                                           nrow = n.endogenous, ncol = n.endogenous, byrow = TRUE,
-                                           dimnames = list(name.endogenous, name.endogenous))
-                dOmega[[iName2]][match(iX, name.endogenous) + (match(iY, name.endogenous) - 1) * n.endogenous] <- 1
-                toUpdate[iName2] <- FALSE
-            }else if(iType=="Sigma_cov"){
-                dOmega[[iName2]] <- matrix(0,
-                                           nrow = n.endogenous, ncol = n.endogenous, byrow = TRUE,
-                                           dimnames = list(name.endogenous, name.endogenous))
-                dOmega[[iName2]][match(iX, name.endogenous) + (match(iY, name.endogenous) - 1) * n.endogenous] <- 1
-                dOmega[[iName2]][match(iY, name.endogenous) + (match(iX, name.endogenous) - 1) * n.endogenous] <- 1
-                toUpdate[iName2] <- FALSE
-            }
-            
-        }        
-
-        ## *** matrices
-        if(iType=="Lambda"){            
-            dLambda[[iName2]] <- matrix(0,
-                                        nrow = n.latent, ncol = n.endogenous, byrow = TRUE,
-                                        dimnames = list(name.latent, name.endogenous))
-            dLambda[[iName2]][match(iX, name.latent) + (match(iY, name.endogenous) - 1) * n.latent] <- 1            
-            toUpdate[iName2] <- TRUE
-        }else if(iType=="B"){
-            dB[[iName2]] <- matrix(0,
-                                   nrow = n.latent, ncol = n.latent, byrow = TRUE,
-                                   dimnames = list(name.latent, name.latent))
-            dB[[iName2]][match(iX, name.latent) + (match(iY, name.latent) - 1) * n.latent] <- 1
-            toUpdate[iName2] <- TRUE
-        }else if(iType=="Psi_var"){
-            dPsi[[iName2]] <- matrix(0,
-                                     nrow = n.latent, ncol = n.latent, byrow = TRUE,
-                                     dimnames = list(name.latent, name.latent))
-            dPsi[[iName2]][match(iX, name.latent) + (match(iY, name.latent) - 1) * n.latent] <- 1
-            toUpdate[iName2] <- TRUE
-        }else if(iType=="Psi_cov"){
-            dPsi[[iName2]] <- matrix(0,
-                                     nrow = n.latent, ncol = n.latent, byrow = TRUE,
-                                     dimnames = list(name.latent, name.latent))
-            dPsi[[iName2]][match(iX, name.latent) + (match(iY, name.latent) - 1) * n.latent] <- 1
-            dPsi[[iName2]][match(iY, name.latent) + (match(iX, name.latent) - 1) * n.latent] <- 1            
-            toUpdate[iName2] <- TRUE
-        } 
-    }
-### ** export
-    return(list(
-        dmu = dmu,
-        dOmega = dOmega,
-        dLambda = dLambda,
-        dB = dB,
-        dPsi = dPsi,
-        toUpdate = toUpdate
-    ))
-}
-
-
-## * skeletonDtheta.lvmfit
-#' @rdname skeleton
-skeletonDtheta.lvmfit <- function(object, name.endogenous, name.latent, ...){
-
-### ** Import information
-    n.endogenous <- length(name.endogenous)
-    n.latent <- length(name.latent)
-
-    ## from Moment
-    type <- object$conditionalMoment$skeleton$type
-    iIB.Lambda <- object$conditionalMoment$value$iIB.Lambda
-    alpha.XGamma.iIB <- object$conditionalMoment$value$alpha.XGamma.iIB
-    tLambda.tiIB.Psi.iIB <- object$conditionalMoment$value$tLambda.tiIB.Psi.iIB
-
-    ## from dMoment.init
-    dmu <- object$conditionalMoment$dMoment.init$dmu
-    dOmega <- object$conditionalMoment$dMoment.init$dOmega
-    dLambda <- object$conditionalMoment$dMoment.init$dLambda
-    dB <- object$conditionalMoment$dMoment.init$dB
-    dPsi <- object$conditionalMoment$dMoment.init$dPsi
-    toUpdate <- object$conditionalMoment$dMoment.init$toUpdate
-    name2Update <- names(toUpdate)
-    type2Update <- type[name2Update]
-    
-### ** Update partial derivatives
-
-    ## *** mean coefficients
-    type2Update.meanparam <- type2Update[type2Update %in% c("alpha","Lambda","Gamma","B")]
-    name2Update.meanparam <- names(type2Update.meanparam)
-    n2Update.meanparam <- length(name2Update.meanparam)
-        
-    if(n2Update.meanparam>0){
-        for(iP in 1:n2Update.meanparam){ # iP <- 1
-            iType <- type2Update.meanparam[iP]
-            iName <- name2Update.meanparam[iP]
-            
-            if(iType == "alpha"){
-                dmu[[iName]] <- dmu[[iName]] %*% iIB.Lambda
-            }else if(iType == "Gamma"){
-                dmu[[iName]] <- dmu[[iName]] %*% iIB.Lambda 
-            }else if(iType == "Lambda"){
-                dmu[[iName]] <- alpha.XGamma.iIB %*% dLambda[[iName]]
-            }else if(iType == "B"){
-                dmu[[iName]] <- alpha.XGamma.iIB %*% dB[[iName]] %*% iIB.Lambda
-            }
-
-            colnames(dmu[[iName]]) <- name.endogenous
-        }
-    }
-
-    ## *** variance-covariance coefficients
-    type2Update.vcovparam <- type2Update[type2Update %in% c("Psi_var","Psi_cov","Lambda","B")]
-    name2Update.vcovparam <- names(type2Update.vcovparam)
-    n2Update.vcovparam <- length(name2Update.vcovparam)
-
-    if(n2Update.vcovparam>0){
-        for(iP in 1:n2Update.vcovparam){ # iP <- 1
-            iType <- type2Update.vcovparam[iP]
-            iName <- name2Update.vcovparam[iP]
-        
-            if(iType %in% "Psi_var"){
-                dOmega[[iName]] <-  t(iIB.Lambda) %*% dPsi[[iName]] %*% iIB.Lambda
-            }else if(iType %in% "Psi_cov"){
-                dOmega[[iName]] <-  t(iIB.Lambda) %*% dPsi[[iName]] %*% iIB.Lambda
-            }else if(iType == "Lambda"){
-                dOmega[[iName]] <- tLambda.tiIB.Psi.iIB %*% dLambda[[iName]]
-                dOmega[[iName]] <- dOmega[[iName]] + t(dOmega[[iName]])
-            }else if(iType == "B"){
-                dOmega[[iName]] <- tLambda.tiIB.Psi.iIB %*% dB[[iName]] %*% iIB.Lambda
-                dOmega[[iName]] <- dOmega[[iName]] + t(dOmega[[iName]])
-            }
-
-            colnames(dOmega[[iName]]) <- name.endogenous
-            rownames(dOmega[[iName]]) <- name.endogenous
-        }
-    }
-
-### ** Export
-    return(list(dmu = dmu, dOmega = dOmega))
-
-}
-
-## * skeletonDtheta2
-#' @rdname skeleton
-`skeletonDtheta2` <-
-    function(object, ...) UseMethod("skeletonDtheta2")
-
-## * skeletonDtheta2.gls
-#' @rdname skeleton
-skeletonDtheta2.gls <- function(object, dOmega = NULL,
-                                class.cor = NULL, class.var = NULL,
-                                M.corcoef = NULL, n.endogenous = NULL,
-                                index.lower.tri = NULL, indexArr.lower.tri = NULL,
-                                var.coef = NULL, name.otherVar = NULL, name.varcoef = NULL, n.varcoef = NULL,
-                                cor.coef = NULL, name.corcoef = NULL,
-                                ...){
-
-    ## ** import information
-    if(is.null(dOmega)){
-        dOmega <- object$conditionalMoment$dOmega
-    }
-    if(is.null(class.cor)){
-        class.cor <- object$conditionalMoment$skeleton$class.cor
-    }
-    if(is.null(class.var)){
-        class.var <- object$conditionalMoment$skeleton$class.var
-    }
-    if(is.null(M.corcoef)){
-        M.corcoef <- object$conditionalMoment$skeleton$M.corcoef
-    }
-    if(is.null(n.endogenous)){
-        n.endogenous <- object$conditionalMoment$skeleton$n.endogenous
-    }
-    if(is.null(index.lower.tri)){
-        index.lower.tri <- object$conditionalMoment$skeleton$index.lower.tri
-    }
-    if(is.null(indexArr.lower.tri)){
-        indexArr.lower.tri <- object$conditionalMoment$skeleton$indexArr.lower.tri
-    }
-    if(is.null(var.coef)){
-        var.coef <- object$conditionalMoment$skeleton$var.coef
-    }
-    if(is.null(name.varcoef)){
-       name.varcoef <- object$conditionalMoment$skeleton$name.varcoef
-    }
-    if(is.null(name.otherVar)){
-        name.otherVar <- object$conditionalMoment$skeleton$name.otherVar
-    }
-    if(is.null(n.varcoef)){
-       n.varcoef <- object$conditionalMoment$skeleton$n.varcoef
-    }
-    if(is.null(cor.coef)){
-        cor.coef <- object$conditionalMoment$skeleton$cor.coef
-    }
-    if(is.null(name.corcoef)){
-       name.corcoef <- object$conditionalMoment$skeleton$name.corcoef
-    }
-    out <- list(d2Omega = list(), d2mu = NULL)
-
-    ## ** derivative: dispersion parameter with other variance parameter
-    if("NULL" %in% class.var == FALSE){
-        for(iVar in name.otherVar){ ## iVar <- name.otherVar[1]
-            out$d2Omega[["sigma2"]][[iVar]] <- dOmega[[iVar]]/var.coef["sigma2"]
-        }
-    }
-
-    ## ** derivative: dispersion parameter with correlation parameters
-    if("NULL" %in% class.cor == FALSE){
-        for(iVar in name.corcoef){
-            out$d2Omega[["sigma2"]][[iVar]] <- dOmega[[iVar]]/var.coef["sigma2"]
-        }
-    }
-
-    ## ** derivative: correlation parameter with other variance parameters
-    if("NULL" %in% class.var == FALSE && "NULL" %in% class.cor == FALSE){
-        M.corvalue <- matrix(1, nrow = n.endogenous, ncol = n.endogenous)
-        M.corvalue[index.lower.tri] <- cor.coef[M.corcoef[index.lower.tri]]
-        M.corvalue <- symmetrize(M.corvalue, update.upper = TRUE)
-
-            for(iVar1 in name.otherVar){ ## iVar1 <- name.otherVar[1]
-
-                iIndex.var1 <- which(name.varcoef == iVar1)
-                
-                ## var var
-                for(iVar2 in name.varcoef[iIndex.var1:n.varcoef]){
-
-                    ##
-                    M.tempo <- c(1,-1)[(iVar1==iVar2)+1] * dOmega[[iVar1]]/(2*var.coef[iVar2])
-
-                    ## remove null derivative on the diagonal
-                    diag(M.tempo) <- 0
-
-                    ## remove null derivative outside the diagonal
-                    iIndex.var2 <- which(name.varcoef == iVar2)
-                    
-                    index0 <- union(which(rowSums(indexArr.lower.tri==iIndex.var1)==0),
-                                    which(rowSums(indexArr.lower.tri==iIndex.var2)==0))
-                    M.tempo[index.lower.tri[index0]] <- 0
-                    M.tempo <- symmetrize(M.tempo, update.upper = TRUE)
-
-                    out$d2Omega[[iVar1]][[iVar2]] <- M.tempo
-                }                
-                
-                ## var cor
-                for(iVar2 in name.corcoef){                    
-                    M.tempo <- dOmega[[iVar1]]/M.corvalue
-                    M.tempo[M.corcoef!=iVar2] <- 0
-                    if(any(M.tempo!=0)){
-                        out$d2Omega[[iVar1]][[iVar2]] <- M.tempo
-                    }
-                }
-
-            }
-    }
-
-    return(out)
-}
-
-## * skeletonDtheta2.lme
-#' @rdname skeleton
-skeletonDtheta2.lme <- skeletonDtheta2.gls
-
-## * skeletonDtheta2.lm
-#' @rdname skeleton
-skeletonDtheta2.lm <- function(object, ...){
-    return(list(d2mu = NULL, d2Omega = NULL))    
-}
-
-## * skeletonDtheta2.lvm
-#' @rdname skeleton
-skeletonDtheta2.lvm <- function(object, data, df.param.all,
-                                param2originalLink, name.latent, ...){
-
-    detail <- factitious <- marginal <- param <- value <- Y <- NULL ## [:for CRAN check] subset
-    
-    df.param <- subset(df.param.all, is.na(value) & marginal == FALSE & factitious == FALSE)
-    dfred.param <- subset(df.param, subset = !duplicated(param))
-    
-    n.latent <- length(name.latent)
-    n.data <- NROW(data)
-
-### ** identify all combinations of coefficients with second derivative
-    grid.mean <- list()
-
-    grid.mean$alpha.B <- .combinationDF(dfred.param,
-                                        detail1 = "alpha", name1 = "alpha",
-                                        detail2 = "B", name2 = "B")
-
-    grid.mean$alpha.Lambda <- .combinationDF(dfred.param,
-                                             detail1 = "alpha", name1 = "alpha",
-                                             detail2 = "Lambda", name2 = "Lambda")
-
-    grid.mean$Gamma.B <- .combinationDF(dfred.param,
-                                        detail1 = "Gamma", name1 = "Gamma",
-                                        detail2 = "B", name2 = "B")
-
-    grid.mean$Gamma.Lambda <- .combinationDF(dfred.param,
-                                             detail1 = "Gamma", name1 = "Gamma",
-                                             detail2 = "Lambda", name2 = "Lambda")
-    
-    grid.mean$Lambda.B <- .combinationDF(dfred.param,
-                                        detail1 = "Lambda", name1 = "Lambda",
-                                        detail2 = "B", name2 = "B")
-
-    grid.mean$B.B <- .combinationDF(dfred.param,
-                                    detail1 = "B", name1 = "B1",
-                                    detail2 = "B", name2 = "B2")
-
-    n.mean <- lapply(grid.mean, NROW)
-    
-
-    grid.vcov <- list()
-    
-    grid.vcov$Psi.Lambda <- .combinationDF(dfred.param,
-                                           detail1 = c("Psi_var","Psi_cov"), name1 = "Psi",
-                                           detail2 = "Lambda", name2 = "Lambda")
-
-    grid.vcov$Psi.B <- .combinationDF(dfred.param,
-                                      detail1 = c("Psi_var","Psi_cov"), name1 = "Psi",
-                                      detail2 = "B", name2 = "B")
-
-    grid.vcov$Lambda.B <- .combinationDF(dfred.param,
-                                         detail1 = "Lambda", name1 = "Lambda",
-                                         detail2 = "B", name2 = "B")
-
-    grid.vcov$Lambda.Lambda <- .combinationDF(dfred.param,
-                                              detail1 = "Lambda", name1 = "Lambda1",
-                                              detail2 = "Lambda", name2 = "Lambda2")
-
-    grid.vcov$B.B <- .combinationDF(dfred.param,
-                                    detail1 = "B", name1 = "B1",
-                                    detail2 = "B", name2 = "B2")
-    
-    n.vcov <- lapply(grid.vcov, NROW)
-    
-### ** convert back to lava names
-    grid.mean <- lapply(grid.mean, function(x){ ## x <- grid.mean[[2]]
-        if(length(x)>0){
-            x[,1] <- param2originalLink[x[,1]]
-            x[,2] <- param2originalLink[x[,2]]
-        }
-        return(x)
-    })
-
-    grid.vcov <- lapply(grid.vcov, function(x){ ## x <- grid.vcov[[2]]
-        if(length(x)>0){
-            x[,1] <- param2originalLink[x[,1]]
-            x[,2] <- param2originalLink[x[,2]]
-        }
-        return(x)
-    })
-
-### ** prepare export
-    if(any(unlist(n.mean)>0)){
-        xx <- lapply(grid.mean, function(x){
-            if(NROW(x)>0){
-                colnames(x) <- c("x","y")
-            }
-            return(x)
-        })
-        collapseGrid <- do.call(rbind, xx)
-        name.tempo <- as.character(unique(collapseGrid[[1]]))
-        d2mu <- lapply(name.tempo, function(x){
-            iIndex <- which(collapseGrid[[1]]==x)
-            v <- vector(mode = "list", length(iIndex))
-            names(v) <- collapseGrid[[2]][iIndex]
-            return(v)
-        })
-        names(d2mu) <- name.tempo
-    }else{
-        d2mu <- list()
-    }
-    
-    if(any(unlist(n.vcov)>0)){
-        xx <- lapply(grid.vcov, function(x){
-            if(NROW(x)>0){
-                colnames(x) <- c("x","y")
-            }
-            return(x)
-        })
-        collapseGrid <- do.call(rbind, xx)
-        name.tempo <- as.character(unique(collapseGrid[[1]]))
-        d2Omega <- lapply(name.tempo, function(x){
-            iIndex <- which(collapseGrid[[1]]==x)
-            v <- vector(mode = "list", length(iIndex))
-            names(v) <- collapseGrid[[2]][iIndex]
-            return(v)
-        })
-        names(d2Omega) <- name.tempo
-    }else{
-        d2Omega <- list()
-    }
-    
-    ## ** prepare alpha.B and alpha.Lambda
-    if(any(df.param$detail == "alpha")){
-        name.alpha <- subset(df.param, subset = !duplicated(param) & detail == "alpha", select = "param", drop = TRUE)
-        ls.Malpha <- list()
-        for(iName in name.alpha){ # iName <- name.alpha[1]
-
-            iParam <- df.param[df.param$name == iName, "param"]
-            iY <- subset(df.param, subset = param %in% iParam, select = Y, drop = TRUE)
-            ls.Malpha[[iName]] <- matrix(as.numeric(name.latent %in% unique(iY)),
-                                         nrow = n.data, ncol = n.latent, byrow = TRUE,
-                                         dimnames = list(NULL, name.latent))
-            
-        }
-    }
-    if(n.mean$alpha.B>0){
-        for(iP in 1:n.mean$alpha.B){ ## iP <- 1
-            iName1 <- grid.mean$alpha.B[iP,"alpha"]
-            iName2 <- grid.mean$alpha.B[iP,"B"]
-            
-            d2mu[[iName1]][[iName2]] <- ls.Malpha[[iName1]]
-        }
-    }
-    if(n.mean$alpha.Lambda>0){
-        for(iP in 1:n.mean$alpha.Lambda){ ## iP <- 1
-            iName1 <- grid.mean$alpha.Lambda[iP,"alpha"]
-            iName2 <- grid.mean$alpha.Lambda[iP,"Lambda"]
-            
-            d2mu[[iName1]][[iName2]] <- ls.Malpha[[iName1]]
-        }
-    }
-    
-    ## ** Store X for Gamma
-    if(n.mean$Gamma.Lambda>0){
-        for(iP in 1:n.mean$Gamma.Lambda){ ## iP <- 1
-            iName1 <- grid.mean$Gamma.Lambda[iP,"Gamma"]
-            iName2 <- grid.mean$Gamma.Lambda[iP,"Lambda"]
-
-            iParam <- df.param[df.param$name == iName1, "param"]
-            iX <- subset(df.param.all, subset = param %in% iParam, select = "X", drop = TRUE)
-            iY <- subset(df.param.all, subset = param %in% iParam, select = "Y", drop = TRUE)
-
-            d2mu[[iName1]][[iName2]] <- matrix(0, nrow = n.data, ncol = n.latent, byrow = TRUE,
-                                               dimnames = list(NULL, name.latent))
-            for(Y.tempo in unique(iY)){
-                d2mu[[iName1]][[iName2]][,Y.tempo] <- rowSums(data[,iX[iY == Y.tempo],drop=FALSE])
-            }
-        }
-    }
-    
-    if(n.mean$Gamma.B>0){
-        for(iP in 1:n.mean$Gamma.B){ ## iP <- 1
-            iName1 <- grid.mean$Gamma.B[iP,"Gamma"]
-            iName2 <- grid.mean$Gamma.B[iP,"B"]
-            
-            iParam <- df.param[df.param$name == iName1, "param"]
-            iX <- subset(df.param.all, subset = param %in% iParam, select = "X", drop = TRUE)
-            iY <- subset(df.param.all, subset = param %in% iParam, select = "Y", drop = TRUE)
-
-            d2mu[[iName1]][[iName2]] <- matrix(0, nrow = n.data, ncol = n.latent, byrow = TRUE,
-                                               dimnames = list(NULL, name.latent))
-            for(Y.tempo in unique(iY)){
-                d2mu[[iName1]][[iName2]][,Y.tempo] <- rowSums(data[,iX[iY == Y.tempo],drop=FALSE])
-            }
-        }
-    }
-
-### ** export
-    toUpdate <- unlist(lapply(c(n.mean,n.vcov), function(x){x>0}))
-    return(list(grid.mean = grid.mean,
-                n.mean = n.mean,                
-                grid.vcov = grid.vcov,
-                n.vcov = n.vcov,
-                d2mu = d2mu,
-                d2Omega = d2Omega,
-                toUpdate = toUpdate
-                ))
-}
-
-## * skeletonDtheta2.lvmfit
-#' @rdname skeleton
-skeletonDtheta2.lvmfit <- function(object, ...){
-    
-### ** Import information
-    n.endogenous <- NCOL(object$conditionalMoment$Omega)
-
-    ## from Moment
-    Psi <- object$conditionalMoment$value$Psi
-    Lambda <- object$conditionalMoment$value$Lambda
-    iIB <- object$conditionalMoment$value$iIB
-    Psi.iIB <- object$conditionalMoment$value$Psi.iIB
-    iIB.Lambda <- object$conditionalMoment$value$iIB.Lambda
-    alpha.XGamma.iIB <- object$conditionalMoment$value$alpha.XGamma.iIB
-    type <- object$conditionalMoment$skeleton$type
-
-    ## from dMoment.init
-    dLambda <- object$conditionalMoment$dMoment.init$dLambda
-    dB <- object$conditionalMoment$dMoment.init$dB
-    dPsi <- object$conditionalMoment$dMoment.init$dPsi
-    
-    ## from d2Moment.init
-    d2mu <- object$conditionalMoment$d2Moment.init$d2mu
-    d2Omega <- object$conditionalMoment$d2Moment.init$d2Omega
-
-    grid.mean <- object$conditionalMoment$d2Moment.init$grid.mean
-    grid.vcov <- object$conditionalMoment$d2Moment.init$grid.vcov
-
-    n.mean <- object$conditionalMoment$d2Moment.init$n.mean
-    n.vcov <- object$conditionalMoment$d2Moment.init$n.vcov
-
-    toUpdate <- object$conditionalMoment$d2Moment.init$toUpdate
-    ##    names(object$conditionalMoment$d2Moment)
-    
-### ** second order partial derivatives
-    if(any(toUpdate)){
-        
-        ## *** mean coefficients        
-        if(toUpdate["alpha.B"]){
-            for(iP in 1:n.mean$alpha.B){ # iP <- 1
-                iName1 <- grid.mean$alpha.B[iP,"alpha"]
-                iName2 <- grid.mean$alpha.B[iP,"B"]
-
-                d2mu[[iName1]][[iName2]] <- d2mu[[iName1]][[iName2]] %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
-            }
-        }
-        
-        if(toUpdate["alpha.Lambda"]){
-            for(iP in 1:n.mean$alpha.Lambda){ # iP <- 1
-                iName1 <- grid.mean$alpha.Lambda[iP,"alpha"]
-                iName2 <- grid.mean$alpha.Lambda[iP,"Lambda"]
-
-                d2mu[[iName1]][[iName2]] <- d2mu[[iName1]][[iName2]] %*% iIB %*% dLambda[[iName2]]
-                
-            }
-        }
-
-        if(toUpdate["Gamma.B"]){
-            for(iP in 1:n.mean$Gamma.B){ # iP <- 1
-                iName1 <- grid.mean$Gamma.B[iP,"Gamma"]
-                iName2 <- grid.mean$Gamma.B[iP,"B"]
-
-                d2mu[[iName1]][[iName2]] <- d2mu[[iName1]][[iName2]] %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
-            }
-        }        
-
-        if(toUpdate["Gamma.Lambda"]){
-            for(iP in 1:n.mean$Gamma.Lambda){ # iP <- 1
-                iName1 <- grid.mean$Gamma.Lambda[iP,"Gamma"]
-                iName2 <- grid.mean$Gamma.Lambda[iP,"Lambda"]                
-
-                d2mu[[iName1]][[iName2]] <- d2mu[[iName1]][[iName2]] %*% iIB %*% dLambda[[iName2]]
-            }
-        }        
-
-        if(toUpdate["Lambda.B"]){
-            for(iP in 1:n.mean$Lambda.B){ # iP <- 1
-                iName1 <- grid.mean$Lambda.B[iP,"Lambda"]
-                iName2 <- grid.mean$Lambda.B[iP,"B"]
-
-                d2mu[[iName1]][[iName2]] <- alpha.XGamma.iIB %*% dB[[iName2]] %*% iIB %*% dLambda[[iName1]]
-            }
-        }
-
-        if(toUpdate["B.B"]){
-            for(iP in 1:n.mean$B.B){ # iP <- 1
-                iName1 <- grid.mean$B.B[iP,"B1"]
-                iName2 <- grid.mean$B.B[iP,"B2"]
-
-                term1 <- alpha.XGamma.iIB %*% dB[[iName2]] %*% iIB %*% dB[[iName1]] %*% iIB.Lambda
-                term2 <- alpha.XGamma.iIB %*% dB[[iName1]] %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
-                d2mu[[iName1]][[iName2]] <- term1 + term2
-            }
-        }
-
-        ## *** variance-covariance coefficients
-        if(toUpdate["Psi.Lambda"]){
-            for(iP in 1:n.vcov$Psi.Lambda){ # iP <- 1
-                iName1 <- grid.vcov$Psi.Lambda[iP,"Psi"]
-                iName2 <- grid.vcov$Psi.Lambda[iP,"Lambda"]
-
-                term1 <- t(dLambda[[iName2]]) %*% t(iIB) %*% dPsi[[iName1]] %*% iIB.Lambda                
-                d2Omega[[iName1]][[iName2]] <- term1 + t(term1)
-            }
-        }
-
-        if(toUpdate["Psi.B"]){
-            for(iP in 1:n.vcov$Psi.B){ # iP <- 1
-                iName1 <- grid.vcov$Psi.B[iP,"Psi"]
-                iName2 <- grid.vcov$Psi.B[iP,"B"]
-
-                term1 <- t(iIB.Lambda) %*% t(dB[[iName2]]) %*% t(iIB) %*% dPsi[[iName1]] %*% iIB.Lambda
-                d2Omega[[iName1]][[iName2]] <- term1 + t(term1)
-            }
-        }
-
-        if(toUpdate["Lambda.B"]){
-            for(iP in 1:n.vcov$Lambda.B){ # iP <- 1
-                iName1 <- grid.vcov$Lambda.B[iP,"Lambda"]
-                iName2 <- grid.vcov$Lambda.B[iP,"B"]
-
-                term1 <- t(dLambda[[iName1]]) %*% t(iIB) %*% t(dB[[iName2]]) %*% t(iIB) %*% Psi %*% iIB.Lambda
-                term2 <- t(dLambda[[iName1]]) %*% t(iIB) %*% Psi %*% iIB %*% dB[[iName2]] %*% iIB.Lambda
-                ## term2 <- tLambda.tiIB.Psi.iIB %*% dB[[iName2]] %*% iIB %*% dLambda[[iName1]]                
-                d2Omega[[iName1]][[iName2]] <- term1 + t(term1) + term2 + t(term2)
-            }
-        }
-
-        if(toUpdate["Lambda.Lambda"]){
-            for(iP in 1:n.vcov$Lambda.Lambda){ # iP <- 1
-                iName1 <- grid.vcov$Lambda.Lambda[iP,"Lambda1"]
-                iName2 <- grid.vcov$Lambda.Lambda[iP,"Lambda2"]
-                
-                term1 <- t(dLambda[[iName1]]) %*% t(iIB) %*% Psi.iIB %*% dLambda[[iName2]]
-                d2Omega[[iName1]][[iName2]] <- term1 + t(term1)
-            }
-        }
-
-        if(toUpdate["B.B"]){
-            for(iP in 1:n.vcov$B.B){ # iP <- 1
-                iName1 <- grid.vcov$B.B[iP,"B1"]
-                iName2 <- grid.vcov$B.B[iP,"B2"]
-
-                term1 <- t(iIB.Lambda) %*% t(dB[[iName2]]) %*% t(iIB) %*% t(dB[[iName1]]) %*% t(iIB) %*% Psi.iIB %*% Lambda
-                term2 <- t(iIB.Lambda) %*% t(dB[[iName1]]) %*% t(iIB) %*% t(dB[[iName2]]) %*% t(iIB) %*% Psi.iIB %*% Lambda
-                term3 <- t(iIB.Lambda) %*% t(dB[[iName1]]) %*% t(iIB) %*% Psi.iIB %*% dB[[iName2]] %*% iIB %*% Lambda
-                d2Omega[[iName1]][[iName2]] <- term1 + t(term1) + term2 + t(term2) + term3 + t(term3)
-            }
-        }
-
-    }
-
-### ** Export
-    return(list(d2mu = d2mu, d2Omega = d2Omega))
-
-}
-
-## * .combination
-#' @title Form all Unique Combinations Between two Vectors
-#' @description Form all unique combinations between two vectors (removing symmetric combinations).
-#' @name combination
-#'
-#' @param ... [vectors] elements to be combined.
-#'
-#' @return A matrix, each row being a different combination.
-#' 
-#' @examples
-#' .combination <- lavaSearch2:::.combination
-#' 
-#' .combination(1,1)
-#' .combination(1:2,1:2)
-#' .combination(c(1:2,1:2),1:2)
-#' 
-#' .combination(alpha = 1:2, beta = 3:4)
-#'
-#' @keywords internal
-.combination <- function(...){
-
-    ## ** normalize arguments
-    dots <- list(...)
-    if(length(dots)!=2){
-        stop("can only handle two vectors \n")
-    }
-    test.null <- unlist(lapply(dots,is.null))    
-    if(any(test.null)){
-        return(NULL)
-    }
-    dots <- lapply(dots,unique)
-
-    ## ** form all combinations
-    grid <- expand.grid(dots, stringsAsFactors = FALSE) 
-    
-    ## ** remove combinations (b,a) when (a,b) is already there
-    name1 <- paste0(grid[,1],grid[,2])
-    name2 <- paste0(grid[,2],grid[,1])
-
-    if(NROW(grid)>1 && any(name1 %in% name2)){ 
-
-        n.grid <- NROW(grid)
-        test.duplicated <- c(FALSE,sapply(2:n.grid, function(iG){
-            any(name2[iG] %in% name1[1:(iG-1)]) ## find duplicates
-        }))
-
-        grid <- grid[test.duplicated==FALSE,]
-    }
-
-    ## ** export
-    return(grid)        
-}
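As a worked example of the deduplication step above: .combination(1:2, 1:2) first expands to the pairs (1,1), (2,1), (1,2), (2,2); the pair (1,2) is then dropped because its mirror (2,1) is already present, so the function returns (1,1), (2,1), (2,2).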
-
-
-## * .combinationDF
-.combinationDF <- function(data,
-                           detail1, detail2,
-                           name1, name2){
-
-    detail <- NULL # [:for CRAN check] subset
-    
-    if(any(detail1 %in% data$detail) && any(detail2 %in% data$detail) ){
-        ls.args <- list(subset(data, subset = detail %in% detail1, select = "param", drop = TRUE),
-                        subset(data, subset = detail %in% detail2, select = "param", drop = TRUE))
-        names(ls.args) <- c(name1,name2)
-    
-        return(do.call(.combination, args = ls.args))
-        
-    }else{
-        
-        return(numeric(0))
-        
-    }
-}
-
-
-
-##----------------------------------------------------------------------
-### skeleton.R ends here
-
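The skeleton/value quantities deleted above (iIB, iIB.Lambda, tLambda.tiIB.Psi.iIB, ...) assemble the conditional moments of a linear latent variable model. Below is a minimal stand-alone sketch, with hypothetical toy parameter values not taken from the package, of the conditional variance these quantities build towards:

    ## toy model: 1 latent variable, 2 endogenous outcomes (hypothetical values)
    Lambda <- matrix(c(1, 0.8), nrow = 1)      ## loadings: latent -> endogenous
    B      <- matrix(0, 1, 1)                  ## regressions among latent variables
    Psi    <- matrix(0.5, 1, 1)                ## residual variance of the latent variable
    Sigma  <- diag(c(1, 1.2))                  ## residual variances of the outcomes
    iIB        <- solve(diag(1, nrow = 1) - B) ## (I-B)^-1, as in skeleton.lvmfit
    iIB.Lambda <- iIB %*% Lambda               ## (I-B)^-1 Lambda
    Omega <- t(iIB.Lambda) %*% Psi %*% iIB.Lambda + Sigma  ## conditional variance of Y given X

The derivatives updated in skeletonDtheta.lvmfit follow from the product rule applied to this expression, e.g. for a loading coefficient dOmega is t(dLambda) (I-B)^-T Psi (I-B)^-1 Lambda plus its transpose.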
diff --git a/R/summary.calibrateType1.R b/R/summary.calibrateType1.R
index d541a85..3542b68 100644
--- a/R/summary.calibrateType1.R
+++ b/R/summary.calibrateType1.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: apr 23 2018 (12:58) 
 ## Version: 
-## Last-Updated: jul 16 2018 (16:37) 
+## Last-Updated: Jan 11 2022 (16:48) 
 ##           By: Brice Ozenne
-##     Update #: 71
+##     Update #: 72
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -103,7 +103,7 @@ summary.calibrateType1 <- function(object, robust = FALSE, type = "type1error",
         ## display
         if(print){
             seqN <- unique(dfS$n)
-            seqRep <- setNames(dfS$n.rep[duplicated(dfS$n) == FALSE],seqN)
+            seqRep <- stats::setNames(dfS$n.rep[duplicated(dfS$n) == FALSE],seqN)
 
             vecTrans <- c("Gaus" = "Gaussian approx.",
                           "Satt" = "Satterthwaite approx.",
diff --git a/R/summary.glht2.R b/R/summary.glht2.R
deleted file mode 100644
index d4b46b1..0000000
--- a/R/summary.glht2.R
+++ /dev/null
@@ -1,49 +0,0 @@
-### summary.glht2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: may  2 2018 (09:20) 
-## Version: 
-## Last-Updated: may  2 2018 (16:58) 
-##           By: Brice Ozenne
-##     Update #: 34
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-summary.glht2 <- function(object, ...){
-
-    class(object) <- setdiff(class(object), "glht2")
-    output <- summary(object, ...)
-    class(output) <- append("summary.glht2",class(output))
-    
-    return(output)
-}
-
-print.summary.glht2 <- function(object, ...){
-
-    class(object) <- setdiff(class(object), "summary.glht2")
-    output <- utils::capture.output(print(object))
-    
-    txt.robust <- switch(as.character(object$robust),
-                         "TRUE" = "Robust standard errors",
-                         "FALSE" = "Model-based standard errors"
-                         )
-    txt.correction <- switch(as.character(object$bias.correct),
-                             "TRUE" = " corrected for small sample bias",
-                             "FALSE" = ""
-                             )
-    output[length(output)] <- paste0("(",txt.robust,txt.correction,")\n")
-
-    cat(paste0(output,collapse = "\n"))
-    
-    return(invisible(object))
-}
-
-
-######################################################################
-### summary.glht2.R ends here
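The deleted print method above uses a general pattern: capture the printed output of the parent class as a character vector, rewrite its last line, and re-emit it. A minimal stand-alone sketch of that pattern, applied here to an ordinary summary object with a hypothetical replacement message:

    x <- summary(lm(dist ~ speed, data = cars))       ## any object with a print method
    output <- utils::capture.output(print(x))         ## printed lines as a character vector
    output[length(output)] <- "(Model-based standard errors)"  ## overwrite the trailing line
    cat(paste0(output, collapse = "\n"))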
diff --git a/R/summary2.R b/R/summary2.R
deleted file mode 100644
index d97333e..0000000
--- a/R/summary2.R
+++ /dev/null
@@ -1,295 +0,0 @@
-### summary2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: nov 10 2017 (10:57) 
-## Version: 
-## Last-Updated: feb 18 2019 (13:36) 
-##           By: Brice Ozenne
-##     Update #: 310
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * Documentation - summary2
-#' @title Summary with Small Sample Correction
-#' @description Summary with small sample correction.
-#' @name summary2
-#'
-#' @param object a \code{gls}, \code{lme} or \code{lvm} object.
-#' @param digit [integer > 0] the number of decimal places to use when displaying the summary.
-#' @param df [logical] should the degrees of freedom of the Wald statistic be computed using the Satterthwaite correction?
-#' Otherwise the degrees of freedom are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.
-#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias?
-#' See \code{\link{sCorrect}} for more details.
-#' @param robust [logical] should the robust standard errors be used instead of the model based standard errors?
-#' @param cluster [integer vector] the grouping variable relative to which the observations are iid.
-#' @param ... arguments passed to the \code{summary} method of the object.
-#' 
-#' @seealso \code{\link{sCorrect}} for more detail about the small sample correction.
-#'
-#' @details \code{summary2} is the same as \code{summary}
-#' except that it first computes the small sample correction (but does not store it).
-#' So if \code{summary2} is to be called several times,
-#' it is more efficient to pre-compute the quantities for the small sample correction
-#' using \code{sCorrect} and then call \code{summary2}.
-#' 
-#' @examples
-#' m <- lvm(Y~X1+X2)
-#' set.seed(10)
-#' d <- lava::sim(m, 2e1)
-#'
-#' ## Gold standard
-#' summary(lm(Y~X1+X2, d))$coef
-#' 
-#' ## gls models
-#' library(nlme)
-#' e.gls <- gls(Y~X1+X2, data = d, method = "ML")
-#' summary(e.gls)$tTable
-#' sCorrect(e.gls, cluster = 1:NROW(d)) <- FALSE ## no small sample correction
-#' summary2(e.gls)$tTable
-#' 
-#' sCorrect(e.gls, cluster = 1:NROW(d)) <- TRUE ## small sample correction
-#' summary2(e.gls)$tTable
-#' 
-#' ## lvm models
-#' e.lvm <- estimate(m, data = d)
-#' summary(e.lvm)$coef
-#' 
-#' sCorrect(e.lvm) <- FALSE ## no small sample correction
-#' summary2(e.lvm)$coef
-#' 
-#' sCorrect(e.lvm) <- TRUE ## small sample correction
-#' summary2(e.lvm)$coef
-#' 
-#' @concept small sample inference
-#' @export
-`summary2` <-
-  function(object, ...) UseMethod("summary2")
-
-## * summary2.lm
-#' @rdname summary2
-#' @method summary2 lm
-#' @export
-summary2.lm <- function(object, df = TRUE, bias.correct = TRUE, ...){
-    sCorrect(object, df = df) <- bias.correct
-    return(summary2(object, df = df, ...))
-}
-## * summary2.gls
-#' @rdname summary2
-#' @method summary2 gls
-#' @export
-summary2.gls <- function(object, df = TRUE, bias.correct = TRUE, cluster = NULL, ...){
-    sCorrect(object, df = df, cluster = cluster) <- bias.correct
-    return(summary2(object, df = df, ...))
-}
-
-## * summary2.lme
-#' @rdname summary2
-#' @method summary2 lme
-#' @export
-summary2.lme <- summary2.lm
-
-## * summary2.lvmfit
-#' @rdname summary2
-#' @method summary2 lvmfit
-#' @export
-summary2.lvmfit <- summary2.lm
-
-## * summary2.lm2
-#' @rdname summary2
-#' @method summary2 lm2
-#' @export
-summary2.lm2 <- function(object, 
-                         digit = max(3, getOption("digits")),
-                         robust = FALSE,
-                         df = TRUE,
-                         ...){
-
-### ** perform Wald test
-    name.param <- names(coef(object))
-    n.param <- length(name.param)
-
-    tTable.all <- compare2(object,
-                           par = name.param,
-                           robust = robust,
-                           df = df,
-                           F.test = FALSE,
-                           as.lava = FALSE)
-    tTable <- tTable.all[1:n.param,c("estimate","std","statistic","p-value","df")]
-    dimnames(tTable) <- list(name.param,
-                             c("Value","Std.Error","t-value","p-value","df")
-                             )
-
-### ** get summary
-    class(object) <- setdiff(class(object),c("lm2"))
-    object.summary <- summary(object, digits = digit, ...)
-    
-### ** update summary
-    object.summary$coefficients <- tTable
-
-### ** export
-    return(object.summary)
-
-}
-## * summary2.gls2
-#' @rdname summary2
-#' @method summary2 gls2
-#' @export
-summary2.gls2 <- function(object, 
-                          digit = max(3, getOption("digits")),
-                          robust = FALSE,
-                          df = TRUE,
-                          ...){
-    
-    ### ** perform Wald test
-    name.param <- names(coef(object))
-    n.param <- length(name.param)
-
-    tTable.all <- compare2(object,
-                           par = name.param,
-                           robust = robust,
-                           df = df,
-                           F.test = FALSE,
-                           as.lava = FALSE)
-    tTable <- tTable.all[1:n.param,c("estimate","std","statistic","p-value","df")]
-    dimnames(tTable) <- list(name.param,
-                             c("Value","Std.Error","t-value","p-value","df")
-                             )
-
-    ### ** get summary
-    class(object) <- setdiff(class(object),c("gls2","lme2"))
-    object.summary <- summary(object, digits = digit, ...)
-    
-    ### ** update summary
-    object.summary$tTable <- tTable
-
-    ### ** export
-    return(object.summary)
-}
-
-## * summary2.lme2
-#' @rdname summary2
-#' @method summary2 lme2
-#' @export
-summary2.lme2 <- summary2.gls2
-
-## * summary2.lvmfit2
-#' @rdname summary2
-#' @method summary2 lvmfit2
-#' @export
-summary2.lvmfit2 <- function(object, cluster = NULL, robust = FALSE, df = TRUE, ...){
-
-    
-### ** perform Wald test
-    param <- lava::pars(object)
-    name.param <- names(param)
-    n.param <- length(param)
-
-    table.all <- compare2(object,
-                          par = name.param,
-                          robust = robust,
-                          cluster = cluster,
-                          df = df,
-                          F.test = FALSE,
-                          as.lava = FALSE)
-    table.coef <- table.all[1:n.param,c("estimate","std","statistic","p-value","df")]
-    dimnames(table.coef) <- list(name.param,
-                                 c("Estimate", "Std. Error", "t-value", "P-value", "df")
-                                 )
-
-### ** get summary
-    class(object) <- setdiff(class(object),"lvmfit2")
-    object.summary <- summary(object, ...)
-    if(!is.null(object$cluster) || inherits(object,"lvm.missing")){
-        
-        ## if(robust == FALSE){
-        ##     stop("Can only print summary for robust standard errors \n",
-        ##          "when the object contain a cluster variable \n")
-        ## }
-        colnames(object.summary$coef) <- c("Estimate","Std. Error","Z-value","P-value")
-        object.summary$coef[,"Z-value"] <- NA
-
-        colnames(object.summary$coefmat) <- c("Estimate","Std. Error","Z-value","P-value", "std.xy")
-        object.summary$coefmat[,"Z-value"] <- ""
-        
-    }
-    
-    ## find digit
-    vec.char <- setdiff(object.summary$coefmat[,"Estimate"],"")
-    digit <- max(c(nchar(gsub(".","",vec.char,fixed = TRUE)))-1,1)
-
-    ### ** update summary
-    ### *** vcov
-    object.summary$vcov <- attr(object$dVcov, "vcov.param")[name.param,name.param]    
-
-### *** coef
-    lava.rownames <- rownames(object.summary$coef)
-    ## add rows corresponding to reference parameters
-    missing.rows <- setdiff(lava.rownames,rownames(table.coef))
-    if(length(missing.rows)>0){
-        addon <- object.summary$coef[missing.rows,
-                                     c("Estimate","Std. Error","Z-value","P-value"),
-                                     drop=FALSE]
-        colnames(addon)[3] <- "t-value"
-        table.coef <- rbind(table.coef, cbind(addon,df=NA))
-    }
-
-    ## re-order table according to lava
-    table.coef <- table.coef[intersect(lava.rownames,rownames(table.coef)),,drop=FALSE]
-    ## remove inappropriate p-values
-    lava.NApvalue <- which(is.na(object.summary$coef[,"P-value"]))
-    table.coef[intersect(lava.rownames[lava.NApvalue],rownames(table.coef)),"P-value"] <- NA
-    object.summary$coef <- table.coef
-    
-    
-    ### *** coefmat
-    name.label0 <- trimws(rownames(CoefMat(object, labels = 0, level = 9)), which = "both")
-    index.titleVariance <- which(name.label0=="Residual Variances:")
-    if(length(index.titleVariance)>0){
-        ## rename variance parameters from Y to Y~~Y
-        index.vcov <- (index.titleVariance+1):length(name.label0)
-        index.var <- setdiff(index.vcov,grep("~~",name.label0,fixed=TRUE)) ## exclude covariance parameters that are already correctly named
-        name.label0[index.var] <- paste0(name.label0[index.var],lava.options()$symbols[2],name.label0[index.var])
-    }
-
-    table.coefmat <- object.summary$coefmat
-    colnames(table.coefmat)[3:5] <- c("t-value","P-value","df")
-    
-    ## mimic lava:::CoefMat (called by lava:::summary.lvmfit)    
-    e2add <- format(round(table.coef[,"Estimate"], max(1, digit - 1)), digits = digit - 1)
-    e2add <- gsub(" NA","",e2add)
-    sd2add <- format(round(table.coef[,"Std. Error"], max(1, digit - 1)), digits = digit - 1)
-    sd2add <- gsub(" NA","",sd2add)
-    df2add <- as.character(round(table.coef[,"df"],2))    
-    df2add[is.na(df2add)] <- ""
-    t2add <- format(round(table.coef[,"t-value"], max(1, digit - 1)), digits = digit - 1)
-    t2add <- gsub(" NA","",t2add)
-
-    p2add <- formatC(table.coef[,"P-value"], digits = digit - 1, format = "g",  preserve.width = "common", flag = "")
-    p2add <- gsub(" NA","",p2add)
-    p2add[table.coef[,"P-value"] < 1e-12] <- "  <1e-12"
-
-    M2add <- cbind(e2add,sd2add,t2add,p2add,df2add)
-    table.coefmat[match(rownames(table.coef), name.label0),] <- M2add
-
-    table.coefmat[object.summary$coefmat[,"P-value"]=="","P-value"] <- ""
-    object.summary$coefmat <- table.coefmat
-
-### ** Export
-    if(robust){
-        colnames(object.summary$coefmat)[2] <- "robust SE"
-        colnames(object.summary$coef)[2] <- "robust SE"
-    }
-    return(object.summary)    
-}
-
-
-
-##----------------------------------------------------------------------
-### summary2.R ends here
-
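
The roxygen of the deleted summary2.R recommends pre-computing the small-sample correction when summary2 is called repeatedly. A minimal sketch of that pattern under the 1.5.x API removed here (assumes lava and lavaSearch2 <= 1.5.6 are attached; it follows the deleted examples above):

    ## fit once, store the correction once, reuse it on every later call
    m <- lvm(Y ~ X1 + X2)
    set.seed(10)
    d <- lava::sim(m, 2e1)
    e.lvm <- estimate(m, data = d)
    sCorrect(e.lvm) <- TRUE   ## compute and store the small-sample correction
    summary2(e.lvm)$coef      ## reuses the stored object$sCorrect
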
diff --git a/R/transformSummaryTable.R b/R/transformSummaryTable.R
new file mode 100644
index 0000000..e8a3ac7
--- /dev/null
+++ b/R/transformSummaryTable.R
@@ -0,0 +1,68 @@
+### transformSummaryTable.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: feb  3 2020 (18:29) 
+## Version: 
+## Last-Updated: jan 24 2022 (10:47) 
+##           By: Brice Ozenne
+##     Update #: 29
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+#' @title Apply Transformation to Summary Table
+#' @description Update a summary table according to a transformation, e.g. a log-transformation.
+#' P-values are left unchanged but estimates, standard errors, and confidence intervals are updated.
+#'
+#' @param object A data.frame with columns estimate, se, lower, upper.
+#' @param transform the name of a transformation or a function.
+#'
+#' @return a data.frame
+#' @export
+transformSummaryTable <- function(object, transform = NULL){
+    if(is.null(transform)){
+        return(object)
+    }else if(identical(transform,"atanh")){
+        transform <- atanh
+        dtransform <- function(x){1/(1-x^2)}
+    }else if(identical(transform,"exp")){
+        transform <- exp
+        dtransform <- function(x){exp(x)}
+    }else if(identical(transform,"log")){
+        transform <- log
+        dtransform <- function(x){1/x}
+    }else if(identical(transform,"loglog")){
+        transform <- function(x){log(-log(x))} ## minus sign needed: log(x) < 0 on (0,1)
+        dtransform <- function(x){1/(-x*log(x))}
+    }else if(identical(transform,"cloglog")){
+        transform <- function(x){log(-log(1-x))} ## minus sign needed: log(1-x) < 0 on (0,1)
+        dtransform <- function(x){1/(-(1-x)*log(1-x))}
+    }else if(!is.null(attr(transform,"derivative"))){
+        dtransform <- attr(transform,"derivative")
+    }else{
+        dtransform <- function(x){diag(numDeriv::jacobian(transform, x))}
+    }
+    object[,"se"] <- object[,"se"]*dtransform(object[,"estimate"])
+    object[,"estimate"] <- transform(object[,"estimate"])
+    if("lower" %in% names(object)){
+        object[,"lower"] <- transform(object[,"lower"])
+    }
+    if("upper" %in% names(object)){
+        object[,"upper"] <- transform(object[,"upper"])
+    }
+    if("null" %in% names(object)){
+        object[,"null"] <- transform(object[,"null"])
+    }
+    if("null" %in% names(object)){
+        object[,"null"] <- transform(object[,"null"])
+    }
+    return(object)
+}
+
+######################################################################
+### transformSummaryTable.R ends here
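
A short usage sketch for the newly added transformSummaryTable (the column names follow its documented interface; the numbers are purely illustrative):

    ## delta method: "se" is rescaled by the derivative evaluated at the
    ## untransformed estimate (the code updates "se" before "estimate")
    tab <- data.frame(estimate = 0.5, se = 0.1, lower = 0.3, upper = 0.7)
    transformSummaryTable(tab, transform = "exp")
    ## estimate exp(0.5) ~ 1.649, se 0.1*exp(0.5) ~ 0.165,
    ## lower exp(0.3) ~ 1.350, upper exp(0.7) ~ 2.014

    ## a user-supplied transformation with its derivative attached,
    ## matching the attr(transform, "derivative") branch above
    sqrt.t <- sqrt
    attr(sqrt.t, "derivative") <- function(x){1/(2*sqrt(x))}
    transformSummaryTable(tab, transform = sqrt.t)
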
diff --git a/R/vcov2.R b/R/vcov2.R
deleted file mode 100644
index 0cb1e41..0000000
--- a/R/vcov2.R
+++ /dev/null
@@ -1,124 +0,0 @@
-### vcov2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar 12 2018 (16:38) 
-## Version: 
-## Last-Updated: jul 31 2020 (10:44) 
-##           By: Brice Ozenne
-##     Update #: 13
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * Documentation - vcov2
-#' @title  Extract the Variance Covariance Matrix of the Model Parameters
-#' @description  Extract the variance covariance matrix of the model parameters from a Gaussian linear model.
-#' @name vcov2
-#'
-#' @param object a linear model or a latent variable model
-#' @param param [optional] the fitted parameters.
-#' @param data [optional] the data set.
-#' @param bias.correct [logical] should the standard errors of the coefficients be corrected for small sample bias? Only relevant if the \code{sCorrect} function has not yet been applied to the object.
-#' @param ... arguments to be passed to \code{sCorrect}.
-#' 
-#' @details If argument \code{param} or \code{data} is not null, then the small sample size correction is recomputed to correct the influence function.
-#'
-#' @seealso \code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
-#'
-#' @return A matrix.
-#' 
-#' @examples
-#' n <- 5e1
-#' p <- 3
-#' X.name <- paste0("X",1:p)
-#' link.lvm <- paste0("Y~",X.name)
-#' formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
-#'
-#' m <- lvm(formula.lvm)
-#' distribution(m,~Id) <- Sequence.lvm(0)
-#' set.seed(10)
-#' d <- lava::sim(m,n)
-#'
-#' ## linear model
-#' e.lm <- lm(formula.lvm,data=d)
-#' vcov.tempo <- vcov2(e.lm, bias.correct = TRUE)
-#' vcov.tempo[rownames(vcov(e.lm)),colnames(vcov(e.lm))]/vcov(e.lm)
-#'
-#' ## latent variable model
-#' e.lvm <- estimate(lvm(formula.lvm),data=d)
-#' vcov.tempo <- vcov2(e.lvm, bias.correct = FALSE)
-#' vcov.tempo/vcov(e.lvm)
-#'
-#' @concept small sample inference
-#' @export
-`vcov2` <-
-  function(object, ...) UseMethod("vcov2")
-
-## * vcov2.lm
-#' @rdname vcov2
-#' @export
-vcov2.lm <- function(object, param = NULL, data = NULL, bias.correct = TRUE, ...){
-
-    sCorrect(object, param = param, data = data, df = FALSE, ...) <- bias.correct
-
-    ### ** export
-    return(object$sCorrect$vcov.param)
-}
-
-## * vcov2.gls
-#' @rdname vcov2
-#' @export
-vcov2.gls <- vcov2.lm
-
-## * vcov2.lme
-#' @rdname vcov2
-#' @export
-vcov2.lme <- vcov2.lm
-
-## * vcov2.lvmfit
-#' @rdname vcov2
-#' @export
-vcov2.lvmfit <- vcov2.lm
-
-## * vcov2.lm2
-#' @rdname vcov2
-#' @export
-vcov2.lm2 <- function(object, param = NULL, data = NULL, ...){
-
-    ### ** compute the score
-    if(!is.null(param) || !is.null(data)){
-        args <- object$sCorrect$args
-        args$df <- FALSE
-        object$sCorrect <- do.call(sCorrect,
-                                   args = c(list(object, param = param, data = data),
-                                            args))
-    }
-
-    ### ** export
-    return(object$sCorrect$vcov.param)
-
-}
-
-## * vcov2.gls2
-#' @rdname vcov2
-#' @export
-vcov2.gls2 <- vcov2.lm2
-
-## * vcov2.lme2
-#' @rdname vcov2
-#' @export
-vcov2.lme2 <- vcov2.lm2
-
-## * vcov2.lvmfit2
-#' @rdname vcov2
-#' @export
-vcov2.lvmfit2 <- vcov2.lm2
-
-
-##----------------------------------------------------------------------
-### vcov2.R ends here
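
In a nutshell, the "small sample bias" mentioned in the deleted documentation is, for a plain Gaussian linear model, the familiar one: the ML residual variance divides by n where the unbiased estimator divides by n - p, so the model-based variances of the mean coefficients are inflated by n/(n - p). A one-line illustration using the dimensions of the example above:

    n <- 5e1; n.coef <- 4   ## 50 observations, intercept plus 3 covariates
    n / (n - n.coef)        ## ~1.087, i.e. variances grow by about 9%
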
diff --git a/build/vignette.rds b/build/vignette.rds
index 3580e05..d38a44d 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/debian/changelog b/debian/changelog
index 95d7e41..2986c02 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+r-cran-lavasearch2 (2.0.1+dfsg-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sat, 13 May 2023 21:29:04 -0000
+
 r-cran-lavasearch2 (1.5.6+dfsg-1) unstable; urgency=medium
 
   * New upstream version
diff --git a/vignettes/modelsearch.png b/inst/doc-software/modelsearch.png
similarity index 100%
rename from vignettes/modelsearch.png
rename to inst/doc-software/modelsearch.png
diff --git a/inst/doc-software/overview-compress.pdf b/inst/doc-software/overview-compress.pdf
new file mode 100644
index 0000000..92329ec
Binary files /dev/null and b/inst/doc-software/overview-compress.pdf differ
diff --git a/inst/doc-software/overview.aux b/inst/doc-software/overview.aux
new file mode 100644
index 0000000..0bf7279
--- /dev/null
+++ b/inst/doc-software/overview.aux
@@ -0,0 +1,48 @@
+\relax 
+\providecommand\hyper@newdestlabel[2]{}
+\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
+\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
+\global\let\oldcontentsline\contentsline
+\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
+\global\let\oldnewlabel\newlabel
+\gdef\newlabel#1#2{\newlabelxx{#1}#2}
+\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
+\AtEndDocument{\ifx\hyper@anchor\@undefined
+\let\contentsline\oldcontentsline
+\let\newlabel\oldnewlabel
+\fi}
+\fi}
+\global\let\hyper@last\relax 
+\gdef\HyperFirstAtBeginDocument#1{#1}
+\providecommand\HyField@AuxAddToFields[1]{}
+\providecommand\HyField@AuxAddToCoFields[2]{}
+\@writefile{toc}{\contentsline {section}{\numberline {1}Inference}{1}{section.1}\protected@file@percent }
+\newlabel{sec:orgb046af1}{{1}{1}{Inference}{section.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}Introductory example}{1}{subsection.1.1}\protected@file@percent }
+\newlabel{sec:org0d7082d}{{1.1}{1}{Introductory example}{subsection.1.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}How it works in a nutshell}{2}{subsection.1.2}\protected@file@percent }
+\newlabel{sec:org9da7fb3}{{1.2}{2}{How it works in a nutshell}{subsection.1.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Single univariate Wald test}{2}{subsection.1.3}\protected@file@percent }
+\newlabel{sec:org00a24b1}{{1.3}{2}{Single univariate Wald test}{subsection.1.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.4}Saving computation time with \texttt  {estimate2}}{4}{subsection.1.4}\protected@file@percent }
+\newlabel{sec:orgaa5d2f3}{{1.4}{4}{Saving computation time with \texttt {estimate2}}{subsection.1.4}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.5}Single multivariate Wald test}{5}{subsection.1.5}\protected@file@percent }
+\newlabel{sec:org6c4b2cc}{{1.5}{5}{Single multivariate Wald test}{subsection.1.5}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.6}Robust Wald tests}{7}{subsection.1.6}\protected@file@percent }
+\newlabel{sec:orgf3ea70d}{{1.6}{7}{Robust Wald tests}{subsection.1.6}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {1.7}Assessing the type 1 error of the testing procedure}{8}{subsection.1.7}\protected@file@percent }
+\newlabel{sec:org2f34c32}{{1.7}{8}{Assessing the type 1 error of the testing procedure}{subsection.1.7}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {2}Adjustment for multiple comparisons}{10}{section.2}\protected@file@percent }
+\newlabel{sec:org3132637}{{2}{10}{Adjustment for multiple comparisons}{section.2}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Univariate Wald test, single model}{10}{subsection.2.1}\protected@file@percent }
+\newlabel{sec:orgc7110d9}{{2.1}{10}{Univariate Wald test, single model}{subsection.2.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Univariate Wald test, multiple models}{12}{subsection.2.2}\protected@file@percent }
+\newlabel{sec:org11c88f0}{{2.2}{12}{Univariate Wald test, multiple models}{subsection.2.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {3}Model diagnostic}{15}{section.3}\protected@file@percent }
+\newlabel{sec:orgc1e79df}{{3}{15}{Model diagnostic}{section.3}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Detection of local dependencies}{15}{subsection.3.1}\protected@file@percent }
+\newlabel{sec:org439924f}{{3.1}{15}{Detection of local dependencies}{subsection.3.1}{}}
+\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Checking that the names of the variables in the model match those of the data}{18}{subsection.3.2}\protected@file@percent }
+\newlabel{sec:org47cf06d}{{3.2}{18}{Checking that the names of the variables in the model match those of the data}{subsection.3.2}{}}
+\@writefile{toc}{\contentsline {section}{\numberline {4}Information about the R session used for this document}{20}{section.4}\protected@file@percent }
+\newlabel{sec:org95dd3ad}{{4}{20}{Information about the R session used for this document}{section.4}{}}
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.soc b/inst/doc-software/overview.loc
similarity index 92%
rename from inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.soc
rename to inst/doc-software/overview.loc
index 1b15d04..28dcdc4 100644
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.soc
+++ b/inst/doc-software/overview.loc
@@ -1 +1 @@
-;blue;;0;0;0
+;blue;;0;0;0
diff --git a/inst/doc-software/overview.log b/inst/doc-software/overview.log
new file mode 100644
index 0000000..99846dd
--- /dev/null
+++ b/inst/doc-software/overview.log
@@ -0,0 +1,846 @@
[... 846-line pdfTeX build log (overview.log, TeX Live 2019/Debian) omitted: package and font loading, geometry setup, one overfull \hbox warning, ending with "Output written on overview.pdf (21 pages, 225505 bytes)" ...]
diff --git a/vignettes/overview.org b/inst/doc-software/overview.org
similarity index 66%
rename from vignettes/overview.org
rename to inst/doc-software/overview.org
index 4f88434..ab6cd3d 100644
--- a/vignettes/overview.org
+++ b/inst/doc-software/overview.org
@@ -1,1074 +1,1051 @@
-#+TITLE: Overview of the functionalities of the package lavaSearch2
-#+Author: Brice Ozenne
-#+LaTeX_HEADER: %\VignetteIndexEntry{overview}
-#+LaTeX_HEADER: %\VignetteEngine{R.rsp::tex}
-#+LaTeX_HEADER: %\VignetteKeyword{R}
-#+BEGIN_SRC R :exports none :results output :session *R* :cache no
-options(width = 90)
-#+END_SRC
-
-#+RESULTS:
-
-Load *lavaSearch2* in the R session:
-#+BEGIN_SRC R :exports code :results silent :session *R* :cache no
-library(lavaSearch2)
-#+END_SRC 
-
-* Inference
-** Introductory example
-You may have noticed that for simple linear regression, the p-values
-of the Wald tests from =lm=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## simulate data
-mSim <- lvm(Y[1:1]~0.3*X1+0.2*X2)
-set.seed(10)
-df.data <- sim(mSim, 2e1)
-
-## fit linear model
-summary(lm(Y~X1+X2, data = df.data))$coef
-#+END_SRC
-
-#+RESULTS:
-:              Estimate Std. Error   t value    Pr(>|t|)
-: (Intercept) 0.7967775  0.2506767 3.1785069 0.005495832
-: X1          0.1550938  0.2205080 0.7033477 0.491360483
-: X2          0.4581556  0.2196785 2.0855736 0.052401103
-
-differ from those obtained with the corresponding latent variable
-model estimated by maximum likelihood:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## fit latent variable model
-m <- lvm(Y~X1+X2)
-e <- estimate(m, data = df.data)
-
-## extract Wald tests
-summary(e)$coef
-#+END_SRC
-
-#+RESULTS:
-:       Estimate Std. Error   Z-value      P-value
-: Y~X1 0.1550938  0.2032984 0.7628877 0.4455303456
-: Y~X2 0.4581556  0.2025335 2.2621221 0.0236898575
-: Y~~Y 0.5557910  0.1757566 3.1622777           NA
-: Y    0.7967775  0.2311125 3.4475747 0.0005656439
-
-For instance, the p-value for the effect of X2 is 0.024 in the latent
-variable model and 0.052 in the linear regression. The discrepancy is
-due to 2 corrections that =lm= applies in order to improve the control
-of the type 1 error of the Wald tests:
-- use of a Student \(t\)-distribution instead of a Gaussian
-  distribution (informally, using a t-value instead of a z-value).
-- use of an unbiased estimator of the residual variance instead of
-  the ML estimator.
-*lavaSearch2* attempts to generalize these corrections to models with
-correlated and heteroscedastic measurements. In the case of a simple
-linear regression, Wald tests obtained with *lavaSearch2* exactly
-match the results of =lm= (a by-hand check follows the output below):
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary2(e)$coef
-#+END_SRC
-
-#+RESULTS:
-:       Estimate Std. Error   t-value    P-value    df
-: Y~X1 0.1550938  0.2205078 0.7033483 0.49136012 17.00
-: Y~X2 0.4581556  0.2196783 2.0855754 0.05240092 17.00
-: Y~~Y 0.6538707  0.2242758 2.9154759         NA  4.25
-: Y    0.7967775  0.2506765 3.1785096 0.00549580 17.00
-
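The by-hand check for the X2 row (n = 20 observations and 3 mean parameters, hence 17 degrees of freedom; the values are taken from the tables above, so agreement holds up to rounding):

    se.corrected <- 0.2025335 * sqrt(20/17)          ## ~0.21968, as in lm() and summary2()
    t.stat <- 0.4581556 / se.corrected               ## ~2.0856
    2 * pt(abs(t.stat), df = 17, lower.tail = FALSE) ## ~0.052, matching lm()
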
-** How it works in a nutshell
-
-When using *lava*, the p-values that are obtained from the summary
-(Wald tests) rely on a Gaussian approximation and maximum likelihood
-estimation. While asymptotically valid, they usually do not
-provide a very accurate control of the type 1 error rate in small
-samples. Simulations have shown that the type 1 error rate tends to be
-too large, i.e. the p-values have a downward bias. *lavaSearch2*
-provides two improvements:
-- using a Student's \(t\)-distribution instead of a Gaussian
-  distribution to account for the uncertainty on the variance of the
-  coefficients. The degrees of freedom are estimated using the
-  Satterthwaite approximation, i.e. identifying the chi-squared
-  distribution that best fits the observed moments of the variance of
-  the coefficients (see the sketch after this list).
-- (partially) correcting for the first-order bias in the ML estimates
-  of the variance parameters. This correction also affects the
-  standard errors of the estimates.
-
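A sketch of the moment matching behind the Satterthwaite step: if the estimated variance \(V\) of a coefficient is approximated by a scaled chi-squared variable, \(V \approx c\,\chi^2_{df}\), then matching \(E[V] = c\,df\) and \(Var(V) = 2c^2\,df\) gives the degrees of freedom directly:

    ## degrees of freedom from the first two moments of V
    satterthwaite.df <- function(EV, VarV){2 * EV^2 / VarV}
    satterthwaite.df(EV = 1, VarV = 0.25)   ## here 8 degrees of freedom
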
-** Single univariate Wald test
-
-We will illustrate the functionalities using a simulated dataset:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## simulate data
-mSim <- lvm(Y1~eta,Y2~eta,Y3~0.4+0.4*eta,Y4~0.6+0.6*eta,eta~0.5*X1+0.7*X2)
-latent(mSim) <- ~eta
-set.seed(12)
-df.data <- sim(mSim, n = 3e1, latent = FALSE)
-
-## display
-head(df.data)
-#+END_SRC
-
-#+RESULTS:
-:           Y1         Y2          Y3         Y4         X1         X2
-: 1 -1.7606233  0.1264910  0.66442611  0.2579355  0.2523400 -1.5431527
-: 2  3.0459417  2.4631929  0.00283511  2.1714802  0.6423143 -1.3206009
-: 3 -2.1443162 -0.3318033  0.82253070  0.3008415 -0.3469361 -0.6758215
-: 4 -2.5050328 -1.3878987 -0.10474850 -1.7814956 -0.5152632 -0.3670054
-: 5 -2.5307249  0.3012422  1.22046986 -1.0195188  0.3981689 -0.5138722
-: 6 -0.9521366  0.1669496 -0.21422548  1.5954456  0.9535572 -0.9592540
-
-We first fit the latent variable model using, as usual, the =estimate=
-function:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-m <- lvm(c(Y1,Y2,Y3,Y4)~eta, eta~X1+X2)
-e <- estimate(m, data = df.data)
-#+END_SRC
-
-#+RESULTS:
-
-We can extract the Wald tests based on the traditional approach using
-=summary=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-#+END_SRC
-
-#+RESULTS:
-:         Estimate Std. Error   Z-value      P-value
-: Y2     0.2335412  0.2448593 0.9537775 0.3401962906
-: Y3     0.5114275  0.1785886 2.8637186 0.0041869974
-: Y2~eta 0.9192847  0.2621248 3.5070497 0.0004531045
-: Y3~eta 0.2626930  0.1558978 1.6850339 0.0919820326
-: eta~X1 0.5150072  0.2513393 2.0490515 0.0404570768
-: eta~X2 0.6212222  0.2118930 2.9317729 0.0033703310
-
-As explained at the beginning of this section, *lavaSearch2* implements
-two corrections that can be directly applied by calling the =summary2=
-method:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary2(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-#+END_SRC
-
-#+RESULTS:
-:         Estimate Std. Error   t-value     P-value        df
-: Y2     0.2335412  0.2518218 0.9274067 0.371516094 12.328385
-: Y3     0.5114275  0.1828716 2.7966475 0.009848769 24.707696
-: Y2~eta 0.9192847  0.2653220 3.4647887 0.031585600  3.515034
-: Y3~eta 0.2626930  0.1562776 1.6809386 0.143826633  5.993407
-: eta~X1 0.5150072  0.2642257 1.9491180 0.065414617 20.044312
-: eta~X2 0.6212222  0.2221293 2.7966698 0.009275494 27.718363
-
-To use the Satterthwaite correction alone, set the argument
-=bias.correct= to =FALSE=:
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary2(e, bias.correct = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-#+END_SRC
-
-#+RESULTS:
-:         Estimate Std. Error   t-value     P-value        df
-: Y2     0.2335412  0.2448593 0.9537775 0.357711941 12.911877
-: Y3     0.5114275  0.1785886 2.8637186 0.008210968 25.780552
-: Y2~eta 0.9192847  0.2621248 3.5070497 0.028396459  3.674640
-: Y3~eta 0.2626930  0.1558978 1.6850339 0.141185621  6.222912
-: eta~X1 0.5150072  0.2513393 2.0490515 0.052814794 21.571210
-: eta~X2 0.6212222  0.2118930 2.9317729 0.006351686 30.370334
-
-When using the Satterthwaite correction alone, the standard errors are
-left unchanged compared to the original lava output. The only change
-is how the p-values are computed, i.e. based on the quantiles of a
-Student's \(t\)-distribution instead of a Gaussian distribution. 
-
-To only use the bias correction, set the argument =df= to =FALSE=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary2(e, df = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-#+END_SRC
-
-#+RESULTS:
-:         Estimate Std. Error   t-value      P-value  df
-: Y2     0.2335412  0.2518218 0.9274067 0.3537154044 Inf
-: Y3     0.5114275  0.1828716 2.7966475 0.0051635832 Inf
-: Y2~eta 0.9192847  0.2653220 3.4647887 0.0005306482 Inf
-: Y3~eta 0.2626930  0.1562776 1.6809386 0.0927748494 Inf
-: eta~X1 0.5150072  0.2642257 1.9491180 0.0512813393 Inf
-: eta~X2 0.6212222  0.2221293 2.7966698 0.0051632271 Inf
-
-
-** Saving computation time with =sCorrect=
-For each call to =summary2=, the small sample correction(s) will be
-recalculated. However, the calculation of the correction(s) can be
-time consuming.
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-system.time(
-    res <- summary2(e, bias.correct = FALSE)
-)
-#+END_SRC
-
-#+RESULTS:
-:    user  system elapsed 
-:    0.25    0.00    0.25
-
-In such a case one can pre-compute the main terms of the correction
-(e.g. the derivative of the variance-covariance matrix) once and for
-all using the =sCorrect= method (=sCorrect= stands for Satterthwaite
-correction). When calling =sCorrect=, the right hand side indicates
-whether the bias correction should be used (equivalent to the
-=bias.correct= argument described previously):
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-e2 <- e
-sCorrect(e2) <- TRUE
-#+END_SRC
-
-#+RESULTS:
-
-=sCorrect= automatically stores the pre-computed terms in the
-=sCorrect= slot of the object. It also adds the class =lvmfit2= to the
-object:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-class(e2)
-#+END_SRC
-
-#+RESULTS:
-: [1] "lvmfit2" "lvmfit"
-
-Then p-values computed using the small sample correction can be
-obtained by calling the =summary2= method, as usual:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary2(e2)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-#+END_SRC
-
-#+RESULTS:
-:         Estimate Std. Error   t-value     P-value        df
-: Y2     0.2335412  0.2518218 0.9274067 0.371516094 12.328385
-: Y3     0.5114275  0.1828716 2.7966475 0.009848769 24.707696
-: Y2~eta 0.9192847  0.2653220 3.4647887 0.031585600  3.515034
-: Y3~eta 0.2626930  0.1562776 1.6809386 0.143826633  5.993407
-: eta~X1 0.5150072  0.2642257 1.9491180 0.065414617 20.044312
-: eta~X2 0.6212222  0.2221293 2.7966698 0.009275494 27.718363
-
-The =summary2= method then takes approximately the same time as the
-usual =summary= method:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-system.time(
-    summary2(e2)
-)
-#+END_SRC
-
-#+RESULTS:
-:    user  system elapsed 
-:    0.19    0.00    0.19
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-system.time(
-    summary(e2)
-)
-#+END_SRC
-
-#+RESULTS:
-:    user  system elapsed 
-:    0.15    0.00    0.16
-
-** Single multivariate Wald test
-
-The function =compare= from the lava package can be used to perform
-multivariate Wald tests, i.e. to test simultaneously several linear
-combinations of the coefficients. =compare= uses a contrast matrix
-whose rows encode which linear combinations of coefficients should be
-tested. For instance, if we want to simultaneously test whether a set
-of mean coefficients is 0, we can create a contrast matrix using
-=createContrast=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resC <- createContrast(e2, par = c("Y2=0","Y2~eta=0","eta~X1=0"))
-resC
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-$contrast
-             Y2 Y3 Y4 eta Y2~eta Y3~eta Y4~eta eta~X1 eta~X2 Y1~~Y1 Y2~~Y2 Y3~~Y3 Y4~~Y4
-[Y2] = 0      1  0  0   0      0      0      0      0      0      0      0      0      0
-[Y2~eta] = 0  0  0  0   0      1      0      0      0      0      0      0      0      0
-[eta~X1] = 0  0  0  0   0      0      0      0      1      0      0      0      0      0
-             eta~~eta
-[Y2] = 0            0
-[Y2~eta] = 0        0
-[eta~X1] = 0        0
-
-$null
-    [Y2] = 0 [Y2~eta] = 0 [eta~X1] = 0 
-           0            0            0 
-
-$Q
-[1] 3
-#+end_example
-
-We can then test the linear hypothesis by specifying in =compare= the
-left hand side of the hypothesis (argument =contrast=) and the right
-hand side (argument =null=):
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resTest0 <- lava::compare(e2, contrast = resC$contrast, null = resC$null)
-resTest0
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-chisq = 21.332, df = 3, p-value = 8.981e-05
-sample estimates:
-          Estimate   Std.Err       2.5%     97.5%
-[Y2]     0.2335412 0.2448593 -0.2463741 0.7134566
-[Y2~eta] 0.9192847 0.2621248  0.4055295 1.4330399
-[eta~X1] 0.5150072 0.2513393  0.0223912 1.0076231
-#+end_example
-
-=compare= uses a chi-squared distribution to compute the p-values.
-Similarly to the Gaussian approximation, while being valid
-asymptotically this procedure may not provide a very accurate control
-of the type 1 error rate in small samples. Fortunately, the correction
-proposed for the univariate Wald statistic can be adapted to the
-multivariate Wald statistic. This is achieved by =compare2=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resTest1 <- compare2(e2, contrast = resC$contrast, null = resC$null)
-resTest1
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 6.7118, df1 = 3, df2 = 11.1, p-value = 0.007596
-sample estimates:
-              Estimate   Std.Err        df       2.5%     97.5%
-[Y2] = 0     0.2335412 0.2518218 12.328385 -0.3135148 0.7805973
-[Y2~eta] = 0 0.9192847 0.2653220  3.515034  0.1407653 1.6978041
-[eta~X1] = 0 0.5150072 0.2642257 20.044312 -0.0360800 1.0660943
-#+end_example
-
-The same result could have been obtained using the =par= argument to
-define the linear hypothesis:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resTest2 <- compare2(e2, par = c("Y2","Y2~eta","eta~X1"))
-identical(resTest1,resTest2)
-#+END_SRC
-
-#+RESULTS:
-: [1] TRUE
-
-Now an F-distribution is used to compute the p-values. As before, one
-can set the argument =bias.correct= to =FALSE= to use the
-Satterthwaite approximation alone:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resTest3 <- compare2(e, bias.correct = FALSE, 
-                      contrast = resC$contrast, null = resC$null)
-resTest3
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 7.1107, df1 = 3, df2 = 11.13, p-value = 0.006182
-sample estimates:
-              Estimate   Std.Err       df         2.5%     97.5%
-[Y2] = 0     0.2335412 0.2448593 12.91188 -0.295812256 0.7628948
-[Y2~eta] = 0 0.9192847 0.2621248  3.67464  0.165378080 1.6731913
-[eta~X1] = 0 0.5150072 0.2513393 21.57121 -0.006840023 1.0368543
-#+end_example
-
-In this case the F-statistic of =compare2= is the same as the
-chi-squared statistic of =compare= divided by the rank of the contrast matrix:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resTest0$statistic/qr(resC$contrast)$rank
-#+END_SRC
-
-#+RESULTS:
-:    chisq 
-: 7.110689
-
-** Robust Wald tests
-
-When one does not want to assume normally distributed residuals,
-robust standard errors can be used instead of the model-based standard
-errors. They can be obtained by setting the argument =robust= to
-=TRUE= when computing univariate Wald tests:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary2(e, robust = TRUE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-#+END_SRC
-
-#+RESULTS:
-:         Estimate robust SE   t-value      P-value       df
-: Y2     0.2335412 0.2353245 0.9924222 0.3340117610 18.18841
-: Y3     0.5114275 0.1897160 2.6957535 0.0099985389 42.79555
-: Y2~eta 0.9192847 0.1791240 5.1321150 0.0002361186 12.19058
-: Y3~eta 0.2626930 0.1365520 1.9237585 0.0653095551 26.20919
-: eta~X1 0.5150072 0.2167580 2.3759546 0.0315112789 14.74859
-: eta~X2 0.6212222 0.2036501 3.0504389 0.0035239307 54.54181
-
-or multivariate Wald test:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-compare2(e2, robust = TRUE, par = c("Y2","Y2~eta","eta~X1"))
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 12.526, df1 = 3, df2 = 23.97, p-value = 3.981e-05
-sample estimates:
-              Estimate robust SE       df        2.5%     97.5%
-[Y2] = 0     0.2335412 0.2353245 18.18841 -0.26049031 0.7275728
-[Y2~eta] = 0 0.9192847 0.1791240 12.19058  0.52968275 1.3088867
-[eta~X1] = 0 0.5150072 0.2167580 14.74859  0.05231154 0.9777028
-#+end_example
-
-Only the standard errors are affected by the argument =robust=; the
-degrees of freedom are those of the model-based standard errors. It
-may be surprising that the (corrected) robust standard errors are (in
-this example) smaller than the (corrected) model-based ones. This is
-also the case for the uncorrected ones:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-rbind(robust = diag(crossprod(iid(e2))),
-      model = diag(vcov(e2)))
-#+END_SRC
-
-#+RESULTS:
-:                Y2         Y3         Y4        eta     Y2~eta     Y3~eta     Y4~eta
-: robust 0.04777252 0.03325435 0.03886706 0.06011727 0.08590732 0.02179453 0.02981895
-: model  0.05995606 0.03189389 0.04644303 0.06132384 0.06870941 0.02430412 0.03715633
-:            eta~X1     eta~X2    Y1~~Y1    Y2~~Y2     Y3~~Y3     Y4~~Y4  eta~~eta
-: robust 0.05166005 0.05709393 0.2795272 0.1078948 0.03769614 0.06923165 0.3198022
-: model  0.06317144 0.04489865 0.1754744 0.1600112 0.05112998 0.10152642 0.2320190
-
-This may be explained by the fact that robust standard errors tend to
-be liberal in small samples (see e.g. Kauermann 2001, "A Note on the
-Efficiency of Sandwich Covariance Matrix Estimation").
-
-** Assessing the type 1 error of the testing procedure
-
-The function =calibrateType1= can be used to assess the type 1 error
-of a Wald statistic on a specific example. This however assumes that
-the estimated model is correctly specified. Let's make an example. For
-this we simulate some data:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-set.seed(10)
-m.generative <- lvm(Y ~ X1 + X2 + Gene)
-categorical(m.generative, labels = c("ss","ll")) <- ~Gene
-d <- lava::sim(m.generative, n = 50, latent = FALSE)
-#+END_SRC
-
-#+RESULTS:
-
-Let's now imagine that we want to analyze the relationship between
-Y and Gene using the following dataset:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-head(d)
-#+END_SRC
-
-#+RESULTS:
-:             Y         X1         X2 Gene
-: 1 -1.14369572 -0.4006375 -0.7618043   ss
-: 2 -0.09943370 -0.3345566  0.4193754   ss
-: 3 -0.04331996  1.3679540 -1.0399434   ll
-: 4  2.25017335  2.1377671  0.7115740   ss
-: 5  0.16715138  0.5058193 -0.6332130   ss
-: 6  1.73931135  0.7863424  0.5631747   ss
-
-For this we define a LVM:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-myModel <- lvm(Y ~ X1 + X2 + Gene)
-#+END_SRC
-
-#+RESULTS:
-
-and estimate the coefficients of the model using =estimate=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-e <- estimate(myModel, data = d)
-e
-#+END_SRC
-
-#+RESULTS:
-:                     Estimate Std. Error  Z-value  P-value
-: Regressions:                                             
-:    Y~X1              1.02349    0.12017  8.51728   <1e-12
-:    Y~X2              0.91519    0.12380  7.39244   <1e-12
-:    Y~Genell          0.48035    0.23991  2.00224  0.04526
-: Intercepts:                                              
-:    Y                -0.11221    0.15773 -0.71141   0.4768
-: Residual Variances:                                      
-:    Y                 0.67073    0.13415  5.00000
-
-We can now use =calibrateType1= to perform a simulation study. We just
-need to define the null hypotheses (i.e. which coefficients should be
-set to 0 when generating the data) and the number of simulations:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-mySimulation <- calibrateType1(e, 
-                               param = "Y~Genell",
-                               n.rep = 50, 
-                               trace = FALSE, seed = 10)
-#+END_SRC
-
-#+RESULTS:
-
-To save time we only run 50 simulations, but many more are necessary
-to really assess the type 1 error rate. Then we can use the =summary=
-method to display the results:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary(mySimulation)
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-Estimated type 1 error rate [95% confidence interval] 
-  > sample size: 50 | number of simulations: 50
-     link statistic correction type1error                  CI
- Y~Genell      Wald       Gaus       0.12 [0.05492 ; 0.24242]
-                          Satt       0.10 [0.04224 ; 0.21869]
-                           SSC       0.10 [0.04224 ; 0.21869]
-                    SSC + Satt       0.08 [0.03035 ; 0.19456]
-
-Corrections: Gaus = Gaussian approximation 
-             SSC  = small sample correction 
-             Satt = Satterthwaite approximation
-#+end_example
-
-
-\clearpage
-
-* Adjustment for multiple comparisons
-** Univariate Wald test, single model
-
-When performing multiple testing, adjustment for multiple comparisons
-is necessary in order to control the type 1 error rate, i.e. to
-provide interpretable p.values. The *multcomp* package enables such an
-adjustment when all tests come from the same =lvmfit= object:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## simulate data
-mSim <- lvm(Y ~ 0.25 * X1 + 0.3 * X2 + 0.35 * X3 + 0.4 * X4 + 0.45 * X5 + 0.5 * X6)
-set.seed(10)
-df.data <- sim(mSim, n = 4e1)
-
-## fit lvm
-e.lvm <- estimate(lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-name.coef <- names(coef(e.lvm))
-n.coef <- length(name.coef)
-
-## Create contrast matrix
-resC <- createContrast(e.lvm, par = paste0("Y~X",1:6), rowname.rhs = FALSE)
-resC$contrast
-#+END_SRC
-
-#+RESULTS:
-:      Y Y~X1 Y~X2 Y~X3 Y~X4 Y~X5 Y~X6 Y~~Y
-: Y~X1 0    1    0    0    0    0    0    0
-: Y~X2 0    0    1    0    0    0    0    0
-: Y~X3 0    0    0    1    0    0    0    0
-: Y~X4 0    0    0    0    1    0    0    0
-: Y~X5 0    0    0    0    0    1    0    0
-: Y~X6 0    0    0    0    0    0    1    0
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-e.glht <- multcomp::glht(e.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(e.glht)
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error z value Pr(>|z|)   
-Y~X1 == 0   0.3270     0.1589   2.058  0.20725   
-Y~X2 == 0   0.4025     0.1596   2.523  0.06611 . 
-Y~X3 == 0   0.5072     0.1383   3.669  0.00144 **
-Y~X4 == 0   0.3161     0.1662   1.902  0.28582   
-Y~X5 == 0   0.3875     0.1498   2.586  0.05554 . 
-Y~X6 == 0   0.3758     0.1314   2.859  0.02482 * 
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- single-step method)
-#+end_example
-
-Note that this correction relies on the Gaussian approximation. To use
-small sample corrections implemented in *lavaSearch2*, just call
-=glht2= instead of =glht=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-e.glht2 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(e.glht2)
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error t value Pr(>|t|)  
-Y~X1 == 0   0.3270     0.1750   1.869   0.3290  
-Y~X2 == 0   0.4025     0.1757   2.291   0.1482  
-Y~X3 == 0   0.5072     0.1522   3.333   0.0123 *
-Y~X4 == 0   0.3161     0.1830   1.727   0.4128  
-Y~X5 == 0   0.3875     0.1650   2.349   0.1315  
-Y~X6 == 0   0.3758     0.1447   2.597   0.0762 .
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- single-step method)
-#+end_example
-
-The single-step method is the appropriate correction when one wants
-to report the most significant p-value relative to a set of
-hypotheses. If the second most significant p-value is also to be
-reported, then the method "free" is more efficient:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary(e.glht2, test = multcomp::adjusted("free"))
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error t value Pr(>|t|)  
-Y~X1 == 0   0.3270     0.1750   1.869   0.1291  
-Y~X2 == 0   0.4025     0.1757   2.291   0.0913 .
-Y~X3 == 0   0.5072     0.1522   3.333   0.0123 *
-Y~X4 == 0   0.3161     0.1830   1.727   0.1291  
-Y~X5 == 0   0.3875     0.1650   2.349   0.0913 .
-Y~X6 == 0   0.3758     0.1447   2.597   0.0645 .
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- free method)
-#+end_example
-
-See the book: "Multiple Comparisons Using R" by Frank Bretz, Torsten
-Hothorn, and Peter Westfall (2011, CRC Press) for details about the
-theory underlying the *multcomp* package.
-
-** Univariate Wald test, multiple models
-
-Pipper et al. in "A Versatile Method for Confirmatory Evaluation of
-the Effects of a Covariate in Multiple Models" (2012, Journal of the
-Royal Statistical Society, Series C) developed a method to assess the
-effect of an exposure on several outcomes when a different model is
-fitted for each outcome. This method has been implemented in the =mmm=
-function from the *multcomp* package for glm and Cox
-models. *lavaSearch2* extends it to =lvm=. 
-
-Let's consider an example where we wish to assess the treatment effect
-on three outcomes X, Y, and Z. We have at hand three measurements
-relative to outcome Z for each individual:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-mSim <- lvm(X ~ Age + 0.5*Treatment,
-            Y ~ Gender + 0.25*Treatment,
-            c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
-            Age[40:5]~1)
-latent(mSim) <- ~eta
-categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
-categorical(mSim, labels = c("male","female")) <- ~Gender
-
-n <- 5e1
-set.seed(10)
-df.data <- sim(mSim, n = n, latent = FALSE)
-head(df.data)
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-         X      Age Treatment          Y Gender         Z1         Z2          Z3
-1 39.12289 39.10415   placebo  0.6088958 female  1.8714112  2.2960633 -0.09326935
-2 39.56766 39.25191      SSRI  1.0001325 female  0.9709943  0.6296226  1.31035910
-3 41.68751 43.05884   placebo  2.1551047 female -1.1634011 -0.3332927 -1.30769267
-4 44.68102 44.78019      SSRI  0.3852728 female -1.0305476  0.6678775  0.99780139
-5 41.42559 41.13105   placebo -0.8666783   male -1.6342816 -0.8285492  1.20450488
-6 42.64811 41.75832      SSRI -1.0710170 female -1.2198019 -1.9602130 -1.85472132
-   treatment
-1  1.1639675
-2 -1.5233846
-3 -2.5183351
-4 -0.7075292
-5 -0.2874329
-6 -0.4353083
-#+end_example
-
-We fit a model specific to each outcome:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-lmX <- lm(X ~ Age + Treatment, data = df.data)
-lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
-lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ 1*eta, eta ~ -1 + Treatment), 
-                 data = df.data)
-#+END_SRC
-
-#+RESULTS:
-
-and combine them into a list of models:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-mmm.lvm <- multcomp::mmm(X = lmX, Y = lvmY, Z = lvmZ)
-#+END_SRC
-
-#+RESULTS:
-
-We can then generate a contrast matrix to test each coefficient
-related to the treatment:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resC <- createContrast(mmm.lvm, var.test = "Treatment", add.variance = TRUE)
-resC$contrast
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-                     X: (Intercept) X: Age X: TreatmentSSRI X: sigma2 Y: Y
-X: TreatmentSSRI                  0      0                1         0    0
-Y: Y~TreatmentSSRI                0      0                0         0    0
-Z: eta~TreatmentSSRI              0      0                0         0    0
-                     Y: Y~Genderfemale Y: Y~TreatmentSSRI Y: Y~~Y Z: Z1 Z: Z2 Z: Z3
-X: TreatmentSSRI                     0                  0       0     0     0     0
-Y: Y~TreatmentSSRI                   0                  1       0     0     0     0
-Z: eta~TreatmentSSRI                 0                  0       0     0     0     0
-                     Z: eta~TreatmentSSRI Z: Z1~~Z1 Z: Z2~~Z2 Z: Z3~~Z3 Z: eta~~eta
-X: TreatmentSSRI                        0         0         0         0           0
-Y: Y~TreatmentSSRI                      0         0         0         0           0
-Z: eta~TreatmentSSRI                    1         0         0         0           0
-#+end_example
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-lvm.glht2 <- glht2(mmm.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(lvm.glht2)
-#+END_SRC
-
-#+RESULTS:
-: 
-: 	 Simultaneous Tests for General Linear Hypotheses
-: 
-: Linear Hypotheses:
-:                           Estimate Std. Error t value Pr(>|t|)
-: X: TreatmentSSRI == 0       0.4661     0.2533   1.840    0.187
-: Y: Y~TreatmentSSRI == 0    -0.5421     0.2613  -2.074    0.117
-: Z: eta~TreatmentSSRI == 0  -0.6198     0.4404  -1.407    0.393
-: (Adjusted p values reported -- single-step method)
-
-This can be compared to the unadjusted p.values:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-summary(lvm.glht2, test = multcomp::univariate())
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-
-	 Simultaneous Tests for General Linear Hypotheses
-
-Linear Hypotheses:
-                          Estimate Std. Error t value Pr(>|t|)  
-X: TreatmentSSRI == 0       0.4661     0.2533   1.840   0.0720 .
-Y: Y~TreatmentSSRI == 0    -0.5421     0.2613  -2.074   0.0435 *
-Z: eta~TreatmentSSRI == 0  -0.6198     0.4404  -1.407   0.1659  
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Univariate p values reported)
-#+end_example
-
-
-\clearpage 
-
-* Model diagnostic
-** Detection of local dependencies
-
-The =modelsearch= function of *lava* is a diagnostic tool for latent
-variable models. It enables searching for local dependencies
-(i.e. model misspecification) and adding them to the model. It is, of
-course, a data-driven procedure whose usefulness can be debated,
-especially in small samples:
-- the procedure is unstable, i.e. it is likely to lead to two
-  different models when applied to two different datasets sampled from
-  the same generative model.
-- it is hard to define a meaningful significance threshold since
-  p-values should be adjusted for multiple comparisons and sequential
-  testing. However, traditional methods like Bonferroni-Holm tend to
-  over-correct and therefore reduce the power of the procedure since
-  they assume that the tests are independent.
-
-The function =modelsearch2= in *lavaSearch2* partially solves the
-second issue by adjusting the p-values for multiple testing. Let's see
-an example:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## simulate data
-mSim <- lvm(c(y1,y2,y3)~u, u~x1+x2)
-latent(mSim) <- ~u
-covariance(mSim) <- y2~y3
-transform(mSim, Id~u) <- function(x){1:NROW(x)}
-set.seed(10)
-df.data <- lava::sim(mSim, n = 125, latent = FALSE)
-head(df.data)
-#+END_SRC
-
-#+RESULTS:
-:           y1           y2         y3         x1         x2 Id
-: 1  5.5071523  4.883752014  6.2928016  0.8694750  2.3991549  1
-: 2 -0.6398644  0.025832617  0.5088030 -0.6800096 -0.0898721  2
-: 3 -2.5835495 -2.616715027 -2.8982645  0.1732145 -0.8216484  3
-: 4 -2.5312637 -2.518185427 -2.9015033 -0.1594380 -0.2869618  4
-: 5  1.6346220 -0.001877577  0.3705181  0.7934994  0.1312789  5
-: 6  0.4939972  1.759884014  1.5010499  1.6943505 -1.0620840  6
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## fit model
-m <- lvm(c(y1,y2,y3)~u, u~x1)
-latent(m) <- ~u
-addvar(m) <- ~x2 
-e.lvm <- estimate(m, data = df.data)
-#+END_SRC
-
-#+RESULTS:
-
-=modelsearch2= can be used to sequentially apply the =modelsearch=
-function with a given correction for the p.values:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resScore <- modelsearch2(e.lvm, alpha = 0.1, trace = FALSE)
-displayScore <- summary(resScore)
-#+END_SRC
-
-#+RESULTS:
-: Sequential search for local dependence using the score statistic 
-: The variable selection procedure retained 2 variables:
-:     link statistic      p.value adjusted.p.value dp.Info selected nTests
-: 1   u~x2  6.036264 1.577228e-09     5.008615e-08       1     TRUE     10
-: 2 y2~~y3  2.629176 8.559198e-03     6.055947e-02       1     TRUE      9
-: 3  y3~x1  1.770997 7.656118e-02     2.814424e-01       1    FALSE      8
-: Confidence level: 0.9 (two sided, adjustement: fastmax)
-
-This indeed matches the highest score statistic found by
-=modelsearch=:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resScore0 <- modelsearch(e.lvm, silent = TRUE)
-c(statistic = sqrt(max(resScore0$test[,"Test Statistic"])), 
-  p.value = min(resScore0$test[,"P-value"]))
-#+END_SRC
-
-#+RESULTS:
-:    statistic      p.value 
-: 6.036264e+00 1.577228e-09
-
-We can compare the adjustment using the max distribution to Bonferroni:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-data.frame(link = displayScore$table[,"link"],
-           none = displayScore$table[,"p.value"],
-           bonferroni = displayScore$table[,"p.value"]*displayScore$table[1,"nTests"],
-           max = displayScore$table[,"adjusted.p.value"])
-#+END_SRC
-
-#+RESULTS:
-:     link         none   bonferroni          max
-: 1   u~x2 1.577228e-09 1.577228e-08 5.008615e-08
-: 2 y2~~y3 8.559198e-03 8.559198e-02 6.055947e-02
-: 3  y3~x1 7.656118e-02 7.656118e-01 2.814424e-01
-
-In theory, the correction based on the max statistic should give a
-p-value that is smaller than or equal to the p-value adjusted using
-Bonferroni. However, for very small p-values, the max-correction can
-be numerically inaccurate and result in p-values that are slightly
-larger. The evolution of the estimate of a given coefficient across
-the sequential search can be displayed using =autoplot=:
-
-#+BEGIN_SRC R :results graphics :file "c:/Users/hpl802/Documents/GitHub/lavaSearch2/vignettes/modelsearch.png" :exports results :session *R* :cache no
-autoplot(resScore, param = "u~x1")
-#+END_SRC
-
-#+RESULTS:
-[[file:./modelsearch.png]]
-
-In many cases, not all links are plausible, so the user should
-indicate which links should be investigated by =modelsearch2=. This
-can be done via the argument =link=:
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-resRed <- modelsearch2(e.lvm, link = c("y1~~y2","y1~~y3","y2~~y3"), trace = FALSE)
-print(resRed)
-#+END_SRC
-
-#+RESULTS:
-: Sequential search for local dependence using the score statistic 
-: The variable selection procedure did not retain any variable 
-:     link statistic    p.value adjusted.p.value dp.Info selected nTests
-: 1 y1~~y3  1.754102 0.07941299        0.1818963       1    FALSE      3
-: Confidence level: 0.95 (two sided, adjustement: fastmax)
-
-The function =findNewLink= can help the user to identify the set of
-relevant links:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-findNewLink(e.lvm$model, type = "covariance")$link
-#+END_SRC
-
-#+RESULTS:
-: [1] "y1~~y2" "y1~~y3" "y2~~y3"
-
-** Checking that the names of the variables in the model match those of the data
-
-When estimating latent variable models using *lava*, it sometimes
-happens that the model does not converge:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## simulate data
-set.seed(10)
-df.data <- sim(lvm(Y~X1+X2), 1e2)
-
-## fit model
-mWrong <- lvm(Y ~ X + X2)
-eWrong <- estimate(mWrong, data = df.data)
-#+END_SRC
-
-#+RESULTS:
-: Warning messages:
-: 1: In estimate.lvm(mWrong, data = df.data) :
-:   Lack of convergence. Increase number of iteration or change starting values.
-: 2: In sqrt(diag(asVar)) : NaNs produced
-
-This can have several reasons:
-- the model is not identifiable.
-- the optimization routine did not manage to find a local
-  optimum. This may happen for complex latent variable models where
-  the objective function is not convex or only locally convex.
-- the user has made a mistake when defining the model or has not given
-  the appropriate dataset.
-
-The =checkData= function enables checking the last point. It compares
-the observed variables defined in the model with those given by the
-dataset. In case of a mismatch it returns a message:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-checkData(mWrong, df.data)
-#+END_SRC
-
-#+RESULTS:
-: Missing variable in data: X
- 
-In the presence of latent variables, the user needs to explicitly
-define them in the model; otherwise =checkData= will identify them as
-an issue:
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-## simulate data
-set.seed(10)
-mSim <- lvm(c(Y1,Y2,Y3)~eta)
-latent(mSim) <- ~eta
-df.data <- sim(mSim, n = 1e2, latent = FALSE)
-
-## fit model
-m <- lvm(c(Y1,Y2,Y3)~eta)
-checkData(m, data = df.data)
-#+END_SRC
-
-#+RESULTS:
-: Missing variable in data: eta
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-latent(m) <- ~eta
-checkData(m, data = df.data)
-#+END_SRC
-
-#+RESULTS:
-: No issue detected
-
-
-\clearpage
-
-* Information about the R session used for this document
-
-#+BEGIN_SRC R :exports both :results output :session *R* :cache no
-sessionInfo()
-#+END_SRC
-
-#+RESULTS:
-#+begin_example
-R version 3.5.1 (2018-07-02)
-Platform: x86_64-w64-mingw32/x64 (64-bit)
-Running under: Windows 7 x64 (build 7601) Service Pack 1
-
-Matrix products: default
-
-locale:
-[1] LC_COLLATE=Danish_Denmark.1252  LC_CTYPE=Danish_Denmark.1252   
-[3] LC_MONETARY=Danish_Denmark.1252 LC_NUMERIC=C                   
-[5] LC_TIME=Danish_Denmark.1252    
-
-attached base packages:
-[1] stats     graphics  grDevices utils     datasets  methods   base     
-
-other attached packages:
-[1] lavaSearch2_1.5.1 lava_1.6.4        ggplot2_3.1.0    
-
-loaded via a namespace (and not attached):
- [1] Rcpp_1.0.0        pillar_1.3.1      compiler_3.5.1    plyr_1.8.4       
- [5] bindr_0.1.1       tools_3.5.1       tibble_2.0.1      gtable_0.2.0     
- [9] lattice_0.20-35   pkgconfig_2.0.2   rlang_0.3.1       Matrix_1.2-14    
-[13] parallel_3.5.1    mvtnorm_1.0-8     bindrcpp_0.2.2    withr_2.1.2      
-[17] dplyr_0.7.8       stringr_1.3.1     grid_3.5.1        tidyselect_0.2.5 
-[21] glue_1.3.0        R6_2.3.0          survival_2.42-6   multcomp_1.4-8   
-[25] TH.data_1.0-9     purrr_0.3.0       reshape2_1.4.3    magrittr_1.5     
-[29] scales_1.0.0      codetools_0.2-15  MASS_7.3-50       splines_3.5.1    
-[33] assertthat_0.2.0  colorspace_1.3-2  numDeriv_2016.8-1 labeling_0.3     
-[37] sandwich_2.5-0    stringi_1.2.4     lazyeval_0.2.1    munsell_0.5.0    
-[41] crayon_1.3.4      zoo_1.8-4
-#+end_example
-
-* CONFIG :noexport:
-#+LANGUAGE:  en
-#+LaTeX_CLASS: org-article
-#+LaTeX_CLASS_OPTIONS: [12pt]
-#+OPTIONS:   title:t author:t toc:nil todo:nil
-#+OPTIONS:   H:3 num:t 
-#+OPTIONS:   TeX:t LaTeX:t
-
-** Code
-#+PROPERTY: header-args :session *R*
-#+PROPERTY: header-args :tangle yes % extract source code: http://orgmode.org/manual/Extracting-source-code.html
-#+PROPERTY: header-args :cache no 
-#+LATEX_HEADER: \RequirePackage{fancyvrb}
-#+LATEX_HEADER: \DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\small,formatcom = {\color[rgb]{0.5,0,0}}}
-
-** Display 
-#+LaTeX_HEADER: \geometry{a4paper, left=15mm, right=15mm}
-
-#+LATEX_HEADER: \RequirePackage{colortbl} % arrayrulecolor to mix colors
-#+LATEX_HEADER: \RequirePackage{setspace} % to modify the space between lines - incompatible with footnote in beamer
-#+LaTeX_HEADER:\usepackage{authblk} % enable several affiliations (clash with beamer)
-#+LaTeX_HEADER:\renewcommand{\baselinestretch}{1.1}
-#+LATEX_HEADER:\geometry{top=1cm}
-
-** List
-#+LaTeX_HEADER: \usepackage{enumitem}
-
-** Notations
-#+LATEX_HEADER: \RequirePackage{xspace} % 
-#+LATEX_HEADER: \newcommand\Rlogo{\textbf{\textsf{R}}\xspace} % 
-
-** Image
-#+LATEX_HEADER: \RequirePackage{epstopdf} % to be able to convert .eps to .pdf image files
+#+TITLE: Overview of the functionalities of the package lavaSearch2
+#+Author: Brice Ozenne
+#+LaTeX_HEADER: %\VignetteIndexEntry{overview}
+#+LaTeX_HEADER: %\VignetteEngine{R.rsp::tex}
+#+LaTeX_HEADER: %\VignetteKeyword{R}
+#+BEGIN_SRC R :exports none :results output :session *R* :cache no
+options(width = 90)
+#+END_SRC
+
+#+RESULTS:
+
+Load *lavaSearch2* in the R session:
+#+BEGIN_SRC R :exports code :results silent :session *R* :cache no
+library(lavaSearch2)
+#+END_SRC 
+
+* Inference
+** Introductory example
+You may have noticed that for simple linear regression, the p-values
+of the Wald tests from =lm=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## simulate data
+mSim <- lvm(Y[1:1]~0.3*X1+0.2*X2)
+set.seed(10)
+df.data <- sim(mSim, 2e1)
+
+## fit linear model
+summary(lm(Y~X1+X2, data = df.data))$coef
+#+END_SRC
+
+#+RESULTS:
+:              Estimate Std. Error   t value    Pr(>|t|)
+: (Intercept) 0.7967775  0.2506767 3.1785069 0.005495832
+: X1          0.1550938  0.2205080 0.7033477 0.491360483
+: X2          0.4581556  0.2196785 2.0855736 0.052401103
+
+differ from those obtained with the corresponding latent variable
+model estimated by maximum likelihood:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## fit latent variable model
+m <- lvm(Y~X1+X2)
+e <- estimate(m, data = df.data)
+
+## extract Wald tests
+summary(e)$coef
+#+END_SRC
+
+#+RESULTS:
+:       Estimate Std. Error   Z-value      P-value
+: Y~X1 0.1550938  0.2032984 0.7628877 0.4455303456
+: Y~X2 0.4581556  0.2025335 2.2621221 0.0236898575
+: Y~~Y 0.5557910  0.1757566 3.1622777           NA
+: Y    0.7967775  0.2311125 3.4475747 0.0005656439
+
+For instance, the p-value for the effect of X2 is 0.024 in the latent
+variable model and 0.052 in the linear regression. The discrepancy is
+due to two corrections that =lm= applies in order to improve the
+control of the type 1 error of the Wald tests:
+- use of a Student \(t\)-distribution instead of a Gaussian
+  distribution (informally, using a t-value instead of a z-value).
+- use of an unbiased estimator of the residual variance instead of
+  the ML-estimator.
+
+*lavaSearch2* attempts to generalize these corrections to models with
+correlated and heteroscedastic measurements. In the case of a simple
+linear regression, Wald tests obtained with *lavaSearch2* match
+almost exactly those of =lm=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary2(e)$coef
+#+END_SRC
+
+#+RESULTS:
+:       estimate        se statistic    df     p.value
+: Y    0.7967775 0.2506766 3.1785073 17.00 0.005495827
+: Y~X1 0.1550938 0.2205080 0.7033478 17.00 0.491360428
+: Y~X2 0.4581556 0.2196784 2.0855738 17.00 0.052401076
+: Y~~Y 0.6538716 0.2242761        NA  4.25          NA
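+
+Both corrections can be reproduced by hand. A minimal sketch, reusing
+the ML estimates displayed above and assuming n = 20 observations and
+p = 3 mean parameters (intercept, X1, X2):
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+n <- 20; p <- 3
+0.5557910 * n/(n - p)           ## ML residual variance -> unbiased estimate
+0.2032984 * sqrt(n/(n - p))     ## ML standard error of X1 -> standard error of lm
+2 * (1 - pt(0.7033477, n - p))  ## t-value of X1 -> two-sided p-value of lm
+#+END_SRC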
+
+** How it works in a nutshell
+
+When using *lava*, the p.values that are obtained from the summary
+(Wald tests) rely on a Gaussian approximation and maximum likelihood
+estimation. While being asymptotically valid, they usually do not
+provide a very accurate control of the type 1 error rate in small
+samples. Simulations have shown that the type 1 error rate tends to be
+too large, i.e. the p.values have a downward bias. *lavaSearch2*
+provides two improvements:
+- using a Student's \(t\)-distribution instead of a Gaussian
+  distribution to account for the uncertainty on the variance of the
+  coefficients. The degrees of freedom are estimated using a
+  Satterthwaite approximation, i.e. identifying the chi-squared
+  distribution that best fits the observed moments of the variance of
+  the coefficients (see the small illustration after this list).
+- (partially) correcting for the first order bias in the ML estimates
+  of the variance parameters. This correction also affects the
+  standard error of the estimates.
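+
+As a small illustration of the Satterthwaite idea (this is only a
+sketch, not the package's internal code), one can moment-match a
+variance estimator V to a scaled chi-squared distribution via
+df = 2 E[V]^2 / Var(V). For the residual variance of a linear model
+with n observations and p mean parameters, this recovers the usual
+n - p degrees of freedom:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+set.seed(10)
+n <- 20; p <- 3
+## simulate unbiased residual variance estimates in a Gaussian linear model
+V <- replicate(1e4, {
+    e.res <- residuals(lm(rnorm(n) ~ matrix(rnorm(n * (p - 1)), nrow = n)))
+    sum(e.res^2)/(n - p)
+})
+2 * mean(V)^2 / var(V) ## moment-matched df, close to n - p = 17
+#+END_SRC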
+
+** Single univariate Wald test
+
+We will illustrate the functionalities using a simulated dataset:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## simulate data
+mSim <- lvm(Y1~eta,Y2~eta,Y3~0.4+0.4*eta,Y4~0.6+0.6*eta,eta~0.5*X1+0.7*X2)
+latent(mSim) <- ~eta
+set.seed(12)
+df.data <- sim(mSim, n = 3e1, latent = FALSE)
+
+## display
+head(df.data)
+#+END_SRC
+
+#+RESULTS:
+:           Y1         Y2          Y3         Y4         X1         X2
+: 1 -1.7606233  0.1264910  0.66442611  0.2579355  0.2523400 -1.5431527
+: 2  3.0459417  2.4631929  0.00283511  2.1714802  0.6423143 -1.3206009
+: 3 -2.1443162 -0.3318033  0.82253070  0.3008415 -0.3469361 -0.6758215
+: 4 -2.5050328 -1.3878987 -0.10474850 -1.7814956 -0.5152632 -0.3670054
+: 5 -2.5307249  0.3012422  1.22046986 -1.0195188  0.3981689 -0.5138722
+: 6 -0.9521366  0.1669496 -0.21422548  1.5954456  0.9535572 -0.9592540
+
+We first fit the latent variable model using, as usual, the =estimate=
+function:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+m <- lvm(c(Y1,Y2,Y3,Y4)~eta, eta~X1+X2)
+e <- estimate(m, data = df.data)
+#+END_SRC
+
+#+RESULTS:
+
+We can extract the Wald tests based on the traditional approach using
+=summary=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+#+END_SRC
+
+#+RESULTS:
+:         Estimate Std. Error   Z-value      P-value
+: Y2     0.2335412  0.2448593 0.9537775 0.3401962906
+: Y3     0.5114275  0.1785886 2.8637186 0.0041869974
+: Y2~eta 0.9192847  0.2621248 3.5070497 0.0004531045
+: Y3~eta 0.2626930  0.1558978 1.6850339 0.0919820326
+: eta~X1 0.5150072  0.2513393 2.0490515 0.0404570768
+: eta~X2 0.6212222  0.2118930 2.9317729 0.0033703310
+
+As explained at the beginning of this section, *lavaSearch2* implements
+two corrections that can be directly applied by calling the =summary2=
+method:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary2(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+#+END_SRC
+
+#+RESULTS:
+:         estimate        se statistic        df     p.value
+: Y2     0.2335412 0.2518218 0.9274067 12.332567 0.371510180
+: Y3     0.5114275 0.1828716 2.7966475 24.693254 0.009851893
+: Y2~eta 0.9192847 0.2653220 3.4647887  3.518708 0.031533355
+: Y3~eta 0.2626930 0.1562776 1.6809386  5.953880 0.144155715
+: eta~X1 0.5150072 0.2642257 1.9491180 20.047646 0.065412240
+: eta~X2 0.6212222 0.2221293 2.7966698 27.739008 0.009272041
+
+To use the Satterthwaite correction alone, set the argument =ssc= to
+=FALSE=:
+
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary2(e, ssc = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+#+END_SRC
+
+#+RESULTS:
+:         estimate        se statistic        df     p.value
+: Y2     0.2335412 0.2448593 0.9537775 12.911877 0.357711941
+: Y3     0.5114275 0.1785886 2.8637186 25.780552 0.008210968
+: Y2~eta 0.9192847 0.2621248 3.5070497  3.674640 0.028396459
+: Y3~eta 0.2626930 0.1558978 1.6850339  6.222912 0.141185621
+: eta~X1 0.5150072 0.2513393 2.0490515 21.571210 0.052814794
+: eta~X2 0.6212222 0.2118930 2.9317729 30.370334 0.006351686
+
+When using the Satterthwaite correction alone, the standard errors are
+left unchanged compared to the original lava output. The only change
+is how the p-values are computed, i.e. based on the quantiles of a
+Student's \(t\)-distribution instead of a Gaussian distribution. 
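+
+This can be checked by hand for, e.g., the Y2 intercept, reusing the
+statistic and degrees of freedom displayed above:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+2 * (1 - pnorm(0.9537775))              ## Gaussian p-value, as in summary
+2 * (1 - pt(0.9537775, df = 12.911877)) ## Student p-value, as in summary2
+#+END_SRC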
+
+To only use the bias correction, set the argument =df= to =FALSE=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary2(e, df = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+#+END_SRC
+
+#+RESULTS:
+:         estimate        se statistic  df      p.value
+: Y2     0.2335412 0.2518218 0.9274067 Inf 0.3537154044
+: Y3     0.5114275 0.1828716 2.7966475 Inf 0.0051635832
+: Y2~eta 0.9192847 0.2653220 3.4647887 Inf 0.0005306482
+: Y3~eta 0.2626930 0.1562776 1.6809386 Inf 0.0927748494
+: eta~X1 0.5150072 0.2642257 1.9491180 Inf 0.0512813393
+: eta~X2 0.6212222 0.2221293 2.7966698 Inf 0.0051632271
+
+** Saving computation time with =estimate2=
+For each call to =summary2=, the small sample correction(s) will be
+recalculated. However, the calculation of the correction(s) can be
+time consuming.
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+system.time(
+    res <- summary2(e, ssc = FALSE)
+)
+#+END_SRC
+
+#+RESULTS:
+:    user  system elapsed 
+:   0.128   0.000   0.129
+
+In such a case one can pre-compute the main terms of the correction
+(e.g. the derivative of the variance-covariance matrix) once and for
+all using the =estimate2= method:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+e2 <- estimate2(e)
+#+END_SRC
+
+#+RESULTS:
+
+=estimate2= automatically stores the pre-computed terms in the
+=sCorrect= slot of the object. It also adds the class =lvmfit2= to the
+object:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+class(e2)
+#+END_SRC
+
+#+RESULTS:
+: [1] "lvmfit2" "lvmfit"
+
+Calling the =summary= method is now much faster:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+system.time(
+    summary(e2)
+)
+#+END_SRC
+
+#+RESULTS:
+:    user  system elapsed 
+:   0.027   0.000   0.026
+ 
+** Single multivariate Wald test
+
+The function =compare= from the lava package can be used to perform
+multivariate Wald tests, i.e. to test simultaneously several linear
+combinations of the coefficients. We can test a linear hypothesis by
+specifying in =compare= the parameters we would like to test:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resTest0 <- lava::compare(e, par = c("Y2","Y2~eta","eta~X1"))
+resTest0
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+chisq = 21.332, df = 3, p-value = 8.981e-05
+sample estimates:
+          Estimate   Std.Err       2.5%     97.5%
+[Y2]     0.2335412 0.2448593 -0.2463741 0.7134566
+[Y2~eta] 0.9192847 0.2621248  0.4055295 1.4330399
+[eta~X1] 0.5150072 0.2513393  0.0223912 1.0076231
+#+end_example
+
+=compare= uses a chi-squared distribution to compute the p-values.
+Similarly to the Gaussian approximation, while being valid
+asymptotically this procedure may not provide a very accurate control
+of the type 1 error rate in small samples. Fortunately, the correction
+proposed for the univariate Wald statistic can be adapted to the
+multivariate Wald statistic. This is achieved by =compare2=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resTest1 <- compare2(e, linfct = c("Y2","Y2~eta","eta~X1"))
+resTest1
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+F-statistic = 6.7118, df1 = 3, df2 = 11.11, p-value = 0.007577
+sample estimates:
+          Estimate   Std.Err        df        2.5%     97.5%
+[Y2]     0.2335412 0.2518218 12.332567 -0.31349486 0.7805774
+[Y2~eta] 0.9192847 0.2653220  3.518708  0.14114161 1.6974278
+[eta~X1] 0.5150072 0.2642257 20.047646 -0.03607414 1.0660884
+#+end_example
+
+The same result could have been obtained by first defining a contrast
+matrix encoding (by rows) which linear combinations of coefficients
+should be tested, e.g.:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resC <- createContrast(e, linfct = c("Y2=0","Y2~eta=0","eta~X1=0"))
+resC$contrast
+#+END_SRC
+
+#+RESULTS:
+:              Y2 Y3 Y4 eta Y2~eta Y3~eta Y4~eta eta~X1 eta~X2 Y1~~Y1 Y2~~Y2 Y3~~Y3 Y4~~Y4
+: [Y2] = 0      1  0  0   0      0      0      0      0      0      0      0      0      0
+: [Y2~eta] = 0  0  0  0   0      1      0      0      0      0      0      0      0      0
+: [eta~X1] = 0  0  0  0   0      0      0      0      1      0      0      0      0      0
+:              eta~~eta
+: [Y2] = 0            0
+: [Y2~eta] = 0        0
+: [eta~X1] = 0        0
+
+and passing it to the argument =linfct=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resTest2 <- compare2(e2, linfct = resC$contrast)
+identical(resTest1,resTest2)
+#+END_SRC
+
+#+RESULTS:
+: [1] TRUE
+
+Now an F-distribution is used to compute the p-values. As before, one
+can set the argument =ssc= to =FALSE= to use the Satterthwaite
+approximation alone:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resTest3 <- compare2(e, ssc = FALSE, linfct = resC$contrast)
+resTest3
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+F-statistic = 7.1107, df1 = 3, df2 = 11.13, p-value = 0.006182
+sample estimates:
+          Estimate   Std.Err       df         2.5%     97.5%
+[Y2]     0.2335412 0.2448593 12.91188 -0.295812256 0.7628948
+[Y2~eta] 0.9192847 0.2621248  3.67464  0.165378080 1.6731913
+[eta~X1] 0.5150072 0.2513393 21.57121 -0.006840023 1.0368543
+#+end_example
+
+In this case the F-statistic of =compare2= is the same as the
+chi-squared statistic of =compare= divided by the rank of the contrast matrix:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resTest0$statistic/qr(resC$contrast)$rank
+#+END_SRC
+
+#+RESULTS:
+:    chisq 
+: 7.110689
+
+** Robust Wald tests
+
+When one does not want to assume normally distributed residuals,
+robust standard errors can be used instead of the model-based standard
+errors. They can be obtained by setting the argument =robust= to =TRUE=
+when computing univariate Wald tests:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary2(e, robust = TRUE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+#+END_SRC
+
+#+RESULTS:
+:         estimate robust SE statistic        df     p.value
+: Y2     0.2335412 0.2353245 0.9924222 12.332567 0.340064859
+: Y3     0.5114275 0.1897160 2.6957534 24.693254 0.012453535
+: Y2~eta 0.9192847 0.1791240 5.1321143  3.518708 0.009583913
+: Y3~eta 0.2626930 0.1365520 1.9237580  5.953880 0.103104593
+: eta~X1 0.5150072 0.2167580 2.3759546 20.047646 0.027583693
+: eta~X2 0.6212222 0.2036501 3.0504385 27.739008 0.004986632
+
+By default the degrees of freedom of the model-based variance are
+used. Degrees of freedom can be computed via a Satterthwaite
+approximation using =lava.options(df.robust=2)=. However this is not
+recommended, as the resulting degrees of freedom have shown erratic
+behavior. Multivariate Wald tests can be obtained in a similar way
+using the =compare2= method:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+compare2(e2, linfct = c("Y2","Y2~eta","eta~X1"), robust = TRUE)
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+F-statistic = 12.526, df1 = 3, df2 = 8.41, p-value = 0.001832
+sample estimates:
+          Estimate robust SE        df        2.5%     97.5%
+[Y2]     0.2335412 0.2353245 12.332567 -0.27765746 0.7447400
+[Y2~eta] 0.9192847 0.1791240  3.518708  0.39394539 1.4446240
+[eta~X1] 0.5150072 0.2167580 20.047646  0.06292679 0.9670875
+#+end_example
+
+It may be surprising that the (corrected) robust standard errors are
+(in this example) smaller than the (corrected) model-based ones. This
+is also the case for the uncorrected ones:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+rbind(robust = diag(crossprod(iid(e))),
+      model = diag(vcov(e)))
+#+END_SRC
+
+#+RESULTS:
+:                Y2         Y3         Y4        eta     Y2~eta     Y3~eta     Y4~eta
+: robust 0.04777252 0.03325435 0.03886706 0.06011727 0.08590732 0.02179453 0.02981895
+: model  0.05995606 0.03189389 0.04644303 0.06132384 0.06870941 0.02430412 0.03715633
+:            eta~X1     eta~X2    Y1~~Y1    Y2~~Y2     Y3~~Y3     Y4~~Y4  eta~~eta
+: robust 0.05166005 0.05709393 0.2795272 0.1078948 0.03769614 0.06923165 0.3198022
+: model  0.06317144 0.04489865 0.1754744 0.1600112 0.05112998 0.10152642 0.2320190
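+
+Taking square roots of the ratios of the diagonal entries above shows
+by how much the two (uncorrected) standard errors differ, e.g.:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## a ratio below 1 means the robust standard error is the smaller one
+sqrt(c(Y2 = 0.04777252/0.05995606, "Y2~eta" = 0.08590732/0.06870941))
+#+END_SRC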
+
+This may be explained by the fact that robust standard errors tend to
+be liberal in small samples (see e.g. Kauermann 2001, "A Note on the
+Efficiency of Sandwich Covariance Matrix Estimation").
+
+** Assessing the type 1 error of the testing procedure
+
+The function =calibrateType1= can be used to assess the type 1 error
+of a Wald statistic on a specific example. This however assumes that
+the estimated model is correctly specified. Let's make an example. For
+this we simulate some data:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+set.seed(10)
+m.generative <- lvm(Y ~ X1 + X2 + Gene)
+categorical(m.generative, labels = c("ss","ll")) <- ~Gene
+d <- lava::sim(m.generative, n = 50, latent = FALSE)
+#+END_SRC
+
+#+RESULTS:
+
+Let's now imagine that we want to analyze the relationship between
+Y and Gene using the following dataset:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+head(d)
+#+END_SRC
+
+#+RESULTS:
+:             Y         X1         X2 Gene
+: 1 -1.14369572 -0.4006375 -0.7618043   ss
+: 2 -0.09943370 -0.3345566  0.4193754   ss
+: 3 -0.04331996  1.3679540 -1.0399434   ll
+: 4  2.25017335  2.1377671  0.7115740   ss
+: 5  0.16715138  0.5058193 -0.6332130   ss
+: 6  1.73931135  0.7863424  0.5631747   ss
+
+For this we define a LVM:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+myModel <- lvm(Y ~ X1 + X2 + Gene)
+#+END_SRC
+
+#+RESULTS:
+
+and estimate the coefficients of the model using =estimate=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+e <- estimate(myModel, data = d)
+e
+#+END_SRC
+
+#+RESULTS:
+:                     Estimate Std. Error  Z-value  P-value
+: Regressions:                                             
+:    Y~X1              1.02349    0.12017  8.51728   <1e-12
+:    Y~X2              0.91519    0.12380  7.39244   <1e-12
+:    Y~Genell          0.48035    0.23991  2.00224  0.04526
+: Intercepts:                                              
+:    Y                -0.11221    0.15773 -0.71141   0.4768
+: Residual Variances:                                      
+:    Y                 0.67073    0.13415  5.00000
+
+We can now use =calibrateType1= to perform a simulation study. We just
+need to define the null hypotheses (i.e. which coefficients should be
+set to 0 when generating the data) and the number of simulations:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+mySimulation <- calibrateType1(e, 
+                               param = "Y~Genell",
+                               n.rep = 50, 
+                               trace = FALSE, seed = 10)
+#+END_SRC
+
+#+RESULTS:
+
+To save time we only run 50 simulations, but many more are necessary
+to really assess the type 1 error rate (see the rough calculation
+after the output below). Then we can use the =summary= method to
+display the results:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary(mySimulation)
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+Estimated type 1 error rate [95% confidence interval]
+sample size: 50 | number of simulations: 50
+            link statistic correction type1error                  CI
+ [Y~Genell] == 0      Wald       Gaus       0.12 [0.05492 ; 0.24242]
+                                 Satt       0.10 [0.04224 ; 0.21869]
+                                  SSC       0.08 [0.03035 ; 0.19456]
+                           SSC + Satt       0.08 [0.03035 ; 0.19456]
+
+Corrections: Gaus = Gaussian approximation 
+             SSC  = small sample correction 
+             Satt = Satterthwaite approximation
+#+end_example
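+
+A back-of-the-envelope calculation, independent of =calibrateType1=,
+shows why many more simulations are needed: the Monte Carlo standard
+error of a rejection rate estimated near 5% is still sizeable with 50
+replications:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+n.rep <- c(50, 500, 5000)
+round(rbind(n.rep = n.rep,
+            Monte.Carlo.se = sqrt(0.05 * 0.95/n.rep)), 4)
+#+END_SRC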
+
+\clearpage
+
+* Adjustment for multiple comparisons
+** Univariate Wald test, single model
+
+When performing multiple testing, adjustment for multiple comparisons
+is necessary in order to control the type 1 error rate, i.e. to
+provide interpretable p.values. The *multcomp* package enables such an
+adjustment when all tests come from the same =lvmfit= object:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## simulate data
+mSim <- lvm(Y ~ 0.25 * X1 + 0.3 * X2 + 0.35 * X3 + 0.4 * X4 + 0.45 * X5 + 0.5 * X6)
+set.seed(10)
+df.data <- sim(mSim, n = 4e1)
+
+## fit lvm
+e.lvm <- estimate(lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+name.coef <- names(coef(e.lvm))
+n.coef <- length(name.coef)
+
+## Create contrast matrix
+resC <- createContrast(e.lvm, linfct = paste0("Y~X",1:6), rowname.rhs = FALSE)
+resC$contrast
+#+END_SRC
+
+#+RESULTS:
+:        Y Y~X1 Y~X2 Y~X3 Y~X4 Y~X5 Y~X6 Y~~Y
+: [Y~X1] 0    1    0    0    0    0    0    0
+: [Y~X2] 0    0    1    0    0    0    0    0
+: [Y~X3] 0    0    0    1    0    0    0    0
+: [Y~X4] 0    0    0    0    1    0    0    0
+: [Y~X5] 0    0    0    0    0    1    0    0
+: [Y~X6] 0    0    0    0    0    0    1    0
+
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+e.glht <- multcomp::glht(e.lvm, linfct = resC$contrast, rhs = resC$null)
+summary(e.glht)
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+
+Linear Hypotheses:
+            Estimate Std. Error z value Pr(>|z|)   
+[Y~X1] == 0   0.3270     0.1589   2.058  0.20725   
+[Y~X2] == 0   0.4025     0.1596   2.523  0.06611 . 
+[Y~X3] == 0   0.5072     0.1383   3.669  0.00144 **
+[Y~X4] == 0   0.3161     0.1662   1.902  0.28582   
+[Y~X5] == 0   0.3875     0.1498   2.586  0.05554 . 
+[Y~X6] == 0   0.3758     0.1314   2.859  0.02482 * 
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(Adjusted p values reported -- single-step method)
+#+end_example
+
+Note that this correction relies on the Gaussian approximation. To
+use the small-sample corrections implemented in *lavaSearch2*, simply
+call =glht2= instead of =glht=:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+e.glht2 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null)
+summary(e.glht2)
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+Standard errors: Model-based
+
+Linear Hypotheses:
+             estimate        se        df     lower     upper statistic p.value  
+[Y~X1] == 0  0.327006  0.174976 33.000000 -0.158914  0.812926    1.8689 0.32895  
+[Y~X2] == 0  0.402533  0.175670 33.000000 -0.085313  0.890380    2.2914 0.14817  
+[Y~X3] == 0  0.507242  0.152209 33.000000  0.084548  0.929937    3.3325 0.01232 *
+[Y~X4] == 0  0.316099  0.182995 33.000000 -0.192089  0.824288    1.7274 0.41283  
+[Y~X5] == 0  0.387459  0.164970 33.000000 -0.070673  0.845590    2.3487 0.13153  
+[Y~X6] == 0  0.375763  0.144712 33.000000 -0.026113  0.777639    2.5966 0.07617 .
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(CIs/p-values adjusted for multiple comparisons -- single step max-test) 
+Error when computing the adjusted p-value by numerical integration: 0.00012125
+#+end_example
+
+The single-step method is the appropriate correction when one wants
+to report the most significant p-value relative to a set of
+hypotheses. If the second most significant p-value is also to be
+reported, then the "free" method is more efficient:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary(e.glht2, test = multcomp::adjusted("free"))
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+Standard errors: Model-based
+
+Linear Hypotheses:
+            estimate       se       df statistic p.value  
+[Y~X1] == 0  0.32701  0.17498 33.00000    1.8689 0.12911  
+[Y~X2] == 0  0.40253  0.17567 33.00000    2.2914 0.09129 .
+[Y~X3] == 0  0.50724  0.15221 33.00000    3.3325 0.01242 *
+[Y~X4] == 0  0.31610  0.18299 33.00000    1.7274 0.12911  
+[Y~X5] == 0  0.38746  0.16497 33.00000    2.3487 0.09129 .
+[Y~X6] == 0  0.37576  0.14471 33.00000    2.5966 0.06451 .
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(CIs/p-values adjusted for multiple comparisons -- step down max-test)
+#+end_example
+
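+Classical step-down corrections such as Holm can be requested through
+the same interface, e.g. (a sketch, output omitted):
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary(e.glht2, test = multcomp::adjusted("holm"))
+#+END_SRC
+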
+See the book: "Multiple Comparisons Using R" by Frank Bretz, Torsten
+Hothorn, and Peter Westfall (2011, CRC Press) for details about the
+theory underlying the *multcomp* package.
+
+** Univariate Wald test, multiple models
+
+Pipper et al. in "A Versatile Method for Confirmatory Evaluation of
+the Effects of a Covariate in Multiple Models" (2012, Journal of the
+Royal Statistical Society, Series C) developed a method to assess the
+effect of an exposure on several outcomes when a different model is
+fitted for each outcome. This method has been implemented in the =mmm=
+function from the *multcomp* package for glm and Cox
+models. *lavaSearch2* extends it to =lvm=. 
+
+Let's consider an example where we wish to assess the treatment effect
+on three outcomes X, Y, and Z. For outcome Z, three measurements are
+available for each individual:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+mSim <- lvm(X ~ Age + 0.5*Treatment,
+            Y ~ Gender + 0.25*Treatment,
+            c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
+            Age[40:5]~1)
+latent(mSim) <- ~eta
+categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
+categorical(mSim, labels = c("male","female")) <- ~Gender
+
+n <- 5e1
+set.seed(10)
+df.data <- sim(mSim, n = n, latent = FALSE)
+head(df.data)
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+         X      Age Treatment          Y Gender         Z1         Z2          Z3
+1 39.12289 39.10415   placebo  0.6088958 female  1.8714112  2.2960633 -0.09326935
+2 39.56766 39.25191      SSRI  1.0001325 female  0.9709943  0.6296226  1.31035910
+3 41.68751 43.05884   placebo  2.1551047 female -1.1634011 -0.3332927 -1.30769267
+4 44.68102 44.78019      SSRI  0.3852728 female -1.0305476  0.6678775  0.99780139
+5 41.42559 41.13105   placebo -0.8666783   male -1.6342816 -0.8285492  1.20450488
+6 42.64811 41.75832      SSRI -1.0710170 female -1.2198019 -1.9602130 -1.85472132
+   treatment
+1  1.1639675
+2 -1.5233846
+3 -2.5183351
+4 -0.7075292
+5 -0.2874329
+6 -0.4353083
+#+end_example
+
+We fit a model specific to each outcome:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+lvmX <- estimate(lvm(X ~ Age + Treatment), data = df.data)
+lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
+lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ 1*eta, eta ~ -1 + Treatment), 
+                 data = df.data)
+#+END_SRC
+
+#+RESULTS:
+
+and combine them into a list of =lvmfit= objects:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+mmm.lvm <- multcomp::mmm(X = lvmX, Y = lvmY, Z = lvmZ)
+#+END_SRC
+
+#+RESULTS:
+
+We can then call =glht2= to apply the small-sample corrections,
+generate a contrast matrix containing tests for all coefficients
+related to the treatment, and collect the results:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+lvm.glht2 <- glht2(mmm.lvm, linfct = "TreatmentSSRI")
+summary(lvm.glht2)
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Linear Hypotheses:
+                         estimate        se        df     lower     upper statistic
+X: [TreatmentSSRI] == 0  0.466150  0.253280 47.000000 -0.154910  1.087209    1.8405
+Y: [TreatmentSSRI] == 0 -0.542096  0.261321 47.000000 -1.182874  0.098682   -2.0744
+Z: [TreatmentSSRI] == 0 -0.619822  0.440397 47.000000 -1.699707  0.460063   -1.4074
+                        p.value
+X: [TreatmentSSRI] == 0  0.1863
+Y: [TreatmentSSRI] == 0  0.1165
+Z: [TreatmentSSRI] == 0  0.3912
+(CIs/p-values adjusted for multiple comparisons -- single step max-test) 
+Error when computing the adjusted p-value by numerical integration: 0.00025692
+#+end_example
+
+This can be compared to the unadjusted p-values:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+summary(lvm.glht2, test = multcomp::adjusted("none"))
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Linear Hypotheses:
+                         estimate        se        df     lower     upper statistic
+X: [TreatmentSSRI] == 0  0.466150  0.253280 47.000000 -0.043383  0.975682    1.8405
+Y: [TreatmentSSRI] == 0 -0.542096  0.261321 47.000000 -1.067807 -0.016385   -2.0744
+Z: [TreatmentSSRI] == 0 -0.619822  0.440397 47.000000 -1.505787  0.266143   -1.4074
+                        p.value  
+X: [TreatmentSSRI] == 0 0.07202 .
+Y: [TreatmentSSRI] == 0 0.04354 *
+Z: [TreatmentSSRI] == 0 0.16588  
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(CIs/p-values not adjusted for multiple comparisons)
+#+end_example
+
+\clearpage 
+
+* Model diagnostic
+** Detection of local dependencies
+
+The =modelsearch= function of *lava* is a diagnostic tool for latent
+variable models. It makes it possible to search for local dependencies
+(i.e. model misspecification) and add them to the model. It is, of
+course, a data-driven procedure and its usefulness can be debated,
+especially in small samples:
+- the procedure is unstable, i.e. it is likely to lead to two
+  different models when applied to two different datasets sampled
+  from the same generative model.
+- it is hard to define a meaningful significance threshold since
+  p-values should be adjusted for multiple comparisons and sequential
+  testing. However, traditional methods like Bonferroni-Holm tend to
+  over-correct, and therefore reduce the power of the procedure,
+  since they assume that the tests are independent.
+
+The function =modelsearch2= in *lavaSearch2* partially solves the
+second issue by adjusting the p-values for multiple testing. Let's see
+an example:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## simulate data
+mSim <- lvm(c(y1,y2,y3)~u, u~x1+x2)
+latent(mSim) <- ~u
+covariance(mSim) <- y2~y3
+transform(mSim, Id~u) <- function(x){1:NROW(x)}
+set.seed(10)
+df.data <- lava::sim(mSim, n = 125, latent = FALSE)
+head(df.data)
+#+END_SRC
+
+#+RESULTS:
+:           y1           y2         y3         x1         x2 Id
+: 1  5.5071523  4.883752014  6.2928016  0.8694750  2.3991549  1
+: 2 -0.6398644  0.025832617  0.5088030 -0.6800096 -0.0898721  2
+: 3 -2.5835495 -2.616715027 -2.8982645  0.1732145 -0.8216484  3
+: 4 -2.5312637 -2.518185427 -2.9015033 -0.1594380 -0.2869618  4
+: 5  1.6346220 -0.001877577  0.3705181  0.7934994  0.1312789  5
+: 6  0.4939972  1.759884014  1.5010499  1.6943505 -1.0620840  6
+
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## fit model
+m <- lvm(c(y1,y2,y3)~u, u~x1)
+latent(m) <- ~u
+addvar(m) <- ~x2 
+e.lvm <- estimate(m, data = df.data)
+#+END_SRC
+
+#+RESULTS:
+
+=modelsearch2= can be used to sequentially apply the =modelsearch=
+function with a given correction for the p-values:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resScore <- modelsearch2(e.lvm, alpha = 0.1, trace = FALSE)
+displayScore <- summary(resScore)
+#+END_SRC
+
+#+RESULTS:
+: Sequential search for local dependence using the score statistic 
+: The variable selection procedure retained 2 variables:
+:     link statistic      p.value adjusted.p.value dp.Info selected nTests
+: 1   u~x2 36.436487 1.577228e-09     5.008615e-08       1     TRUE     10
+: 2 y2~~y3  6.912567 8.559203e-03     6.056378e-02       1     TRUE      9
+: 3  y3~x1  3.136429 7.656125e-02     2.814343e-01       1    FALSE      8
+: Confidence level: 0.9 (two sided, adjustement: fastmax)
+
+This indeed matches the highest score statistic found by
+=modelsearch= (the statistic above is reported on the chi-squared
+scale; the code below takes its square root):
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resScore0 <- modelsearch(e.lvm, silent = TRUE)
+c(statistic = sqrt(max(resScore0$test[,"Test Statistic"])), 
+  p.value = min(resScore0$test[,"P-value"]))
+#+END_SRC
+
+#+RESULTS:
+:    statistic      p.value 
+: 6.036264e+00 1.577228e-09
+
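+The p-value can likewise be recovered from the chi-squared scale, a
+sketch using the value reported in the table above:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## 36.436487 is a chi-squared statistic with 1 degree of freedom;
+## its square root is the normal statistic, the p-value is unchanged
+c(statistic = sqrt(36.436487),
+  p.value = pchisq(36.436487, df = 1, lower.tail = FALSE))
+#+END_SRC
+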
+We can compare the adjustment using the max distribution to Bonferroni:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+data.frame(link = displayScore$table[,"link"],
+           none = displayScore$table[,"p.value"],
+           bonferroni = displayScore$table[,"p.value"]*displayScore$table[1,"nTests"],
+           max = displayScore$table[,"adjusted.p.value"])
+#+END_SRC
+
+#+RESULTS:
+:     link         none   bonferroni          max
+: 1   u~x2 1.577228e-09 1.577228e-08 5.008615e-08
+: 2 y2~~y3 8.559203e-03 8.559203e-02 6.056378e-02
+: 3  y3~x1 7.656125e-02 7.656125e-01 2.814343e-01
+
+In theory, the correction based on the max statistic should give a
+p-value that is smaller than or equal to the p-value adjusted using
+Bonferroni. However, for very small p-values, the max-correction can
+be numerically inaccurate and result in p-values that are slightly
+larger. The evolution of the estimate of a given coefficient across
+the sequential search can be displayed using =autoplot=:
+   
+#+BEGIN_SRC R :results graphics :file "c:/Users/hpl802/Documents/GitHub/lavaSearch2/vignettes/modelsearch.png" :exports results :session *R* :cache no
+autoplot(resScore, param = "u~x1")
+#+END_SRC
+
+#+RESULTS:
+[[file:./modelsearch.png]]
+
+In many cases, not all links are plausible, so the user should
+indicate which links =modelsearch2= should investigate. This can be
+done via the argument =link=:
+
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resRed <- modelsearch2(e.lvm, link = c("y1~~y2","y1~~y3","y2~~y3"), trace = FALSE)
+print(resRed)
+#+END_SRC
+
+#+RESULTS:
+: Sequential search for local dependence using the score statistic 
+: The variable selection procedure did not retain any variable 
+:     link statistic    p.value adjusted.p.value dp.Info selected nTests
+: 1 y1~~y3  3.076875 0.07941299        0.1818963       1    FALSE      3
+: Confidence level: 0.95 (two sided, adjustement: fastmax)
+
+The function =findNewLink= can help the user to identify the set of
+relevant links:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+findNewLink(e.lvm$model, type = "covariance")$link
+#+END_SRC
+
+#+RESULTS:
+: [1] "y1~~y2" "y1~~y3" "y2~~y3"
+
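+Its output can be passed directly to =modelsearch2=, reproducing the
+restricted search above (a sketch):
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+resRed2 <- modelsearch2(e.lvm,
+                        link = findNewLink(e.lvm$model, type = "covariance")$link,
+                        trace = FALSE)
+#+END_SRC
+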
+** Checking that the names of the variables in the model match those of the data
+
+When estimating latent variable models using *lava*, it sometimes
+happens that the model does not converge:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## simulate data
+set.seed(10)
+df.data <- sim(lvm(Y~X1+X2), 1e2)
+
+## fit model
+mWrong <- lvm(Y ~ X + X2)
+eWrong <- estimate(mWrong, data = df.data)
+#+END_SRC
+
+#+RESULTS:
+: Warning messages:
+: 1: In estimate.lvm(mWrong, data = df.data) :
+:   Lack of convergence. Increase number of iteration or change starting values.
+: 2: In sqrt(diag(asVar)) : NaNs produced
+
+This can have several causes:
+- the model is not identifiable.
+- the optimization routine did not manage to find a local
+  optimum. This may happen for complex latent variable models where
+  the objective function is neither convex nor locally convex.
+- the user has made a mistake when defining the model or has not
+  supplied the appropriate dataset.
+
+The =checkData= function makes it possible to check the last
+point. It compares the observed variables defined in the model with
+those available in the dataset. In case of a mismatch it returns a
+message:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+checkData(mWrong, df.data)
+#+END_SRC
+
+#+RESULTS:
+: Missing variable in data: X
+ 
+In the presence of latent variables, the user needs to explicitly
+declare them in the model; otherwise =checkData= will flag them as an
+issue:
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+## simulate data
+set.seed(10)
+mSim <- lvm(c(Y1,Y2,Y3)~eta)
+latent(mSim) <- ~eta
+df.data <- sim(mSim, n = 1e2, latent = FALSE)
+
+## fit model
+m <- lvm(c(Y1,Y2,Y3)~eta)
+checkData(m, data = df.data)
+#+END_SRC
+
+#+RESULTS:
+: Missing variable in data: eta
+
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+latent(m) <- ~eta
+checkData(m, data = df.data)
+#+END_SRC
+
+#+RESULTS:
+: No issue detected
+
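+Since the check is cheap, it can be worth running it systematically
+before fitting. A minimal sketch of such a wrapper (=estimateChecked=
+is a hypothetical helper, not part of *lavaSearch2*):
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+estimateChecked <- function(model, data, ...){
+    checkData(model, data) ## prints a message in case of mismatch
+    estimate(model, data = data, ...)
+}
+#+END_SRC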
+
+\clearpage
+
+* Information about the R session used for this document
+
+#+BEGIN_SRC R :exports both :results output :session *R* :cache no
+sessionInfo()
+#+END_SRC
+
+#+RESULTS:
+#+begin_example
+R version 4.2.0 (2022-04-22)
+Platform: x86_64-pc-linux-gnu (64-bit)
+Running under: Ubuntu 20.04.4 LTS
+
+Matrix products: default
+BLAS:   /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.9.0
+LAPACK: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.9.0
+
+locale:
+ [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C               LC_TIME=en_US.UTF-8       
+ [4] LC_COLLATE=en_US.UTF-8     LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
+ [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                  LC_ADDRESS=C              
+[10] LC_TELEPHONE=C             LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
+
+attached base packages:
+[1] stats     graphics  grDevices utils     datasets  methods   base     
+
+other attached packages:
+[1] lavaSearch2_2.0.1 lava_1.7.2        ggplot2_3.4.0     butils.base_1.2  
+[5] Rcpp_1.0.9        devtools_2.4.3    usethis_2.1.5     data.table_1.14.2
+
+loaded via a namespace (and not attached):
+ [1] pkgload_1.2.4            splines_4.2.0            foreach_1.5.2           
+ [4] brio_1.1.3               assertthat_0.2.1         butils_1.4.7            
+ [7] remotes_2.4.2            sessioninfo_1.2.2        globals_0.16.1          
+[10] numDeriv_2016.8-1.1      pillar_1.8.1             lattice_0.20-45         
+[13] glue_1.6.2               digest_0.6.31            colorspace_2.0-3        
+[16] sandwich_3.0-2           Matrix_1.4-1             plyr_1.8.7              
+[19] pkgconfig_2.0.3          listenv_0.8.0            purrr_1.0.0             
+[22] mvtnorm_1.1-3            scales_1.2.1             processx_3.5.3          
+[25] tibble_3.1.8             generics_0.1.3           ellipsis_0.3.2          
+[28] TH.data_1.1-1            cachem_1.0.6             withr_2.5.0             
+[31] cli_3.5.0                survival_3.5-0           magrittr_2.0.3          
+[34] crayon_1.5.2             memoise_2.0.1            ps_1.7.0                
+[37] fs_1.5.2                 future_1.28.0            fansi_1.0.3             
+[40] parallelly_1.32.1        doParallel_1.0.17        nlme_3.1-157            
+[43] MASS_7.3-57              xml2_1.3.3               RcppArmadillo_0.11.2.0.0
+[46] pkgbuild_1.3.1           progressr_0.11.0         tools_4.2.0             
+[49] prettyunits_1.1.1        lifecycle_1.0.3          multcomp_1.4-20         
+[52] stringr_1.5.0            munsell_0.5.0            callr_3.7.0             
+[55] compiler_4.2.0           rlang_1.0.6              grid_4.2.0              
+[58] iterators_1.0.14         boot_1.3-28              testthat_3.1.4          
+[61] gtable_0.3.1             codetools_0.2-18         abind_1.4-5             
+[64] DBI_1.1.3                roxygen2_7.2.1           reshape2_1.4.4          
+[67] R6_2.5.1                 zoo_1.8-11               knitr_1.39              
+[70] dplyr_1.0.10             fastmap_1.1.0            future.apply_1.9.1      
+[73] utf8_1.2.2               rprojroot_2.0.3          desc_1.4.1              
+[76] stringi_1.7.8            parallel_4.2.0           vctrs_0.5.1             
+[79] tidyselect_1.2.0         xfun_0.31
+#+end_example
+
+* CONFIG :noexport:
+#+LANGUAGE:  en
+#+LaTeX_CLASS: org-article
+#+LaTeX_CLASS_OPTIONS: [12pt]
+#+OPTIONS:   title:t author:t toc:nil todo:nil
+#+OPTIONS:   H:3 num:t 
+#+OPTIONS:   TeX:t LaTeX:t
+
+** Code
+#+PROPERTY: header-args :session *R*
+#+PROPERTY: header-args :tangle yes % extract source code: http://orgmode.org/manual/Extracting-source-code.html
+#+PROPERTY: header-args :cache no 
+#+LATEX_HEADER: \RequirePackage{fancyvrb}
+#+LATEX_HEADER: \DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\small,formatcom = {\color[rgb]{0.5,0,0}}}
+
+** Display 
+#+LaTeX_HEADER: \geometry{a4paper, left=15mm, right=15mm}
+
+#+LATEX_HEADER: \RequirePackage{colortbl} % arrayrulecolor to mix colors
+#+LATEX_HEADER: \RequirePackage{setspace} % to modify the space between lines - incompatible with footnote in beamer
+#+LaTeX_HEADER:\usepackage{authblk} % enable several affiliations (clash with beamer)
+#+LaTeX_HEADER:\renewcommand{\baselinestretch}{1.1}
+#+LATEX_HEADER:\geometry{top=1cm}
+
+** List
+#+LaTeX_HEADER: \usepackage{enumitem}
+
+** Notations
+#+LATEX_HEADER: \RequirePackage{xspace} % 
+#+LATEX_HEADER: \newcommand\Rlogo{\textbf{\textsf{R}}\xspace} % 
+
+** Image
+#+LATEX_HEADER: \RequirePackage{epstopdf} % to be able to convert .eps to .pdf image files
diff --git a/inst/doc-software/overview.pdf b/inst/doc-software/overview.pdf
new file mode 100644
index 0000000..24a38f3
Binary files /dev/null and b/inst/doc-software/overview.pdf differ
diff --git a/vignettes/overview.ltx b/inst/doc-software/overview.tex
similarity index 65%
rename from vignettes/overview.ltx
rename to inst/doc-software/overview.tex
index 00822b8..2ec78ae 100644
--- a/vignettes/overview.ltx
+++ b/inst/doc-software/overview.tex
@@ -1,1189 +1,1191 @@
-% Created 2019-04-04 to 16:15
-% Intended LaTeX compiler: pdflatex
-\documentclass[12pt]{article}
-
-%%%% settings when exporting code %%%% 
-
-\usepackage{listings}
-\lstset{
-backgroundcolor=\color{white},
-basewidth={0.5em,0.4em},
-basicstyle=\ttfamily\small,
-breakatwhitespace=false,
-breaklines=true,
-columns=fullflexible,
-commentstyle=\color[rgb]{0.5,0,0.5},
-frame=single,
-keepspaces=true,
-keywordstyle=\color{black},
-literate={~}{$\sim$}{1},
-numbers=left,
-numbersep=10pt,
-numberstyle=\ttfamily\tiny\color{gray},
-showspaces=false,
-showstringspaces=false,
-stepnumber=1,
-stringstyle=\color[rgb]{0,.5,0},
-tabsize=4,
-xleftmargin=.23in,
-emph={anova,apply,class,coef,colnames,colNames,colSums,dim,dcast,for,ggplot,head,if,ifelse,is.na,lapply,list.files,library,logLik,melt,plot,require,rowSums,sapply,setcolorder,setkey,str,summary,tapply},
-emphstyle=\color{blue}
-}
-
-%%%% packages %%%%%
-
-\usepackage[utf8]{inputenc}
-\usepackage[T1]{fontenc}
-\usepackage{lmodern}
-\usepackage{textcomp}
-\usepackage{color}
-\usepackage{enumerate}
-\usepackage{graphicx}
-\usepackage{grffile}
-\usepackage{wrapfig}
-\usepackage{rotating}
-\usepackage{longtable}
-\usepackage{multirow}
-\usepackage{multicol}
-\usepackage{changes}
-\usepackage{pdflscape}
-\usepackage{geometry}
-\usepackage[normalem]{ulem}
-\usepackage{amssymb}
-\usepackage{amsmath}
-\usepackage{amsfonts}
-\usepackage{dsfont}
-\usepackage{array}
-\usepackage{ifthen}
-\usepackage{hyperref}
-\usepackage{natbib}
-%\VignetteIndexEntry{overview}
-%\VignetteEngine{R.rsp::tex}
-%\VignetteKeyword{R}
-\RequirePackage{fancyvrb}
-\DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\small,formatcom = {\color[rgb]{0.5,0,0}}}
-\geometry{a4paper, left=15mm, right=15mm}
-\RequirePackage{colortbl} % arrayrulecolor to mix colors
-\RequirePackage{setspace} % to modify the space between lines - incompatible with footnote in beamer
-\usepackage{authblk} % enable several affiliations (clash with beamer)
-\renewcommand{\baselinestretch}{1.1}
-\geometry{top=1cm}
-\usepackage{enumitem}
-\RequirePackage{xspace} %
-\newcommand\Rlogo{\textbf{\textsf{R}}\xspace} %
-\RequirePackage{epstopdf} % to be able to convert .eps to .pdf image files
-\author{Brice Ozenne}
-\date{\today}
-\title{Overview of the functionalities of the package lavaSearch2}
-\hypersetup{
- colorlinks=true,
- citecolor=[rgb]{0,0.5,0},
- urlcolor=[rgb]{0,0,0.5},
- linkcolor=[rgb]{0,0,0.5},
- pdfauthor={Brice Ozenne},
- pdftitle={Overview of the functionalities of the package lavaSearch2},
- pdfkeywords={},
- pdfsubject={},
- pdfcreator={Emacs 25.2.1 (Org mode 9.0.4)},
- pdflang={English}
- }
-\begin{document}
-
-\maketitle
-Load \textbf{lavaSearch2} in the R session:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-library(lavaSearch2)
-\end{lstlisting}
-
-\section{Inference}
-\label{sec:orgfd6e90a}
-\subsection{Introductory example}
-\label{sec:org3f1e0eb}
-You may have noticed that for simple linear regression, the p-values
-of the Wald tests from \texttt{lm}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(Y[1:1]~0.3*X1+0.2*X2)
-set.seed(10)
-df.data <- sim(mSim, 2e1)
-
-## fit linear model
-summary(lm(Y~X1+X2, data = df.data))$coef
-\end{lstlisting}
-
-\begin{verbatim}
-             Estimate Std. Error   t value    Pr(>|t|)
-(Intercept) 0.7967775  0.2506767 3.1785069 0.005495832
-X1          0.1550938  0.2205080 0.7033477 0.491360483
-X2          0.4581556  0.2196785 2.0855736 0.052401103
-\end{verbatim}
-
-differ from those obtained with the corresponding latent variable
-model estimated by maximum likelihood:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## fit latent variable model
-m <- lvm(Y~X1+X2)
-e <- estimate(m, data = df.data)
-
-## extract Wald tests
-summary(e)$coef
-\end{lstlisting}
-
-\begin{verbatim}
-      Estimate Std. Error   Z-value      P-value
-Y~X1 0.1550938  0.2032984 0.7628877 0.4455303456
-Y~X2 0.4581556  0.2025335 2.2621221 0.0236898575
-Y~~Y 0.5557910  0.1757566 3.1622777           NA
-Y    0.7967775  0.2311125 3.4475747 0.0005656439
-\end{verbatim}
-
-For instance, the p-value for the effect of X2 is 0.024 in the latent
-variable model and 0.052 in the linear regression. The discrepancy is
-due to 2 corrections that \texttt{lm} applies in order to improve the control
-of the type 1 error of the Wald tests:
-\begin{itemize}
-\item use of a Student \(t\)-distribution instead of a Gaussian
-distribution (informally using a t-value instead of z-value).
-\item use of an unbiased estimator of the residuals variance instead of
-the ML-estimator.
-\end{itemize}
-\textbf{lavaSearch2} attempts to generalize these corrections to models with
-correlated and heteroschedastic measurements. In the case of a simple
-linear regression, Wald tests obtained with \textbf{lavaSearch2} exactly
-match the results of \texttt{lm}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e)$coef
-\end{lstlisting}
-
-\begin{verbatim}
-      Estimate Std. Error   t-value    P-value    df
-Y~X1 0.1550938  0.2205078 0.7033483 0.49136012 17.00
-Y~X2 0.4581556  0.2196783 2.0855754 0.05240092 17.00
-Y~~Y 0.6538707  0.2242758 2.9154759         NA  4.25
-Y    0.7967775  0.2506765 3.1785096 0.00549580 17.00
-\end{verbatim}
-
-\subsection{How it works in a nutshell}
-\label{sec:org89617bf}
-
-When using \textbf{lava}, the p.values that are obtained from the summary
-(Wald tests) rely on a Gaussian approximation and maximum likelihood
-estimation. While being asymptotically valid, they usually do not
-provide a very accurate control of the type 1 error rate in small
-samples. Simulations have shown that the type 1 error rate tends to be
-too large, i.e. the p.values are have a downward bias. \textbf{lavaSearch2}
-provides two improvements:
-\begin{itemize}
-\item using a Student's \(t\)-distribution instead of a Gaussian
-distribution to account for the uncertainty on the variance of the
-coefficients. The degrees of freedom are estimated using Satterwaite
-approximation, i.e. identifying the chi-squared distribution that
-best fit the observed moments of the variance of the coefficients.
-\item (partially) correcting for the first order bias in the ML estimates
-of the variance parameters. This correction also affects the
-standard error of the estimates.
-\end{itemize}
-
-\subsection{Single univariate Wald test}
-\label{sec:org8e0ca86}
-
-We will illustrate the functionalities using a simulated dataset:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(Y1~eta,Y2~eta,Y3~0.4+0.4*eta,Y4~0.6+0.6*eta,eta~0.5*X1+0.7*X2)
-latent(mSim) <- ~eta
-set.seed(12)
-df.data <- sim(mSim, n = 3e1, latent = FALSE)
-
-## display
-head(df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-          Y1         Y2          Y3         Y4         X1         X2
-1 -1.7606233  0.1264910  0.66442611  0.2579355  0.2523400 -1.5431527
-2  3.0459417  2.4631929  0.00283511  2.1714802  0.6423143 -1.3206009
-3 -2.1443162 -0.3318033  0.82253070  0.3008415 -0.3469361 -0.6758215
-4 -2.5050328 -1.3878987 -0.10474850 -1.7814956 -0.5152632 -0.3670054
-5 -2.5307249  0.3012422  1.22046986 -1.0195188  0.3981689 -0.5138722
-6 -0.9521366  0.1669496 -0.21422548  1.5954456  0.9535572 -0.9592540
-\end{verbatim}
-
-We first fit the latent variable model using, as usual, the \texttt{estimate}
-function:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-m <- lvm(c(Y1,Y2,Y3,Y4)~eta, eta~X1+X2)
-e <- estimate(m, data = df.data)
-\end{lstlisting}
-
-We can extract the Wald tests based on the traditional approach using
-\texttt{summary}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   Z-value      P-value
-Y2     0.2335412  0.2448593 0.9537775 0.3401962906
-Y3     0.5114275  0.1785886 2.8637186 0.0041869974
-Y2~eta 0.9192847  0.2621248 3.5070497 0.0004531045
-Y3~eta 0.2626930  0.1558978 1.6850339 0.0919820326
-eta~X1 0.5150072  0.2513393 2.0490515 0.0404570768
-eta~X2 0.6212222  0.2118930 2.9317729 0.0033703310
-\end{verbatim}
-
-As explain at the begining of this section, \textbf{lavaSearch2} implements
-two corrections that can be directly applied by calling the \texttt{summary2}
-method:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value     P-value        df
-Y2     0.2335412  0.2518218 0.9274067 0.371516094 12.328385
-Y3     0.5114275  0.1828716 2.7966475 0.009848769 24.707696
-Y2~eta 0.9192847  0.2653220 3.4647887 0.031585600  3.515034
-Y3~eta 0.2626930  0.1562776 1.6809386 0.143826633  5.993407
-eta~X1 0.5150072  0.2642257 1.9491180 0.065414617 20.044312
-eta~X2 0.6212222  0.2221293 2.7966698 0.009275494 27.718363
-\end{verbatim}
-
-To use the Satterthwaite correction alone, set the argument
-  \texttt{bias.correct} to \texttt{FALSE}:
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e, bias.correct = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value     P-value        df
-Y2     0.2335412  0.2448593 0.9537775 0.357711941 12.911877
-Y3     0.5114275  0.1785886 2.8637186 0.008210968 25.780552
-Y2~eta 0.9192847  0.2621248 3.5070497 0.028396459  3.674640
-Y3~eta 0.2626930  0.1558978 1.6850339 0.141185621  6.222912
-eta~X1 0.5150072  0.2513393 2.0490515 0.052814794 21.571210
-eta~X2 0.6212222  0.2118930 2.9317729 0.006351686 30.370334
-\end{verbatim}
-
-When using the Satterthwaite correction alone, the standard error are
-left unchanged compared to the original lava output. The only change
-is how the p-values are computed, i.e. based on the quantiles of a
-Student's \(t\)-distribution instead of a Gaussian distribution. 
-
-To only use the bias correction, set the argument \texttt{df} to \texttt{FALSE}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e, df = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value      P-value  df
-Y2     0.2335412  0.2518218 0.9274067 0.3537154044 Inf
-Y3     0.5114275  0.1828716 2.7966475 0.0051635832 Inf
-Y2~eta 0.9192847  0.2653220 3.4647887 0.0005306482 Inf
-Y3~eta 0.2626930  0.1562776 1.6809386 0.0927748494 Inf
-eta~X1 0.5150072  0.2642257 1.9491180 0.0512813393 Inf
-eta~X2 0.6212222  0.2221293 2.7966698 0.0051632271 Inf
-\end{verbatim}
-
-
-\subsection{Saving computation time with \texttt{sCorrect}}
-\label{sec:org86bc5ce}
-For each call to \texttt{summary2} the small sample size correction(s) will
-be recalculated. However the calculation of the sample correction(s)
-can be time consuming.
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-system.time(
-	res <- summary2(e, bias.correct = FALSE)
-)
-\end{lstlisting}
-
-\begin{verbatim}
-user  system elapsed 
-0.25    0.00    0.25
-\end{verbatim}
-
-In such a case one can pre-compute the main terms of the correction
-(e.g. the derivative of the variance-covariance matrix) once for all
-using the \texttt{sCorrect} method (\texttt{sCorrect} stands for Satterthwaite
-correction). When calling \texttt{sCorrect}, the right hand side indicates
-whether the bias correction should be used (equivalent to
-\texttt{bias.correct} argument described previously):
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e2 <- e
-sCorrect(e2) <- TRUE
-\end{lstlisting}
-
-\texttt{sCorrect} automatically store the pre-computed terms in the \texttt{sCorrect}
-slot of the object. It also adds the class \texttt{lvmfit2} to the object:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-class(e2)
-\end{lstlisting}
-
-\begin{verbatim}
-[1] "lvmfit2" "lvmfit"
-\end{verbatim}
-
-Then p-values computed using the small sample correction can be
-obtained calling the \texttt{summary} method, as usual:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e2)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value     P-value        df
-Y2     0.2335412  0.2518218 0.9274067 0.371516094 12.328385
-Y3     0.5114275  0.1828716 2.7966475 0.009848769 24.707696
-Y2~eta 0.9192847  0.2653220 3.4647887 0.031585600  3.515034
-Y3~eta 0.2626930  0.1562776 1.6809386 0.143826633  5.993407
-eta~X1 0.5150072  0.2642257 1.9491180 0.065414617 20.044312
-eta~X2 0.6212222  0.2221293 2.7966698 0.009275494 27.718363
-\end{verbatim}
-
-The \texttt{summary2} methods take approximately the same time as the usual
-\texttt{summary} method:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-system.time(
-	summary2(e2)
-)
-\end{lstlisting}
-
-\begin{verbatim}
-user  system elapsed 
-0.19    0.00    0.19
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-system.time(
-	summary(e2)
-)
-\end{lstlisting}
-
-\begin{verbatim}
-user  system elapsed 
-0.15    0.00    0.16
-\end{verbatim}
-
-\subsection{Single multivariate Wald test}
-\label{sec:org84008c5}
-
-The function \texttt{compare} from the lava package can be use to perform
-multivariate Wald tests, i.e. to test simultaneously several linear
-combinations of the coefficients.  \texttt{compare} uses a contrast matrix to
-encode in lines which linear combination of coefficients should be
-tested. For instance if we want to simultaneously test whether all the
-mean coefficients are 0, we can create a contrast matrix using
-\texttt{createContrast}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resC <- createContrast(e2, par = c("Y2=0","Y2~eta=0","eta~X1=0"))
-resC
-\end{lstlisting}
-
-\begin{verbatim}
-$contrast
-             Y2 Y3 Y4 eta Y2~eta Y3~eta Y4~eta eta~X1 eta~X2 Y1~~Y1 Y2~~Y2 Y3~~Y3 Y4~~Y4
-[Y2] = 0      1  0  0   0      0      0      0      0      0      0      0      0      0
-[Y2~eta] = 0  0  0  0   0      1      0      0      0      0      0      0      0      0
-[eta~X1] = 0  0  0  0   0      0      0      0      1      0      0      0      0      0
-             eta~~eta
-[Y2] = 0            0
-[Y2~eta] = 0        0
-[eta~X1] = 0        0
-
-$null
-    [Y2] = 0 [Y2~eta] = 0 [eta~X1] = 0 
-           0            0            0 
-
-$Q
-[1] 3
-\end{verbatim}
-
-We can then test the linear hypothesis by specifying in \texttt{compare} the
-left hand side of the hypothesis (argument contrast) and the right
-hand side (argument null):
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest0 <- lava::compare(e2, contrast = resC$contrast, null = resC$null)
-resTest0
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-chisq = 21.332, df = 3, p-value = 8.981e-05
-sample estimates:
-          Estimate   Std.Err       2.5%     97.5%
-[Y2]     0.2335412 0.2448593 -0.2463741 0.7134566
-[Y2~eta] 0.9192847 0.2621248  0.4055295 1.4330399
-[eta~X1] 0.5150072 0.2513393  0.0223912 1.0076231
-\end{verbatim}
-
-\texttt{compare} uses a chi-squared distribution to compute the p-values.
-Similarly to the Gaussian approximation, while being valid
-asymptotically this procedure may not provide a very accurate control
-of the type 1 error rate in small samples. Fortunately, the correction
-proposed for the univariate Wald statistic can be adapted to the
-multivariate Wald statistic. This is achieved by \texttt{compare2}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest1 <- compare2(e2, contrast = resC$contrast, null = resC$null)
-resTest1
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 6.7118, df1 = 3, df2 = 11.1, p-value = 0.007596
-sample estimates:
-              Estimate   Std.Err        df       2.5%     97.5%
-[Y2] = 0     0.2335412 0.2518218 12.328385 -0.3135148 0.7805973
-[Y2~eta] = 0 0.9192847 0.2653220  3.515034  0.1407653 1.6978041
-[eta~X1] = 0 0.5150072 0.2642257 20.044312 -0.0360800 1.0660943
-\end{verbatim}
-
-The same result could have been obtained using the par argument to
-define the linear hypothesis:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest2 <- compare2(e2, par = c("Y2","Y2~eta","eta~X1"))
-identical(resTest1,resTest2)
-\end{lstlisting}
-
-\begin{verbatim}
-[1] TRUE
-\end{verbatim}
-
-Now a F-distribution is used to compute the p-values. As before on can
-set the argument \texttt{bias.correct} to \texttt{FALSE} to use the Satterthwaite
-approximation alone:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest3 <- compare2(e, bias.correct = FALSE, 
-					  contrast = resC$contrast, null = resC$null)
-resTest3
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 7.1107, df1 = 3, df2 = 11.13, p-value = 0.006182
-sample estimates:
-              Estimate   Std.Err       df         2.5%     97.5%
-[Y2] = 0     0.2335412 0.2448593 12.91188 -0.295812256 0.7628948
-[Y2~eta] = 0 0.9192847 0.2621248  3.67464  0.165378080 1.6731913
-[eta~X1] = 0 0.5150072 0.2513393 21.57121 -0.006840023 1.0368543
-\end{verbatim}
-
-In this case the F-statistic of \texttt{compare2} is the same as the
-chi-squared statistic of \texttt{compare} divided by the rank of the contrast matrix:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest0$statistic/qr(resC$contrast)$rank
-\end{lstlisting}
-
-\begin{verbatim}
-   chisq 
-7.110689
-\end{verbatim}
-
-\subsection{Robust Wald tests}
-\label{sec:org67840d2}
-
-When one does not want to assume normality distributed residuals,
-robust standard error can be used instead of the model based standard
-errors. They can be obtain by setting the argument \texttt{robust} to \texttt{TRUE}
-when computing univariate Wald tests:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e, robust = TRUE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate robust SE   t-value      P-value       df
-Y2     0.2335412 0.2353245 0.9924222 0.3340117610 18.18841
-Y3     0.5114275 0.1897160 2.6957535 0.0099985389 42.79555
-Y2~eta 0.9192847 0.1791240 5.1321150 0.0002361186 12.19058
-Y3~eta 0.2626930 0.1365520 1.9237585 0.0653095551 26.20919
-eta~X1 0.5150072 0.2167580 2.3759546 0.0315112789 14.74859
-eta~X2 0.6212222 0.2036501 3.0504389 0.0035239307 54.54181
-\end{verbatim}
-
-or multivariate Wald test:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-compare2(e2, robust = TRUE, par = c("Y2","Y2~eta","eta~X1"))
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 12.526, df1 = 3, df2 = 23.97, p-value = 3.981e-05
-sample estimates:
-              Estimate robust SE       df        2.5%     97.5%
-[Y2] = 0     0.2335412 0.2353245 18.18841 -0.26049031 0.7275728
-[Y2~eta] = 0 0.9192847 0.1791240 12.19058  0.52968275 1.3088867
-[eta~X1] = 0 0.5150072 0.2167580 14.74859  0.05231154 0.9777028
-\end{verbatim}
-
-Only the standard error is affected by the argument \texttt{robust}, the
-degrees of freedom are the one of the model-based standard errors.  It
-may be surprising that the (corrected) robust standard errors are (in
-this example) smaller than the (corrected) model-based one. This is
-also the case for the uncorrected one:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-rbind(robust = diag(crossprod(iid(e2))),
-	  model = diag(vcov(e2)))
-\end{lstlisting}
-
-\begin{verbatim}
-               Y2         Y3         Y4        eta     Y2~eta     Y3~eta     Y4~eta
-robust 0.04777252 0.03325435 0.03886706 0.06011727 0.08590732 0.02179453 0.02981895
-model  0.05995606 0.03189389 0.04644303 0.06132384 0.06870941 0.02430412 0.03715633
-           eta~X1     eta~X2    Y1~~Y1    Y2~~Y2     Y3~~Y3     Y4~~Y4  eta~~eta
-robust 0.05166005 0.05709393 0.2795272 0.1078948 0.03769614 0.06923165 0.3198022
-model  0.06317144 0.04489865 0.1754744 0.1600112 0.05112998 0.10152642 0.2320190
-\end{verbatim}
-
-This may be explained by the fact the robust standard error tends to
-be liberal in small samples (e.g. see Kauermann 2001, A Note on the
-Efficiency of Sandwich Covariance Matrix Estimation ).
-
-\subsection{Assessing the type 1 error of the testing procedure}
-\label{sec:orgf5e63db}
-
-The function \texttt{calibrateType1} can be used to assess the type 1 error
-of a Wald statistic on a specific example. This however assumes that
-the estimated model is correctly specified. Let's make an example. For
-this we simulate some data:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-set.seed(10)
-m.generative <- lvm(Y ~ X1 + X2 + Gene)
-categorical(m.generative, labels = c("ss","ll")) <- ~Gene
-d <- lava::sim(m.generative, n = 50, latent = FALSE)
-\end{lstlisting}
-
-Let's now imagine that we want to analyze the relationship between
-Y and Gene using the following dataset:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-head(d)
-\end{lstlisting}
-
-\begin{verbatim}
-            Y         X1         X2 Gene
-1 -1.14369572 -0.4006375 -0.7618043   ss
-2 -0.09943370 -0.3345566  0.4193754   ss
-3 -0.04331996  1.3679540 -1.0399434   ll
-4  2.25017335  2.1377671  0.7115740   ss
-5  0.16715138  0.5058193 -0.6332130   ss
-6  1.73931135  0.7863424  0.5631747   ss
-\end{verbatim}
-
-For this we fit define a LVM:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-myModel <- lvm(Y ~ X1 + X2 + Gene)
-\end{lstlisting}
-
-and estimate the coefficients of the model using \texttt{estimate}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e <- estimate(myModel, data = d)
-e
-\end{lstlisting}
-
-\begin{verbatim}
-                    Estimate Std. Error  Z-value  P-value
-Regressions:                                             
-   Y~X1              1.02349    0.12017  8.51728   <1e-12
-   Y~X2              0.91519    0.12380  7.39244   <1e-12
-   Y~Genell          0.48035    0.23991  2.00224  0.04526
-Intercepts:                                              
-   Y                -0.11221    0.15773 -0.71141   0.4768
-Residual Variances:                                      
-   Y                 0.67073    0.13415  5.00000
-\end{verbatim}
-
-We can now use \texttt{calibrateType1} to perform a simulation study. We just
-need to define the null hypotheses (i.e. which coefficients should be
-set to 0 when generating the data) and the number of simulations:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-mySimulation <- calibrateType1(e, 
-							   param = "Y~Genell",
-							   n.rep = 50, 
-							   trace = FALSE, seed = 10)
-\end{lstlisting}
-
-To save time we only make 50 simulations but much more are necessary
-to really assess the type 1 error rate. Then we can use the \texttt{summary}
-method to display the results:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(mySimulation)
-\end{lstlisting}
-
-\begin{verbatim}
-Estimated type 1 error rate [95% confidence interval] 
-  > sample size: 50 | number of simulations: 50
-     link statistic correction type1error                  CI
- Y~Genell      Wald       Gaus       0.12 [0.05492 ; 0.24242]
-                          Satt       0.10 [0.04224 ; 0.21869]
-                           SSC       0.10 [0.04224 ; 0.21869]
-                    SSC + Satt       0.08 [0.03035 ; 0.19456]
-
-Corrections: Gaus = Gaussian approximation 
-             SSC  = small sample correction 
-             Satt = Satterthwaite approximation
-\end{verbatim}
-
-
-\clearpage
-
-\section{Adjustment for multiple comparisons}
-\label{sec:org4823f66}
-\subsection{Univariate Wald test, single model}
-\label{sec:org4c37542}
-
-When performing multiple testing, adjustment for multiple comparisons
-is necessary in order to control the type 1 error rate, i.e. to
-provide interpretable p.values. The \textbf{multcomp} package enables to do
-such adjustment when all tests comes from the same \texttt{lvmfit} object:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(Y ~ 0.25 * X1 + 0.3 * X2 + 0.35 * X3 + 0.4 * X4 + 0.45 * X5 + 0.5 * X6)
-set.seed(10)
-df.data <- sim(mSim, n = 4e1)
-
-## fit lvm
-e.lvm <- estimate(lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-name.coef <- names(coef(e.lvm))
-n.coef <- length(name.coef)
-
-## Create contrast matrix
-resC <- createContrast(e.lvm, par = paste0("Y~X",1:6), rowname.rhs = FALSE)
-resC$contrast
-\end{lstlisting}
-
-\begin{verbatim}
-     Y Y~X1 Y~X2 Y~X3 Y~X4 Y~X5 Y~X6 Y~~Y
-Y~X1 0    1    0    0    0    0    0    0
-Y~X2 0    0    1    0    0    0    0    0
-Y~X3 0    0    0    1    0    0    0    0
-Y~X4 0    0    0    0    1    0    0    0
-Y~X5 0    0    0    0    0    1    0    0
-Y~X6 0    0    0    0    0    0    1    0
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e.glht <- multcomp::glht(e.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(e.glht)
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error z value Pr(>|z|)   
-Y~X1 == 0   0.3270     0.1589   2.058  0.20725   
-Y~X2 == 0   0.4025     0.1596   2.523  0.06611 . 
-Y~X3 == 0   0.5072     0.1383   3.669  0.00144 **
-Y~X4 == 0   0.3161     0.1662   1.902  0.28582   
-Y~X5 == 0   0.3875     0.1498   2.586  0.05554 . 
-Y~X6 == 0   0.3758     0.1314   2.859  0.02482 * 
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- single-step method)
-\end{verbatim}
-
-Note that this correction relies on the Gaussian approximation. To use
-small sample corrections implemented in \textbf{lavaSearch2}, just call
-\texttt{glht2} instead of \texttt{glht}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e.glht2 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(e.glht2)
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error t value Pr(>|t|)  
-Y~X1 == 0   0.3270     0.1750   1.869   0.3290  
-Y~X2 == 0   0.4025     0.1757   2.291   0.1482  
-Y~X3 == 0   0.5072     0.1522   3.333   0.0123 *
-Y~X4 == 0   0.3161     0.1830   1.727   0.4128  
-Y~X5 == 0   0.3875     0.1650   2.349   0.1315  
-Y~X6 == 0   0.3758     0.1447   2.597   0.0762 .
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- single-step method)
-\end{verbatim}
-
-The single step method is the appropriate correction when one wants to
-report the most significant p-value relative to a set of
-hypotheses. If the second most significant p-value is also to be
-reported then the method "free" is more efficient:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(e.glht2, test = multcomp::adjusted("free"))
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error t value Pr(>|t|)  
-Y~X1 == 0   0.3270     0.1750   1.869   0.1291  
-Y~X2 == 0   0.4025     0.1757   2.291   0.0913 .
-Y~X3 == 0   0.5072     0.1522   3.333   0.0123 *
-Y~X4 == 0   0.3161     0.1830   1.727   0.1291  
-Y~X5 == 0   0.3875     0.1650   2.349   0.0913 .
-Y~X6 == 0   0.3758     0.1447   2.597   0.0645 .
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- free method)
-\end{verbatim}
-
-See the book: "Multiple Comparisons Using R" by Frank Bretz, Torsten
-Hothorn, and Peter Westfall (2011, CRC Press) for details about the
-theory underlying the \textbf{multcomp} package.
-
-\subsection{Univariate Wald test, multiple models}
-\label{sec:orgec7907f}
-
-Pipper et al. in "A Versatile Method for Confirmatory Evaluation of
-the Effects of a Covariate in Multiple Models" (2012, Journal of the
-Royal Statistical Society, Series C) developed a method to assess the
-effect of an exposure on several outcomes when a different model is
-fitted for each outcome. This method has been implemented in the \texttt{mmm}
-function from the \textbf{multcomp} package for glm and Cox
-models. \textbf{lavaSearch2} extends it to \texttt{lvm}. 
-
-Let's consider an example where we wish to assess the treatment effect
-on three outcomes X, Y, and Z. We have at hand three measurements
-relative to outcome Z for each individual:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-mSim <- lvm(X ~ Age + 0.5*Treatment,
-			Y ~ Gender + 0.25*Treatment,
-			c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
-			Age[40:5]~1)
-latent(mSim) <- ~eta
-categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
-categorical(mSim, labels = c("male","female")) <- ~Gender
-
-n <- 5e1
-set.seed(10)
-df.data <- sim(mSim, n = n, latent = FALSE)
-head(df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-         X      Age Treatment          Y Gender         Z1         Z2          Z3
-1 39.12289 39.10415   placebo  0.6088958 female  1.8714112  2.2960633 -0.09326935
-2 39.56766 39.25191      SSRI  1.0001325 female  0.9709943  0.6296226  1.31035910
-3 41.68751 43.05884   placebo  2.1551047 female -1.1634011 -0.3332927 -1.30769267
-4 44.68102 44.78019      SSRI  0.3852728 female -1.0305476  0.6678775  0.99780139
-5 41.42559 41.13105   placebo -0.8666783   male -1.6342816 -0.8285492  1.20450488
-6 42.64811 41.75832      SSRI -1.0710170 female -1.2198019 -1.9602130 -1.85472132
-   treatment
-1  1.1639675
-2 -1.5233846
-3 -2.5183351
-4 -0.7075292
-5 -0.2874329
-6 -0.4353083
-\end{verbatim}
-
-We fit a model specific to each outcome:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-lmX <- lm(X ~ Age + Treatment, data = df.data)
-lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
-lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ 1*eta, eta ~ -1 + Treatment), 
-				 data = df.data)
-\end{lstlisting}
-
-and combine them into a list of \texttt{lvmfit} objects:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-mmm.lvm <- multcomp::mmm(X = lmX, Y = lvmY, Z = lvmZ)
-\end{lstlisting}
-
-We can then generate a contrast matrix to test each coefficient
-related to the treatment:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resC <- createContrast(mmm.lvm, var.test = "Treatment", add.variance = TRUE)
-resC$contrast
-\end{lstlisting}
-
-\begin{verbatim}
-                     X: (Intercept) X: Age X: TreatmentSSRI X: sigma2 Y: Y
-X: TreatmentSSRI                  0      0                1         0    0
-Y: Y~TreatmentSSRI                0      0                0         0    0
-Z: eta~TreatmentSSRI              0      0                0         0    0
-                     Y: Y~Genderfemale Y: Y~TreatmentSSRI Y: Y~~Y Z: Z1 Z: Z2 Z: Z3
-X: TreatmentSSRI                     0                  0       0     0     0     0
-Y: Y~TreatmentSSRI                   0                  1       0     0     0     0
-Z: eta~TreatmentSSRI                 0                  0       0     0     0     0
-                     Z: eta~TreatmentSSRI Z: Z1~~Z1 Z: Z2~~Z2 Z: Z3~~Z3 Z: eta~~eta
-X: TreatmentSSRI                        0         0         0         0           0
-Y: Y~TreatmentSSRI                      0         0         0         0           0
-Z: eta~TreatmentSSRI                    1         0         0         0           0
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-lvm.glht2 <- glht2(mmm.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(lvm.glht2)
-\end{lstlisting}
-
-\begin{verbatim}
-
-	 Simultaneous Tests for General Linear Hypotheses
-
-Linear Hypotheses:
-                          Estimate Std. Error t value Pr(>|t|)
-X: TreatmentSSRI == 0       0.4661     0.2533   1.840    0.187
-Y: Y~TreatmentSSRI == 0    -0.5421     0.2613  -2.074    0.117
-Z: eta~TreatmentSSRI == 0  -0.6198     0.4404  -1.407    0.393
-(Adjusted p values reported -- single-step method)
-\end{verbatim}
-
-This can be compared to the unadjusted p.values:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(lvm.glht2, test = multcomp::univariate())
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Linear Hypotheses:
-                          Estimate Std. Error t value Pr(>|t|)  
-X: TreatmentSSRI == 0       0.4661     0.2533   1.840   0.0720 .
-Y: Y~TreatmentSSRI == 0    -0.5421     0.2613  -2.074   0.0435 *
-Z: eta~TreatmentSSRI == 0  -0.6198     0.4404  -1.407   0.1659  
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Univariate p values reported)
-\end{verbatim}
-
-
-\clearpage 
-
-\section{Model diagnostic}
-\label{sec:org1c04a31}
-\subsection{Detection of local dependencies}
-\label{sec:org6d7e4f5}
-
-The \texttt{modelsearch} function of \textbf{lava} is a diagnostic tool for latent
-variable models. It enables to search for local dependencies
-(i.e. model misspecification) and add them to the model. Obviously it
-is a data-driven procedure and its usefulness can be discussed,
-especially in small samples:
-\begin{itemize}
-\item the procedure is instable, i.e. is likely to lead to two different
-models when applied on two different dataset sampled from the same
-generative model.
-\item it is hard to define a meaningful significance threshold since
-p-values should be adjusted for multiple comparisons and sequential
-testing. However traditional methods like Bonferroni-Holm tend to
-over corrected and therefore reduce the power of the procedure since
-they assume that the test are independent.
-\end{itemize}
-
-The function \texttt{modelsearch2} in \textbf{lavaSearch2} partially solves the
-second issue by adjusting the p-values for multiple testing. Let's see
-an example:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(c(y1,y2,y3)~u, u~x1+x2)
-latent(mSim) <- ~u
-covariance(mSim) <- y2~y3
-transform(mSim, Id~u) <- function(x){1:NROW(x)}
-set.seed(10)
-df.data <- lava::sim(mSim, n = 125, latent = FALSE)
-head(df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-          y1           y2         y3         x1         x2 Id
-1  5.5071523  4.883752014  6.2928016  0.8694750  2.3991549  1
-2 -0.6398644  0.025832617  0.5088030 -0.6800096 -0.0898721  2
-3 -2.5835495 -2.616715027 -2.8982645  0.1732145 -0.8216484  3
-4 -2.5312637 -2.518185427 -2.9015033 -0.1594380 -0.2869618  4
-5  1.6346220 -0.001877577  0.3705181  0.7934994  0.1312789  5
-6  0.4939972  1.759884014  1.5010499  1.6943505 -1.0620840  6
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## fit model
-m <- lvm(c(y1,y2,y3)~u, u~x1)
-latent(m) <- ~u
-addvar(m) <- ~x2 
-e.lvm <- estimate(m, data = df.data)
-\end{lstlisting}
-
-\texttt{modelsearch2} can be used to sequentially apply the \texttt{modelsearch}
-function with a given correction for the p.values:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resScore <- modelsearch2(e.lvm, alpha = 0.1, trace = FALSE)
-displayScore <- summary(resScore)
-\end{lstlisting}
-
-\begin{verbatim}
-Sequential search for local dependence using the score statistic 
-The variable selection procedure retained 2 variables:
-    link statistic      p.value adjusted.p.value dp.Info selected nTests
-1   u~x2  6.036264 1.577228e-09     5.008615e-08       1     TRUE     10
-2 y2~~y3  2.629176 8.559198e-03     6.055947e-02       1     TRUE      9
-3  y3~x1  1.770997 7.656118e-02     2.814424e-01       1    FALSE      8
-Confidence level: 0.9 (two sided, adjustement: fastmax)
-\end{verbatim}
-
-This indeed matches the highest score statistic found by
-\texttt{modelsearch}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resScore0 <- modelsearch(e.lvm, silent = TRUE)
-c(statistic = sqrt(max(resScore0$test[,"Test Statistic"])), 
-  p.value = min(resScore0$test[,"P-value"]))
-\end{lstlisting}
-
-\begin{verbatim}
-   statistic      p.value 
-6.036264e+00 1.577228e-09
-\end{verbatim}
-
-We can compare the adjustment using the max distribution to bonferroni:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-data.frame(link = displayScore$table[,"link"],
-		   none = displayScore$table[,"p.value"],
-		   bonferroni = displayScore$table[,"p.value"]*displayScore$table[1,"nTests"],
-		   max = displayScore$table[,"adjusted.p.value"])
-\end{lstlisting}
-
-\begin{verbatim}
-    link         none   bonferroni          max
-1   u~x2 1.577228e-09 1.577228e-08 5.008615e-08
-2 y2~~y3 8.559198e-03 8.559198e-02 6.055947e-02
-3  y3~x1 7.656118e-02 7.656118e-01 2.814424e-01
-\end{verbatim}
-
-In theory, the correction based on the max statistic should give a p
-value that is smaller or equal than the p value adjusted using
-Bonferroni. However for for very small p-values, the max-correction
-can be numerically inaccurate and result in p-values that are slightly
-larger. The evolution of the estimation of a given coefficient across
-the sequential search can be displayed using \texttt{autoplot}:
-
-\begin{center}
-\includegraphics[width=.9\linewidth]{./modelsearch.png}
-\end{center}
-
-In many cases, all links are not plausible so the user should
-indicates which links should be investigated by \texttt{modelsearch2}. This
-can be done via the argument \texttt{link}:
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resRed <- modelsearch2(e.lvm, link = c("y1~~y2","y1~~y3","y2~~y3"), trace = FALSE)
-print(resRed)
-\end{lstlisting}
-
-\begin{verbatim}
-Sequential search for local dependence using the score statistic 
-The variable selection procedure did not retain any variable 
-    link statistic    p.value adjusted.p.value dp.Info selected nTests
-1 y1~~y3  1.754102 0.07941299        0.1818963       1    FALSE      3
-Confidence level: 0.95 (two sided, adjustement: fastmax)
-\end{verbatim}
-
-The function \texttt{findNewLink} can help the user to identify the set of
-relevant links:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-findNewLink(e.lvm$model, type = "covariance")$link
-\end{lstlisting}
-
-\begin{verbatim}
-[1] "y1~~y2" "y1~~y3" "y2~~y3"
-\end{verbatim}
-
-\subsection{Checking that the names of the variables in the model match those of the data}
-\label{sec:org30549f2}
-
-When estimating latent variable models using \textbf{lava}, it sometimes
-happens that the model does not converge:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-set.seed(10)
-df.data <- sim(lvm(Y~X1+X2), 1e2)
-
-## fit model
-mWrong <- lvm(Y ~ X + X2)
-eWrong <- estimate(mWrong, data = df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-Warning messages:
-1: In estimate.lvm(mWrong, data = df.data) :
-  Lack of convergence. Increase number of iteration or change starting values.
-2: In sqrt(diag(asVar)) : NaNs produced
-\end{verbatim}
-
-This can have several reasons:
-\begin{itemize}
-\item the model is not identifiable.
-\item the optimization routine did not managed to find a local
-optimum. This may happen for complex latent variable model where the
-objective function is not convex or locally convex.
-\item the user has made a mistake when defining the model or has not given
-the appropriate dataset.
-\end{itemize}
-
-The \texttt{checkData} function enables to check the last point. It compares
-the observed variables defined in the model and the one given by the
-dataset. In case of mismatch it returns a message:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-checkData(mWrong, df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-Missing variable in data: X
-\end{verbatim}
-
-In presence of latent variables, the user needs to explicitely define
-them in the model, otherwise \texttt{checkData} will identify them as an
-issue:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-set.seed(10)
-mSim <- lvm(c(Y1,Y2,Y3)~eta)
-latent(mSim) <- ~eta
-df.data <- sim(mSim, n = 1e2, latent = FALSE)
-
-## fit model
-m <- lvm(c(Y1,Y2,Y3)~eta)
-checkData(m, data = df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-Missing variable in data: eta
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-latent(m) <- ~eta
-checkData(m, data = df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-No issue detected
-\end{verbatim}
-
-
-\clearpage
-
-\section{Information about the R session used for this document}
-\label{sec:orgae25241}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-sessionInfo()
-\end{lstlisting}
-
-\begin{verbatim}
-R version 3.5.1 (2018-07-02)
-Platform: x86_64-w64-mingw32/x64 (64-bit)
-Running under: Windows 7 x64 (build 7601) Service Pack 1
-
-Matrix products: default
-
-locale:
-[1] LC_COLLATE=Danish_Denmark.1252  LC_CTYPE=Danish_Denmark.1252   
-[3] LC_MONETARY=Danish_Denmark.1252 LC_NUMERIC=C                   
-[5] LC_TIME=Danish_Denmark.1252    
-
-attached base packages:
-[1] stats     graphics  grDevices utils     datasets  methods   base     
-
-other attached packages:
-[1] lavaSearch2_1.5.1 lava_1.6.4        ggplot2_3.1.0    
-
-loaded via a namespace (and not attached):
- [1] Rcpp_1.0.0        pillar_1.3.1      compiler_3.5.1    plyr_1.8.4       
- [5] bindr_0.1.1       tools_3.5.1       tibble_2.0.1      gtable_0.2.0     
- [9] lattice_0.20-35   pkgconfig_2.0.2   rlang_0.3.1       Matrix_1.2-14    
-[13] parallel_3.5.1    mvtnorm_1.0-8     bindrcpp_0.2.2    withr_2.1.2      
-[17] dplyr_0.7.8       stringr_1.3.1     grid_3.5.1        tidyselect_0.2.5 
-[21] glue_1.3.0        R6_2.3.0          survival_2.42-6   multcomp_1.4-8   
-[25] TH.data_1.0-9     purrr_0.3.0       reshape2_1.4.3    magrittr_1.5     
-[29] scales_1.0.0      codetools_0.2-15  MASS_7.3-50       splines_3.5.1    
-[33] assertthat_0.2.0  colorspace_1.3-2  numDeriv_2016.8-1 labeling_0.3     
-[37] sandwich_2.5-0    stringi_1.2.4     lazyeval_0.2.1    munsell_0.5.0    
-[41] crayon_1.3.4      zoo_1.8-4
-\end{verbatim}
+% Created 2023-04-11 Tue 22:50
+% Intended LaTeX compiler: pdflatex
+\documentclass[12pt]{article}
+
+%%%% settings when exporting code %%%% 
+
+\usepackage{listings}
+\lstdefinestyle{code-small}{
+backgroundcolor=\color{white}, % background color for the code block
+basicstyle=\ttfamily\small, % font used to display the code
+commentstyle=\color[rgb]{0.5,0,0.5}, % color used to display comments in the code
+keywordstyle=\color{black}, % color used to highlight certain words in the code
+numberstyle=\ttfamily\tiny\color{gray}, % color used to display the line numbers
+rulecolor=\color{black}, % color of the frame
+stringstyle=\color[rgb]{0,.5,0},  % color used to display strings in the code
+breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace
+breaklines=true, % sets automatic line breaking
+columns=fullflexible,
+frame=single, % adds a frame around the code (none,leftline,topline,bottomline,lines,single,shadowbox)
+keepspaces=true, % keeps spaces in text, useful for keeping indentation of code
+literate={~}{$\sim$}{1}, % display the tilde symbol properly via latex
+numbers=none, % where to put the line-numbers; possible values are (none, left, right)
+numbersep=10pt, % how far the line-numbers are from the code
+showspaces=false,
+showstringspaces=false,
+stepnumber=1, % the step between two line-numbers. If it's 1, each line will be numbered
+tabsize=1,
+xleftmargin=0cm,
+emph={anova,apply,class,coef,colnames,colNames,colSums,dim,dcast,for,ggplot,head,if,ifelse,is.na,lapply,list.files,library,logLik,melt,plot,require,rowSums,sapply,setcolorder,setkey,str,summary,tapply},
+aboveskip = \medskipamount, % define the space above displayed listings.
+belowskip = \medskipamount, % define the space below displayed listings.
+lineskip = 0pt} % specifies additional space between lines in listings
+\lstset{style=code-small}
+%%%% packages %%%%%
+
+\usepackage[utf8]{inputenc}
+\usepackage[T1]{fontenc}
+\usepackage{lmodern}
+\usepackage{textcomp}
+\usepackage{color}
+\usepackage{graphicx}
+\usepackage{grffile}
+\usepackage{wrapfig}
+\usepackage{rotating}
+\usepackage{longtable}
+\usepackage{multirow}
+\usepackage{multicol}
+\usepackage{changes}
+\usepackage{pdflscape}
+\usepackage{geometry}
+\usepackage[normalem]{ulem}
+\usepackage{amssymb}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+\usepackage{dsfont}
+\usepackage{array}
+\usepackage{ifthen}
+\usepackage{hyperref}
+\usepackage{natbib}
+%\VignetteIndexEntry{overview}
+%\VignetteEngine{R.rsp::tex}
+%\VignetteKeyword{R}
+\RequirePackage{fancyvrb}
+\DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\small,formatcom = {\color[rgb]{0.5,0,0}}}
+\geometry{a4paper, left=15mm, right=15mm}
+\RequirePackage{colortbl} % arrayrulecolor to mix colors
+\RequirePackage{setspace} % to modify the space between lines - incompatible with footnote in beamer
+\usepackage{authblk} % enable several affiliations (clash with beamer)
+\renewcommand{\baselinestretch}{1.1}
+\geometry{top=1cm}
+\usepackage{enumitem}
+\RequirePackage{xspace} %
+\newcommand\Rlogo{\textbf{\textsf{R}}\xspace} %
+\RequirePackage{epstopdf} % to be able to convert .eps to .pdf image files
+\author{Brice Ozenne}
+\date{\today}
+\title{Overview of the functionalities of the package lavaSearch2}
+\hypersetup{
+ colorlinks=true,
+ pdfauthor={Brice Ozenne},
+ pdftitle={Overview of the functionalities of the package lavaSearch2},
+ pdfkeywords={},
+ pdfsubject={},
+ pdfcreator={Emacs 26.3 (Org mode 9.4.6)},
+ pdflang={English}
+ }
+\begin{document}
+
+\maketitle
+Load \textbf{lavaSearch2} in the R session:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+library(lavaSearch2)
+\end{lstlisting}
+
+\section{Inference}
+\label{sec:orgb046af1}
+\subsection{Introductory example}
+\label{sec:org0d7082d}
+You may have noticed that for simple linear regression, the p-values
+of the Wald tests from \texttt{lm}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## simulate data
+mSim <- lvm(Y[1:1]~0.3*X1+0.2*X2)
+set.seed(10)
+df.data <- sim(mSim, 2e1)
+
+## fit linear model
+summary(lm(Y~X1+X2, data = df.data))$coef
+\end{lstlisting}
+
+\begin{verbatim}
+             Estimate Std. Error   t value    Pr(>|t|)
+(Intercept) 0.7967775  0.2506767 3.1785069 0.005495832
+X1          0.1550938  0.2205080 0.7033477 0.491360483
+X2          0.4581556  0.2196785 2.0855736 0.052401103
+\end{verbatim}
+
+
+differ from those obtained with the corresponding latent variable
+model estimated by maximum likelihood:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## fit latent variable model
+m <- lvm(Y~X1+X2)
+e <- estimate(m, data = df.data)
+
+## extract Wald tests
+summary(e)$coef
+\end{lstlisting}
+
+\begin{verbatim}
+      Estimate Std. Error   Z-value      P-value
+Y~X1 0.1550938  0.2032984 0.7628877 0.4455303456
+Y~X2 0.4581556  0.2025335 2.2621221 0.0236898575
+Y~~Y 0.5557910  0.1757566 3.1622777           NA
+Y    0.7967775  0.2311125 3.4475747 0.0005656439
+\end{verbatim}
+
+
+For instance, the p-value for the effect of X2 is 0.024 in the latent
+variable model and 0.052 in the linear regression. The discrepancy is
+due to two corrections that \texttt{lm} applies in order to improve the control
+of the type 1 error of the Wald tests:
+\begin{itemize}
+\item use of a Student \(t\)-distribution instead of a Gaussian
+distribution (informally using a t-value instead of z-value).
+\item use of an unbiased estimator of the residual variance instead of
+the ML-estimator.
+\end{itemize}
+\textbf{lavaSearch2} attempts to generalize these corrections to models
+with correlated and heteroscedastic measurements. In the case of a
+simple linear regression, Wald tests obtained with \textbf{lavaSearch2}
+match almost exactly those of \texttt{lm}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary2(e)$coef
+\end{lstlisting}
+
+\begin{verbatim}
+      estimate        se statistic    df     p.value
+Y    0.7967775 0.2506766 3.1785073 17.00 0.005495827
+Y~X1 0.1550938 0.2205080 0.7033478 17.00 0.491360428
+Y~X2 0.4581556 0.2196784 2.0855738 17.00 0.052401076
+Y~~Y 0.6538716 0.2242761        NA  4.25          NA
+\end{verbatim}
+
+\subsection{How it works in a nutshell}
+\label{sec:org9da7fb3}
+
+When using \textbf{lava}, the p-values obtained from the summary
+(Wald tests) rely on a Gaussian approximation and maximum likelihood
+estimation. While being asymptotically valid, they usually do not
+provide a very accurate control of the type 1 error rate in small
+samples. Simulations have shown that the type 1 error rate tends to be
+too large, i.e. the p-values have a downward bias. \textbf{lavaSearch2}
+provides two improvements:
+\begin{itemize}
+\item using a Student's \(t\)-distribution instead of a Gaussian
+distribution to account for the uncertainty about the variance of the
+coefficients. The degrees of freedom are estimated using a Satterthwaite
+approximation, i.e. identifying the chi-squared distribution that
+best fits the observed moments of the variance of the coefficients
+(see the sketch after this list).
+\item (partially) correcting for the first order bias in the ML estimates
+of the variance parameters. This correction also affects the
+standard error of the estimates.
+\end{itemize}
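+
+To give a feel for the first improvement, here is a minimal sketch in
+base R of how the reference distribution changes a two-sided p-value
+(the statistic and degrees of freedom are illustrative values, not
+taken from any fitted model):
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## illustrative Wald statistic and Satterthwaite degrees of freedom
+stat <- 2.1
+df <- 15
+## two-sided p-value under the Gaussian approximation (~0.036)
+2 * pnorm(abs(stat), lower.tail = FALSE)
+## two-sided p-value using a Student's t-distribution (~0.053)
+2 * pt(abs(stat), df = df, lower.tail = FALSE)
+\end{lstlisting}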
+
+\subsection{Single univariate Wald test}
+\label{sec:org00a24b1}
+
+We will illustrate the functionalities using a simulated dataset:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## simulate data
+mSim <- lvm(Y1~eta,Y2~eta,Y3~0.4+0.4*eta,Y4~0.6+0.6*eta,eta~0.5*X1+0.7*X2)
+latent(mSim) <- ~eta
+set.seed(12)
+df.data <- sim(mSim, n = 3e1, latent = FALSE)
+
+## display
+head(df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+          Y1         Y2          Y3         Y4         X1         X2
+1 -1.7606233  0.1264910  0.66442611  0.2579355  0.2523400 -1.5431527
+2  3.0459417  2.4631929  0.00283511  2.1714802  0.6423143 -1.3206009
+3 -2.1443162 -0.3318033  0.82253070  0.3008415 -0.3469361 -0.6758215
+4 -2.5050328 -1.3878987 -0.10474850 -1.7814956 -0.5152632 -0.3670054
+5 -2.5307249  0.3012422  1.22046986 -1.0195188  0.3981689 -0.5138722
+6 -0.9521366  0.1669496 -0.21422548  1.5954456  0.9535572 -0.9592540
+\end{verbatim}
+
+
+We first fit the latent variable model using, as usual, the \texttt{estimate}
+function:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+m <- lvm(c(Y1,Y2,Y3,Y4)~eta, eta~X1+X2)
+e <- estimate(m, data = df.data)
+\end{lstlisting}
+
+We can extract the Wald tests based on the traditional approach using
+\texttt{summary}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+\end{lstlisting}
+
+\begin{verbatim}
+        Estimate Std. Error   Z-value      P-value
+Y2     0.2335412  0.2448593 0.9537775 0.3401962906
+Y3     0.5114275  0.1785886 2.8637186 0.0041869974
+Y2~eta 0.9192847  0.2621248 3.5070497 0.0004531045
+Y3~eta 0.2626930  0.1558978 1.6850339 0.0919820326
+eta~X1 0.5150072  0.2513393 2.0490515 0.0404570768
+eta~X2 0.6212222  0.2118930 2.9317729 0.0033703310
+\end{verbatim}
+
+
+As explained at the beginning of this section, \textbf{lavaSearch2} implements
+two corrections that can be directly applied by calling the \texttt{summary2}
+method:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary2(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+\end{lstlisting}
+
+\begin{verbatim}
+        estimate        se statistic        df     p.value
+Y2     0.2335412 0.2518218 0.9274067 12.332567 0.371510180
+Y3     0.5114275 0.1828716 2.7966475 24.693254 0.009851893
+Y2~eta 0.9192847 0.2653220 3.4647887  3.518708 0.031533355
+Y3~eta 0.2626930 0.1562776 1.6809386  5.953880 0.144155715
+eta~X1 0.5150072 0.2642257 1.9491180 20.047646 0.065412240
+eta~X2 0.6212222 0.2221293 2.7966698 27.739008 0.009272041
+\end{verbatim}
+
+
+To use the Satterthwaite correction alone, set the argument
+\texttt{ssc} to \texttt{FALSE}:
+
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary2(e, ssc = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+\end{lstlisting}
+
+\begin{verbatim}
+        estimate        se statistic        df     p.value
+Y2     0.2335412 0.2448593 0.9537775 12.911877 0.357711941
+Y3     0.5114275 0.1785886 2.8637186 25.780552 0.008210968
+Y2~eta 0.9192847 0.2621248 3.5070497  3.674640 0.028396459
+Y3~eta 0.2626930 0.1558978 1.6850339  6.222912 0.141185621
+eta~X1 0.5150072 0.2513393 2.0490515 21.571210 0.052814794
+eta~X2 0.6212222 0.2118930 2.9317729 30.370334 0.006351686
+\end{verbatim}
+
+
+When using the Satterthwaite correction alone, the standard errors are
+left unchanged compared to the original lava output. The only change
+is how the p-values are computed, i.e. based on the quantiles of a
+Student's \(t\)-distribution instead of a Gaussian distribution.
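+
+As a sanity check, the p-value of the \texttt{Y3} row above can be
+recomputed by hand from its statistic and degrees of freedom (numbers
+copied from the displayed output):
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## two-sided p-value from a Student's t-distribution with
+## Satterthwaite degrees of freedom (~0.00821, as in the table)
+2 * pt(abs(2.8637186), df = 25.780552, lower.tail = FALSE)
+\end{lstlisting}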
+
+To only use the bias correction, set the argument \texttt{df} to \texttt{FALSE}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary2(e, df = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+\end{lstlisting}
+
+\begin{verbatim}
+        estimate        se statistic  df      p.value
+Y2     0.2335412 0.2518218 0.9274067 Inf 0.3537154044
+Y3     0.5114275 0.1828716 2.7966475 Inf 0.0051635832
+Y2~eta 0.9192847 0.2653220 3.4647887 Inf 0.0005306482
+Y3~eta 0.2626930 0.1562776 1.6809386 Inf 0.0927748494
+eta~X1 0.5150072 0.2642257 1.9491180 Inf 0.0512813393
+eta~X2 0.6212222 0.2221293 2.7966698 Inf 0.0051632271
+\end{verbatim}
+
+\subsection{Saving computation time with \texttt{estimate2}}
+\label{sec:orgaa5d2f3}
+For each call to \texttt{summary2} the small sample correction(s) will
+be recalculated. However the calculation of these correction(s)
+can be time consuming.
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+system.time(
+    res <- summary2(e, ssc = FALSE)
+)
+\end{lstlisting}
+
+\begin{verbatim}
+ user  system elapsed 
+0.128   0.000   0.129
+\end{verbatim}
+
+
+In such a case one can pre-compute the main terms of the correction
+(e.g. the derivative of the variance-covariance matrix) once and for all
+using the \texttt{estimate2} method:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+e2 <- estimate2(e)
+\end{lstlisting}
+
+\texttt{estimate2} automatically stores the pre-computed terms in the
+\texttt{sCorrect} slot of the object. It also adds the class \texttt{lvmfit2} to the
+object:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+class(e2)
+\end{lstlisting}
+
+\begin{verbatim}
+[1] "lvmfit2" "lvmfit"
+\end{verbatim}
+
+
+Calling the \texttt{summary} method is now much faster:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+system.time(
+    summary(e2)
+)
+\end{lstlisting}
+
+\begin{verbatim}
+ user  system elapsed 
+0.027   0.000   0.026
+\end{verbatim}
+
+\subsection{Single multivariate Wald test}
+\label{sec:org6c4b2cc}
+
+The function \texttt{compare} from the lava package can be used to perform
+multivariate Wald tests, i.e. to test simultaneously several linear
+combinations of the coefficients. We can test a linear hypothesis by
+specifying in \texttt{compare} the parameters we would like to test:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resTest0 <- lava::compare(e, par = c("Y2","Y2~eta","eta~X1"))
+resTest0
+\end{lstlisting}
+
+\begin{verbatim}
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+chisq = 21.332, df = 3, p-value = 8.981e-05
+sample estimates:
+          Estimate   Std.Err       2.5%     97.5%
+[Y2]     0.2335412 0.2448593 -0.2463741 0.7134566
+[Y2~eta] 0.9192847 0.2621248  0.4055295 1.4330399
+[eta~X1] 0.5150072 0.2513393  0.0223912 1.0076231
+\end{verbatim}
+
+\texttt{compare} uses a chi-squared distribution to compute the p-values.
+As with the Gaussian approximation, this procedure is asymptotically
+valid but may not provide a very accurate control
+of the type 1 error rate in small samples. Fortunately, the correction
+proposed for the univariate Wald statistic can be adapted to the
+multivariate Wald statistic. This is achieved by \texttt{compare2}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resTest1 <- compare2(e, linfct = c("Y2","Y2~eta","eta~X1"))
+resTest1
+\end{lstlisting}
+
+\begin{verbatim}
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+F-statistic = 6.7118, df1 = 3, df2 = 11.11, p-value = 0.007577
+sample estimates:
+          Estimate   Std.Err        df        2.5%     97.5%
+[Y2]     0.2335412 0.2518218 12.332567 -0.31349486 0.7805774
+[Y2~eta] 0.9192847 0.2653220  3.518708  0.14114161 1.6974278
+[eta~X1] 0.5150072 0.2642257 20.047646 -0.03607414 1.0660884
+\end{verbatim}
+
+The same result could have been obtained by first defining a contrast
+matrix encoding (by rows) which linear combinations of coefficients
+should be tested, e.g.:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resC <- createContrast(e, linfct = c("Y2=0","Y2~eta=0","eta~X1=0"))
+resC$contrast
+\end{lstlisting}
+
+\begin{verbatim}
+             Y2 Y3 Y4 eta Y2~eta Y3~eta Y4~eta eta~X1 eta~X2 Y1~~Y1 Y2~~Y2 Y3~~Y3 Y4~~Y4
+[Y2] = 0      1  0  0   0      0      0      0      0      0      0      0      0      0
+[Y2~eta] = 0  0  0  0   0      1      0      0      0      0      0      0      0      0
+[eta~X1] = 0  0  0  0   0      0      0      0      1      0      0      0      0      0
+             eta~~eta
+[Y2] = 0            0
+[Y2~eta] = 0        0
+[eta~X1] = 0        0
+\end{verbatim}
+
+
+and passing it to the argument \texttt{linfct}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resTest2 <- compare2(e2, linfct = resC$contrast)
+identical(resTest1,resTest2)
+\end{lstlisting}
+
+\begin{verbatim}
+[1] TRUE
+\end{verbatim}
+
+
+Now an F-distribution is used to compute the p-values. As before, one can
+set the argument \texttt{ssc} to \texttt{FALSE} to use the Satterthwaite
+approximation alone:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resTest3 <- compare2(e, ssc = FALSE, linfct = resC$contrast)
+resTest3
+\end{lstlisting}
+
+\begin{verbatim}
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+F-statistic = 7.1107, df1 = 3, df2 = 11.13, p-value = 0.006182
+sample estimates:
+          Estimate   Std.Err       df         2.5%     97.5%
+[Y2]     0.2335412 0.2448593 12.91188 -0.295812256 0.7628948
+[Y2~eta] 0.9192847 0.2621248  3.67464  0.165378080 1.6731913
+[eta~X1] 0.5150072 0.2513393 21.57121 -0.006840023 1.0368543
+\end{verbatim}
+
+In this case the F-statistic of \texttt{compare2} is the same as the
+chi-squared statistic of \texttt{compare} divided by the rank of the contrast matrix:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resTest0$statistic/qr(resC$contrast)$rank
+\end{lstlisting}
+
+\begin{verbatim}
+   chisq 
+7.110689
+\end{verbatim}
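+
+The two tests differ only through the denominator degrees of freedom:
+with an infinite \texttt{df2} the F-test reduces to the chi-squared
+test. A small sketch using the statistics displayed above:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## chi-squared statistic divided by its rank (values copied from above)
+pf(21.332 / 3, df1 = 3, df2 = Inf, lower.tail = FALSE)
+## same p-value as the chi-squared test of compare (~8.98e-05)
+pchisq(21.332, df = 3, lower.tail = FALSE)
+\end{lstlisting}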
+
+\subsection{Robust Wald tests}
+\label{sec:orgf3ea70d}
+
+When one does not want to assume normally distributed residuals,
+robust standard errors can be used instead of the model-based standard
+errors. They can be obtained by setting the argument \texttt{robust} to \texttt{TRUE}
+when computing univariate Wald tests:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary2(e, robust = TRUE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
+\end{lstlisting}
+
+\begin{verbatim}
+        estimate robust SE statistic        df     p.value
+Y2     0.2335412 0.2353245 0.9924222 12.332567 0.340064859
+Y3     0.5114275 0.1897160 2.6957534 24.693254 0.012453535
+Y2~eta 0.9192847 0.1791240 5.1321143  3.518708 0.009583913
+Y3~eta 0.2626930 0.1365520 1.9237580  5.953880 0.103104593
+eta~X1 0.5150072 0.2167580 2.3759546 20.047646 0.027583693
+eta~X2 0.6212222 0.2036501 3.0504385 27.739008 0.004986632
+\end{verbatim}
+
+
+By default the degrees of freedom of the model-based variance are
+used. Degrees of freedom can instead be computed via a Satterthwaite
+approximation using \texttt{lava.options(df.robust=2)}. However this is not
+recommended, as the resulting degrees of freedom have shown erratic
+behavior. Multivariate Wald tests can be obtained in a similar way
+using the \texttt{compare2} method:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+compare2(e2, linfct = c("Y2","Y2~eta","eta~X1"), robust = TRUE)
+\end{lstlisting}
+
+\begin{verbatim}
+
+	- Wald test -
+
+	Null Hypothesis:
+	[Y2] = 0
+	[Y2~eta] = 0
+	[eta~X1] = 0
+
+data:  
+F-statistic = 12.526, df1 = 3, df2 = 8.41, p-value = 0.001832
+sample estimates:
+          Estimate robust SE        df        2.5%     97.5%
+[Y2]     0.2335412 0.2353245 12.332567 -0.27765746 0.7447400
+[Y2~eta] 0.9192847 0.1791240  3.518708  0.39394539 1.4446240
+[eta~X1] 0.5150072 0.2167580 20.047646  0.06292679 0.9670875
+\end{verbatim}
+
+It may be surprising that the (corrected) robust standard errors are
+(in this example) smaller than the (corrected) model-based ones. This
+is also the case for the uncorrected ones:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+rbind(robust = diag(crossprod(iid(e))),
+      model = diag(vcov(e)))
+\end{lstlisting}
+
+\begin{verbatim}
+               Y2         Y3         Y4        eta     Y2~eta     Y3~eta     Y4~eta
+robust 0.04777252 0.03325435 0.03886706 0.06011727 0.08590732 0.02179453 0.02981895
+model  0.05995606 0.03189389 0.04644303 0.06132384 0.06870941 0.02430412 0.03715633
+           eta~X1     eta~X2    Y1~~Y1    Y2~~Y2     Y3~~Y3     Y4~~Y4  eta~~eta
+robust 0.05166005 0.05709393 0.2795272 0.1078948 0.03769614 0.06923165 0.3198022
+model  0.06317144 0.04489865 0.1754744 0.1600112 0.05112998 0.10152642 0.2320190
+\end{verbatim}
+
+
+This may be explained by the fact that robust standard errors tend to
+be liberal in small samples (e.g. see Kauermann and Carroll 2001, A Note on the
+Efficiency of Sandwich Covariance Matrix Estimation).
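+
+A quick illustration of the two estimators on a small simulated linear
+model (an illustrative simulation using the \textbf{sandwich} package,
+unrelated to the fit above; the direction of the difference varies from
+sample to sample, but the uncorrected sandwich estimator is on average
+too small in small samples):
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+library(sandwich)
+set.seed(1)
+n <- 20
+x <- rnorm(n)
+y <- x + rnorm(n)
+fit <- lm(y ~ x)
+## model-based vs uncorrected sandwich (HC0) standard errors
+rbind(model  = sqrt(diag(vcov(fit))),
+      robust = sqrt(diag(vcovHC(fit, type = "HC0"))))
+\end{lstlisting}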
+
+\subsection{Assessing the type 1 error of the testing procedure}
+\label{sec:org2f34c32}
+
+The function \texttt{calibrateType1} can be used to assess the type 1 error
+of a Wald statistic on a specific example. This however assumes that
+the estimated model is correctly specified. Let's work through an example. For
+this we simulate some data:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+set.seed(10)
+m.generative <- lvm(Y ~ X1 + X2 + Gene)
+categorical(m.generative, labels = c("ss","ll")) <- ~Gene
+d <- lava::sim(m.generative, n = 50, latent = FALSE)
+\end{lstlisting}
+
+Let's now imagine that we want to analyze the relationship between
+Y and Gene using the following dataset:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+head(d)
+\end{lstlisting}
+
+\begin{verbatim}
+            Y         X1         X2 Gene
+1 -1.14369572 -0.4006375 -0.7618043   ss
+2 -0.09943370 -0.3345566  0.4193754   ss
+3 -0.04331996  1.3679540 -1.0399434   ll
+4  2.25017335  2.1377671  0.7115740   ss
+5  0.16715138  0.5058193 -0.6332130   ss
+6  1.73931135  0.7863424  0.5631747   ss
+\end{verbatim}
+
+
+For this we define an LVM:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+myModel <- lvm(Y ~ X1 + X2 + Gene)
+\end{lstlisting}
+
+and estimate the coefficients of the model using \texttt{estimate}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+e <- estimate(myModel, data = d)
+e
+\end{lstlisting}
+
+\begin{verbatim}
+                    Estimate Std. Error  Z-value  P-value
+Regressions:                                             
+   Y~X1              1.02349    0.12017  8.51728   <1e-12
+   Y~X2              0.91519    0.12380  7.39244   <1e-12
+   Y~Genell          0.48035    0.23991  2.00224  0.04526
+Intercepts:                                              
+   Y                -0.11221    0.15773 -0.71141   0.4768
+Residual Variances:                                      
+   Y                 0.67073    0.13415  5.00000
+\end{verbatim}
+
+
+We can now use \texttt{calibrateType1} to perform a simulation study. We just
+need to define the null hypotheses (i.e. which coefficients should be
+set to 0 when generating the data) and the number of simulations:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+mySimulation <- calibrateType1(e, 
+                               param = "Y~Genell",
+                               n.rep = 50, 
+                               trace = FALSE, seed = 10)
+\end{lstlisting}
+
+To save time we only run 50 simulations, but many more are necessary
+to reliably assess the type 1 error rate. Then we can use the \texttt{summary}
+method to display the results:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary(mySimulation)
+\end{lstlisting}
+
+\begin{verbatim}
+Estimated type 1 error rate [95% confidence interval]
+sample size: 50 | number of simulations: 50
+            link statistic correction type1error                  CI
+ [Y~Genell] == 0      Wald       Gaus       0.12 [0.05492 ; 0.24242]
+                                 Satt       0.10 [0.04224 ; 0.21869]
+                                  SSC       0.08 [0.03035 ; 0.19456]
+                           SSC + Satt       0.08 [0.03035 ; 0.19456]
+
+Corrections: Gaus = Gaussian approximation 
+             SSC  = small sample correction 
+             Satt = Satterthwaite approximation
+\end{verbatim}
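+
+With only 50 replications the confidence intervals above are wide. A
+rough sketch of how many replications are needed to estimate a type 1
+error rate near 5\% with a given precision (a standard sample-size
+formula for a proportion, not something provided by \textbf{lavaSearch2}):
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## replications needed for a +/- 0.01 margin of error around 0.05
+alpha <- 0.05
+margin <- 0.01
+ceiling(qnorm(0.975)^2 * alpha * (1 - alpha) / margin^2) ## ~1825
+\end{lstlisting}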
+
+\clearpage
+
+\section{Adjustment for multiple comparisons}
+\label{sec:org3132637}
+\subsection{Univariate Wald test, single model}
+\label{sec:orgc7110d9}
+
+When performing multiple testing, adjustment for multiple comparisons
+is necessary in order to control the type 1 error rate, i.e. to
+provide interpretable p-values. The \textbf{multcomp} package enables
+such an adjustment when all tests come from the same \texttt{lvmfit} object:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## simulate data
+mSim <- lvm(Y ~ 0.25 * X1 + 0.3 * X2 + 0.35 * X3 + 0.4 * X4 + 0.45 * X5 + 0.5 * X6)
+set.seed(10)
+df.data <- sim(mSim, n = 4e1)
+
+## fit lvm
+e.lvm <- estimate(lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+name.coef <- names(coef(e.lvm))
+n.coef <- length(name.coef)
+
+## Create contrast matrix
+resC <- createContrast(e.lvm, linfct = paste0("Y~X",1:6), rowname.rhs = FALSE)
+resC$contrast
+\end{lstlisting}
+
+\begin{verbatim}
+       Y Y~X1 Y~X2 Y~X3 Y~X4 Y~X5 Y~X6 Y~~Y
+[Y~X1] 0    1    0    0    0    0    0    0
+[Y~X2] 0    0    1    0    0    0    0    0
+[Y~X3] 0    0    0    1    0    0    0    0
+[Y~X4] 0    0    0    0    1    0    0    0
+[Y~X5] 0    0    0    0    0    1    0    0
+[Y~X6] 0    0    0    0    0    0    1    0
+\end{verbatim}
+
+
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+e.glht <- multcomp::glht(e.lvm, linfct = resC$contrast, rhs = resC$null)
+summary(e.glht)
+\end{lstlisting}
+
+\begin{verbatim}
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+
+Linear Hypotheses:
+            Estimate Std. Error z value Pr(>|z|)   
+[Y~X1] == 0   0.3270     0.1589   2.058  0.20725   
+[Y~X2] == 0   0.4025     0.1596   2.523  0.06611 . 
+[Y~X3] == 0   0.5072     0.1383   3.669  0.00144 **
+[Y~X4] == 0   0.3161     0.1662   1.902  0.28582   
+[Y~X5] == 0   0.3875     0.1498   2.586  0.05554 . 
+[Y~X6] == 0   0.3758     0.1314   2.859  0.02482 * 
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(Adjusted p values reported -- single-step method)
+\end{verbatim}
+
+Note that this adjustment relies on the Gaussian approximation. To use
+the small sample corrections implemented in \textbf{lavaSearch2}, just call
+\texttt{glht2} instead of \texttt{glht}:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+e.glht2 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null)
+summary(e.glht2)
+\end{lstlisting}
+
+\begin{verbatim}
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+Standard errors: Model-based
+
+Linear Hypotheses:
+             estimate        se        df     lower     upper statistic p.value  
+[Y~X1] == 0  0.327006  0.174976 33.000000 -0.158914  0.812926    1.8689 0.32895  
+[Y~X2] == 0  0.402533  0.175670 33.000000 -0.085313  0.890380    2.2914 0.14817  
+[Y~X3] == 0  0.507242  0.152209 33.000000  0.084548  0.929937    3.3325 0.01232 *
+[Y~X4] == 0  0.316099  0.182995 33.000000 -0.192089  0.824288    1.7274 0.41283  
+[Y~X5] == 0  0.387459  0.164970 33.000000 -0.070673  0.845590    2.3487 0.13153  
+[Y~X6] == 0  0.375763  0.144712 33.000000 -0.026113  0.777639    2.5966 0.07617 .
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(CIs/p-values adjusted for multiple comparisons -- single step max-test) 
+Error when computing the adjusted p-value by numerical integration: 0.00012125
+\end{verbatim}
+
+The single-step method is the appropriate correction when one wants to
+report the most significant p-value relative to a set of
+hypotheses. If the second most significant p-value is also to be
+reported, then the method "free" is more efficient:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary(e.glht2, test = multcomp::adjusted("free"))
+\end{lstlisting}
+
+\begin{verbatim}
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
+Standard errors: Model-based
+
+Linear Hypotheses:
+            estimate       se       df statistic p.value  
+[Y~X1] == 0  0.32701  0.17498 33.00000    1.8689 0.12911  
+[Y~X2] == 0  0.40253  0.17567 33.00000    2.2914 0.09129 .
+[Y~X3] == 0  0.50724  0.15221 33.00000    3.3325 0.01242 *
+[Y~X4] == 0  0.31610  0.18299 33.00000    1.7274 0.12911  
+[Y~X5] == 0  0.38746  0.16497 33.00000    2.3487 0.09129 .
+[Y~X6] == 0  0.37576  0.14471 33.00000    2.5966 0.06451 .
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(CIs/p-values adjusted for multiple comparisons -- step down max-test)
+\end{verbatim}
+
+See the book: "Multiple Comparisons Using R" by Frank Bretz, Torsten
+Hothorn, and Peter Westfall (2011, CRC Press) for details about the
+theory underlying the \textbf{multcomp} package.
+
+\subsection{Univariate Wald test, multiple models}
+\label{sec:org11c88f0}
+
+Pipper et al. in "A Versatile Method for Confirmatory Evaluation of
+the Effects of a Covariate in Multiple Models" (2012, Journal of the
+Royal Statistical Society, Series C) developed a method to assess the
+effect of an exposure on several outcomes when a different model is
+fitted for each outcome. This method has been implemented in the \texttt{mmm}
+function from the \textbf{multcomp} package for glm and Cox
+models. \textbf{lavaSearch2} extends it to \texttt{lvm}. 
+
+Let's consider an example where we wish to assess the treatment effect
+on three outcomes X, Y, and Z. We have at hand three measurements
+relative to outcome Z for each individual:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+mSim <- lvm(X ~ Age + 0.5*Treatment,
+            Y ~ Gender + 0.25*Treatment,
+            c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
+            Age[40:5]~1)
+latent(mSim) <- ~eta
+categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
+categorical(mSim, labels = c("male","female")) <- ~Gender
+
+n <- 5e1
+set.seed(10)
+df.data <- sim(mSim, n = n, latent = FALSE)
+head(df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+         X      Age Treatment          Y Gender         Z1         Z2          Z3
+1 39.12289 39.10415   placebo  0.6088958 female  1.8714112  2.2960633 -0.09326935
+2 39.56766 39.25191      SSRI  1.0001325 female  0.9709943  0.6296226  1.31035910
+3 41.68751 43.05884   placebo  2.1551047 female -1.1634011 -0.3332927 -1.30769267
+4 44.68102 44.78019      SSRI  0.3852728 female -1.0305476  0.6678775  0.99780139
+5 41.42559 41.13105   placebo -0.8666783   male -1.6342816 -0.8285492  1.20450488
+6 42.64811 41.75832      SSRI -1.0710170 female -1.2198019 -1.9602130 -1.85472132
+   treatment
+1  1.1639675
+2 -1.5233846
+3 -2.5183351
+4 -0.7075292
+5 -0.2874329
+6 -0.4353083
+\end{verbatim}
+
+We fit a model specific to each outcome:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+lvmX <- estimate(lvm(X ~ Age + Treatment), data = df.data)
+lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
+lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ 1*eta, eta ~ -1 + Treatment), 
+                 data = df.data)
+\end{lstlisting}
+
+and combine them into a list of \texttt{lvmfit} objects:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+mmm.lvm <- multcomp::mmm(X = lvmX, Y = lvmY, Z = lvmZ)
+\end{lstlisting}
+
+We can then call \texttt{glht2} to apply the small sample corrections,
+generate a contrast matrix containing tests for all coefficients
+related to the treatment, and collect the results:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+lvm.glht2 <- glht2(mmm.lvm, linfct = "TreatmentSSRI")
+summary(lvm.glht2)
+\end{lstlisting}
+
+\begin{verbatim}
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Linear Hypotheses:
+                         estimate        se        df     lower     upper statistic
+X: [TreatmentSSRI] == 0  0.466150  0.253280 47.000000 -0.154910  1.087209    1.8405
+Y: [TreatmentSSRI] == 0 -0.542096  0.261321 47.000000 -1.182874  0.098682   -2.0744
+Z: [TreatmentSSRI] == 0 -0.619822  0.440397 47.000000 -1.699707  0.460063   -1.4074
+                        p.value
+X: [TreatmentSSRI] == 0  0.1863
+Y: [TreatmentSSRI] == 0  0.1165
+Z: [TreatmentSSRI] == 0  0.3912
+(CIs/p-values adjusted for multiple comparisons -- single step max-test) 
+Error when computing the adjusted p-value by numerical integration: 0.00025692
+\end{verbatim}
+
+This can be compared to the unadjusted p-values:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+summary(lvm.glht2, test = multcomp::adjusted("none"))
+\end{lstlisting}
+
+\begin{verbatim}
+
+	 Simultaneous Tests for General Linear Hypotheses
+
+Multiple Comparisons of Means (two sided tests) 
+
+Linear Hypotheses:
+                         estimate        se        df     lower     upper statistic
+X: [TreatmentSSRI] == 0  0.466150  0.253280 47.000000 -0.043383  0.975682    1.8405
+Y: [TreatmentSSRI] == 0 -0.542096  0.261321 47.000000 -1.067807 -0.016385   -2.0744
+Z: [TreatmentSSRI] == 0 -0.619822  0.440397 47.000000 -1.505787  0.266143   -1.4074
+                        p.value  
+X: [TreatmentSSRI] == 0 0.07202 .
+Y: [TreatmentSSRI] == 0 0.04354 *
+Z: [TreatmentSSRI] == 0 0.16588  
+---
+Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+(CIs/p-values not adjusted for multiple comparisons)
+\end{verbatim}
+
+\clearpage 
+
+\section{Model diagnostic}
+\label{sec:orgc1e79df}
+\subsection{Detection of local dependencies}
+\label{sec:org439924f}
+
+The \texttt{modelsearch} function of \textbf{lava} is a diagnostic tool for latent
+variable models. It enables searching for local dependencies
+(i.e. model misspecification) and adding them to the model. Obviously it
+is a data-driven procedure and its usefulness can be debated,
+especially in small samples:
+\begin{itemize}
+\item the procedure is unstable, i.e. it is likely to lead to two different
+models when applied to two different datasets sampled from the same
+generative model.
+\item it is hard to define a meaningful significance threshold since
+p-values should be adjusted for multiple comparisons and sequential
+testing. However traditional methods like Bonferroni-Holm tend to
+over-correct and therefore reduce the power of the procedure, since
+they assume that the tests are independent (see the sketch after this list).
+\end{itemize}
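+
+To see why accounting for the correlation between tests matters, here
+is a minimal sketch comparing Bonferroni with an adjustment based on
+the distribution of the maximum statistic, for hypothetical
+equicorrelated z-statistics (using the \textbf{mvtnorm} package; the
+values of \texttt{k}, \texttt{rho}, and \texttt{z} are arbitrary):
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+library(mvtnorm)
+k <- 10    ## number of tests
+rho <- 0.5 ## correlation between the test statistics
+z <- 2.5   ## largest observed z-statistic
+Sigma <- matrix(rho, k, k); diag(Sigma) <- 1
+## adjusted p-value based on the joint distribution of the maximum
+1 - pmvnorm(lower = rep(-z, k), upper = rep(z, k), sigma = Sigma)
+## Bonferroni-adjusted p-value (ignores the correlation, hence larger)
+min(1, k * 2 * pnorm(-z))
+\end{lstlisting}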
+
+The function \texttt{modelsearch2} in \textbf{lavaSearch2} partially solves the
+second issue by adjusting the p-values for multiple testing. Let's see
+an example:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## simulate data
+mSim <- lvm(c(y1,y2,y3)~u, u~x1+x2)
+latent(mSim) <- ~u
+covariance(mSim) <- y2~y3
+transform(mSim, Id~u) <- function(x){1:NROW(x)}
+set.seed(10)
+df.data <- lava::sim(mSim, n = 125, latent = FALSE)
+head(df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+          y1           y2         y3         x1         x2 Id
+1  5.5071523  4.883752014  6.2928016  0.8694750  2.3991549  1
+2 -0.6398644  0.025832617  0.5088030 -0.6800096 -0.0898721  2
+3 -2.5835495 -2.616715027 -2.8982645  0.1732145 -0.8216484  3
+4 -2.5312637 -2.518185427 -2.9015033 -0.1594380 -0.2869618  4
+5  1.6346220 -0.001877577  0.3705181  0.7934994  0.1312789  5
+6  0.4939972  1.759884014  1.5010499  1.6943505 -1.0620840  6
+\end{verbatim}
+
+
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## fit model
+m <- lvm(c(y1,y2,y3)~u, u~x1)
+latent(m) <- ~u
+addvar(m) <- ~x2 
+e.lvm <- estimate(m, data = df.data)
+\end{lstlisting}
+
+\texttt{modelsearch2} can be used to sequentially apply the \texttt{modelsearch}
+function with a given correction for the p-values:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resScore <- modelsearch2(e.lvm, alpha = 0.1, trace = FALSE)
+displayScore <- summary(resScore)
+\end{lstlisting}
+
+\begin{verbatim}
+Sequential search for local dependence using the score statistic 
+The variable selection procedure retained 2 variables:
+    link statistic      p.value adjusted.p.value dp.Info selected nTests
+1   u~x2 36.436487 1.577228e-09     5.008615e-08       1     TRUE     10
+2 y2~~y3  6.912567 8.559203e-03     6.056378e-02       1     TRUE      9
+3  y3~x1  3.136429 7.656125e-02     2.814343e-01       1    FALSE      8
+Confidence level: 0.9 (two sided, adjustement: fastmax)
+\end{verbatim}
+
+
+This indeed matches the highest score statistic found by
+\texttt{modelsearch} (the table above reports the squared statistic):
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resScore0 <- modelsearch(e.lvm, silent = TRUE)
+c(statistic = sqrt(max(resScore0$test[,"Test Statistic"])), 
+  p.value = min(resScore0$test[,"P-value"]))
+\end{lstlisting}
+
+\begin{verbatim}
+   statistic      p.value 
+6.036264e+00 1.577228e-09
+\end{verbatim}
+
+
+We can compare the adjustment using the max distribution to bonferroni:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+data.frame(link = displayScore$table[,"link"],
+           none = displayScore$table[,"p.value"],
+           bonferroni = displayScore$table[,"p.value"]*displayScore$table[1,"nTests"],
+           max = displayScore$table[,"adjusted.p.value"])
+\end{lstlisting}
+
+\begin{verbatim}
+    link         none   bonferroni          max
+1   u~x2 1.577228e-09 1.577228e-08 5.008615e-08
+2 y2~~y3 8.559203e-03 8.559203e-02 6.056378e-02
+3  y3~x1 7.656125e-02 7.656125e-01 2.814343e-01
+\end{verbatim}
+
+
+In theory, the correction based on the max statistic should give a
+p-value that is smaller than or equal to the p-value adjusted using
+Bonferroni. However for very small p-values, the max-correction
+can be numerically inaccurate and result in p-values that are slightly
+larger. The evolution of the estimate of a given coefficient across
+the sequential search can be displayed using \texttt{autoplot}; a call along
+the following lines presumably generated the figure below (the displayed
+coefficient is chosen for illustration):
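+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## illustrative call: display the estimate of u~x1 across the search steps
+autoplot(resScore, param = "u~x1")
+\end{lstlisting}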
+
+\begin{center}
+\includegraphics[width=.9\linewidth]{./modelsearch.png}
+\end{center}
+
+In many cases, not all links are plausible, so the user should
+indicate which links should be investigated by \texttt{modelsearch2}. This
+can be done via the argument \texttt{link}:
+
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+resRed <- modelsearch2(e.lvm, link = c("y1~~y2","y1~~y3","y2~~y3"), trace = FALSE)
+print(resRed)
+\end{lstlisting}
+
+\begin{verbatim}
+Sequential search for local dependence using the score statistic 
+The variable selection procedure did not retain any variable 
+    link statistic    p.value adjusted.p.value dp.Info selected nTests
+1 y1~~y3  3.076875 0.07941299        0.1818963       1    FALSE      3
+Confidence level: 0.95 (two sided, adjustement: fastmax)
+\end{verbatim}
+
+
+The function \texttt{findNewLink} can help the user to identify the set of
+relevant links:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+findNewLink(e.lvm$model, type = "covariance")$link
+\end{lstlisting}
+
+\begin{verbatim}
+[1] "y1~~y2" "y1~~y3" "y2~~y3"
+\end{verbatim}
+
+\subsection{Checking that the names of the variables in the model match those of the data}
+\label{sec:org47cf06d}
+
+When estimating latent variable models using \textbf{lava}, it sometimes
+happens that the model does not converge:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## simulate data
+set.seed(10)
+df.data <- sim(lvm(Y~X1+X2), 1e2)
+
+## fit model
+mWrong <- lvm(Y ~ X + X2)
+eWrong <- estimate(mWrong, data = df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+Warning messages:
+1: In estimate.lvm(mWrong, data = df.data) :
+  Lack of convergence. Increase number of iteration or change starting values.
+2: In sqrt(diag(asVar)) : NaNs produced
+\end{verbatim}
+
+
+This can have several reasons:
+\begin{itemize}
+\item the model is not identifiable.
+\item the optimization routine did not manage to find a local
+optimum. This may happen for complex latent variable models where the
+objective function is not convex, even locally.
+\item the user has made a mistake when defining the model or has not given
+the appropriate dataset.
+\end{itemize}
+
+The \texttt{checkData} function enables checking the last point. It compares
+the observed variables defined in the model with those present in the
+dataset. In case of a mismatch it returns a message:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+checkData(mWrong, df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+Missing variable in data: X
+\end{verbatim}
+
+
+In the presence of latent variables, the user needs to explicitly define
+them in the model, otherwise \texttt{checkData} will identify them as an
+issue:
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+## simulate data
+set.seed(10)
+mSim <- lvm(c(Y1,Y2,Y3)~eta)
+latent(mSim) <- ~eta
+df.data <- sim(mSim, n = 1e2, latent = FALSE)
+
+## fit model
+m <- lvm(c(Y1,Y2,Y3)~eta)
+checkData(m, data = df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+Missing variable in data: eta
+\end{verbatim}
+
+
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+latent(m) <- ~eta
+checkData(m, data = df.data)
+\end{lstlisting}
+
+\begin{verbatim}
+No issue detected
+\end{verbatim}
+
+
+
+\clearpage
+
+\section{Information about the R session used for this document}
+\label{sec:org95dd3ad}
+
+\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
+\begin{lstlisting}
+sessionInfo()
+\end{lstlisting}
+
+\begin{verbatim}
+R version 4.2.0 (2022-04-22)
+Platform: x86_64-pc-linux-gnu (64-bit)
+Running under: Ubuntu 20.04.4 LTS
+
+Matrix products: default
+BLAS:   /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.9.0
+LAPACK: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.9.0
+
+locale:
+ [1] LC_CTYPE=en_US.UTF-8       LC_NUMERIC=C               LC_TIME=en_US.UTF-8       
+ [4] LC_COLLATE=en_US.UTF-8     LC_MONETARY=en_US.UTF-8    LC_MESSAGES=en_US.UTF-8   
+ [7] LC_PAPER=en_US.UTF-8       LC_NAME=C                  LC_ADDRESS=C              
+[10] LC_TELEPHONE=C             LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C       
+
+attached base packages:
+[1] stats     graphics  grDevices utils     datasets  methods   base     
+
+other attached packages:
+[1] lavaSearch2_2.0.1 lava_1.7.2        ggplot2_3.4.0     butils.base_1.2  
+[5] Rcpp_1.0.9        devtools_2.4.3    usethis_2.1.5     data.table_1.14.2
+
+loaded via a namespace (and not attached):
+ [1] pkgload_1.2.4            splines_4.2.0            foreach_1.5.2           
+ [4] brio_1.1.3               assertthat_0.2.1         butils_1.4.7            
+ [7] remotes_2.4.2            sessioninfo_1.2.2        globals_0.16.1          
+[10] numDeriv_2016.8-1.1      pillar_1.8.1             lattice_0.20-45         
+[13] glue_1.6.2               digest_0.6.31            colorspace_2.0-3        
+[16] sandwich_3.0-2           Matrix_1.4-1             plyr_1.8.7              
+[19] pkgconfig_2.0.3          listenv_0.8.0            purrr_1.0.0             
+[22] mvtnorm_1.1-3            scales_1.2.1             processx_3.5.3          
+[25] tibble_3.1.8             generics_0.1.3           ellipsis_0.3.2          
+[28] TH.data_1.1-1            cachem_1.0.6             withr_2.5.0             
+[31] cli_3.5.0                survival_3.5-0           magrittr_2.0.3          
+[34] crayon_1.5.2             memoise_2.0.1            ps_1.7.0                
+[37] fs_1.5.2                 future_1.28.0            fansi_1.0.3             
+[40] parallelly_1.32.1        doParallel_1.0.17        nlme_3.1-157            
+[43] MASS_7.3-57              xml2_1.3.3               RcppArmadillo_0.11.2.0.0
+[46] pkgbuild_1.3.1           progressr_0.11.0         tools_4.2.0             
+[49] prettyunits_1.1.1        lifecycle_1.0.3          multcomp_1.4-20         
+[52] stringr_1.5.0            munsell_0.5.0            callr_3.7.0             
+[55] compiler_4.2.0           rlang_1.0.6              grid_4.2.0              
+[58] iterators_1.0.14         boot_1.3-28              testthat_3.1.4          
+[61] gtable_0.3.1             codetools_0.2-18         abind_1.4-5             
+[64] DBI_1.1.3                roxygen2_7.2.1           reshape2_1.4.4          
+[67] R6_2.5.1                 zoo_1.8-11               knitr_1.39              
+[70] dplyr_1.0.10             fastmap_1.1.0            future.apply_1.9.1      
+[73] utf8_1.2.2               rprojroot_2.0.3          desc_1.4.1              
+[76] stringi_1.7.8            parallel_4.2.0           vctrs_0.5.1             
+[79] tidyselect_1.2.0         xfun_0.31
+\end{verbatim}
 \end{document}
\ No newline at end of file
diff --git a/inst/doc/overview.ltx b/inst/doc/overview.ltx
deleted file mode 100644
index 00822b8..0000000
--- a/inst/doc/overview.ltx
+++ /dev/null
@@ -1,1189 +0,0 @@
-% Created 2019-04-04 to 16:15
-% Intended LaTeX compiler: pdflatex
-\documentclass[12pt]{article}
-
-%%%% settings when exporting code %%%% 
-
-\usepackage{listings}
-\lstset{
-backgroundcolor=\color{white},
-basewidth={0.5em,0.4em},
-basicstyle=\ttfamily\small,
-breakatwhitespace=false,
-breaklines=true,
-columns=fullflexible,
-commentstyle=\color[rgb]{0.5,0,0.5},
-frame=single,
-keepspaces=true,
-keywordstyle=\color{black},
-literate={~}{$\sim$}{1},
-numbers=left,
-numbersep=10pt,
-numberstyle=\ttfamily\tiny\color{gray},
-showspaces=false,
-showstringspaces=false,
-stepnumber=1,
-stringstyle=\color[rgb]{0,.5,0},
-tabsize=4,
-xleftmargin=.23in,
-emph={anova,apply,class,coef,colnames,colNames,colSums,dim,dcast,for,ggplot,head,if,ifelse,is.na,lapply,list.files,library,logLik,melt,plot,require,rowSums,sapply,setcolorder,setkey,str,summary,tapply},
-emphstyle=\color{blue}
-}
-
-%%%% packages %%%%%
-
-\usepackage[utf8]{inputenc}
-\usepackage[T1]{fontenc}
-\usepackage{lmodern}
-\usepackage{textcomp}
-\usepackage{color}
-\usepackage{enumerate}
-\usepackage{graphicx}
-\usepackage{grffile}
-\usepackage{wrapfig}
-\usepackage{rotating}
-\usepackage{longtable}
-\usepackage{multirow}
-\usepackage{multicol}
-\usepackage{changes}
-\usepackage{pdflscape}
-\usepackage{geometry}
-\usepackage[normalem]{ulem}
-\usepackage{amssymb}
-\usepackage{amsmath}
-\usepackage{amsfonts}
-\usepackage{dsfont}
-\usepackage{array}
-\usepackage{ifthen}
-\usepackage{hyperref}
-\usepackage{natbib}
-%\VignetteIndexEntry{overview}
-%\VignetteEngine{R.rsp::tex}
-%\VignetteKeyword{R}
-\RequirePackage{fancyvrb}
-\DefineVerbatimEnvironment{verbatim}{Verbatim}{fontsize=\small,formatcom = {\color[rgb]{0.5,0,0}}}
-\geometry{a4paper, left=15mm, right=15mm}
-\RequirePackage{colortbl} % arrayrulecolor to mix colors
-\RequirePackage{setspace} % to modify the space between lines - incompatible with footnote in beamer
-\usepackage{authblk} % enable several affiliations (clash with beamer)
-\renewcommand{\baselinestretch}{1.1}
-\geometry{top=1cm}
-\usepackage{enumitem}
-\RequirePackage{xspace} %
-\newcommand\Rlogo{\textbf{\textsf{R}}\xspace} %
-\RequirePackage{epstopdf} % to be able to convert .eps to .pdf image files
-\author{Brice Ozenne}
-\date{\today}
-\title{Overview of the functionalities of the package lavaSearch2}
-\hypersetup{
- colorlinks=true,
- citecolor=[rgb]{0,0.5,0},
- urlcolor=[rgb]{0,0,0.5},
- linkcolor=[rgb]{0,0,0.5},
- pdfauthor={Brice Ozenne},
- pdftitle={Overview of the functionalities of the package lavaSearch2},
- pdfkeywords={},
- pdfsubject={},
- pdfcreator={Emacs 25.2.1 (Org mode 9.0.4)},
- pdflang={English}
- }
-\begin{document}
-
-\maketitle
-Load \textbf{lavaSearch2} in the R session:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-library(lavaSearch2)
-\end{lstlisting}
-
-\section{Inference}
-\label{sec:orgfd6e90a}
-\subsection{Introductory example}
-\label{sec:org3f1e0eb}
-You may have noticed that for simple linear regression, the p-values
-of the Wald tests from \texttt{lm}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(Y[1:1]~0.3*X1+0.2*X2)
-set.seed(10)
-df.data <- sim(mSim, 2e1)
-
-## fit linear model
-summary(lm(Y~X1+X2, data = df.data))$coef
-\end{lstlisting}
-
-\begin{verbatim}
-             Estimate Std. Error   t value    Pr(>|t|)
-(Intercept) 0.7967775  0.2506767 3.1785069 0.005495832
-X1          0.1550938  0.2205080 0.7033477 0.491360483
-X2          0.4581556  0.2196785 2.0855736 0.052401103
-\end{verbatim}
-
-differ from those obtained with the corresponding latent variable
-model estimated by maximum likelihood:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## fit latent variable model
-m <- lvm(Y~X1+X2)
-e <- estimate(m, data = df.data)
-
-## extract Wald tests
-summary(e)$coef
-\end{lstlisting}
-
-\begin{verbatim}
-      Estimate Std. Error   Z-value      P-value
-Y~X1 0.1550938  0.2032984 0.7628877 0.4455303456
-Y~X2 0.4581556  0.2025335 2.2621221 0.0236898575
-Y~~Y 0.5557910  0.1757566 3.1622777           NA
-Y    0.7967775  0.2311125 3.4475747 0.0005656439
-\end{verbatim}
-
-For instance, the p-value for the effect of X2 is 0.024 in the latent
-variable model and 0.052 in the linear regression. The discrepancy is
-due to two corrections that \texttt{lm} applies in order to improve the control
-of the type 1 error of the Wald tests (both are reconstructed in the
-sketch after this list):
-\begin{itemize}
-\item use of a Student \(t\)-distribution instead of a Gaussian
-distribution (informally, using a t-value instead of a z-value).
-\item use of an unbiased estimator of the residual variance instead of
-the ML estimator.
-\end{itemize}
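-
-As a minimal reconstruction (an illustration written for this overview,
-not code taken from either package), both corrections can be applied by
-hand to the ML quantities, recovering the \texttt{lm} p-value for X2:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e.lm <- lm(Y~X1+X2, data = df.data)
-n <- nobs(e.lm) ; p <- length(coef(e.lm)) ## n = 20, p = 3
-X <- model.matrix(e.lm)
-sigma2.ML <- mean(residuals(e.lm)^2) ## ML (biased) residual variance
-se.ML <- sqrt(diag(solve(crossprod(X)) * sigma2.ML))["X2"] ## ~0.2025, as in lava
-se.lm <- se.ML * sqrt(n/(n - p)) ## ~0.2197, the unbiased version used by lm
-2 * pt(abs(coef(e.lm)["X2"]/se.lm), df = n - p, lower.tail = FALSE) ## ~0.052
-\end{lstlisting}
-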
-\textbf{lavaSearch2} attempts to generalize these corrections to models with
-correlated and heteroscedastic measurements. In the case of a simple
-linear regression, the Wald tests obtained with \textbf{lavaSearch2} exactly
-match the results of \texttt{lm}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e)$coef
-\end{lstlisting}
-
-\begin{verbatim}
-      Estimate Std. Error   t-value    P-value    df
-Y~X1 0.1550938  0.2205078 0.7033483 0.49136012 17.00
-Y~X2 0.4581556  0.2196783 2.0855754 0.05240092 17.00
-Y~~Y 0.6538707  0.2242758 2.9154759         NA  4.25
-Y    0.7967775  0.2506765 3.1785096 0.00549580 17.00
-\end{verbatim}
-
-\subsection{How it works in a nutshell}
-\label{sec:org89617bf}
-
-When using \textbf{lava}, the p-values obtained from the summary
-(Wald tests) rely on a Gaussian approximation and maximum likelihood
-estimation. While asymptotically valid, they usually do not
-provide a very accurate control of the type 1 error rate in small
-samples. Simulations have shown that the type 1 error rate tends to be
-too large, i.e. the p-values have a downward bias. \textbf{lavaSearch2}
-provides two improvements:
-\begin{itemize}
-\item using a Student's \(t\)-distribution instead of a Gaussian
-distribution to account for the uncertainty about the variance of the
-coefficients. The degrees of freedom are estimated using a Satterthwaite
-approximation, i.e. identifying the chi-squared distribution that
-best fits the observed moments of the variance of the coefficients
-(see the sketch after this list).
-\item (partially) correcting for the first order bias in the ML estimates
-of the variance parameters. This correction also affects the
-standard error of the estimates.
-\end{itemize}
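-
-A sketch of the underlying moment matching (our notation, not the
-package internals): writing \(\widehat{V}\) for the estimated variance
-of a coefficient and approximating its distribution by
-\(V\chi^2_{df}/df\), equating the first two moments gives
-\begin{equation*}
-df = \frac{2\widehat{V}^2}{\widehat{\operatorname{Var}}\left[\widehat{V}\right]},
-\end{equation*}
-where the denominator is typically obtained by a delta method from the
-variance-covariance matrix of the model parameters.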
-
-\subsection{Single univariate Wald test}
-\label{sec:org8e0ca86}
-
-We will illustrate the functionalities using a simulated dataset:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(Y1~eta,Y2~eta,Y3~0.4+0.4*eta,Y4~0.6+0.6*eta,eta~0.5*X1+0.7*X2)
-latent(mSim) <- ~eta
-set.seed(12)
-df.data <- sim(mSim, n = 3e1, latent = FALSE)
-
-## display
-head(df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-          Y1         Y2          Y3         Y4         X1         X2
-1 -1.7606233  0.1264910  0.66442611  0.2579355  0.2523400 -1.5431527
-2  3.0459417  2.4631929  0.00283511  2.1714802  0.6423143 -1.3206009
-3 -2.1443162 -0.3318033  0.82253070  0.3008415 -0.3469361 -0.6758215
-4 -2.5050328 -1.3878987 -0.10474850 -1.7814956 -0.5152632 -0.3670054
-5 -2.5307249  0.3012422  1.22046986 -1.0195188  0.3981689 -0.5138722
-6 -0.9521366  0.1669496 -0.21422548  1.5954456  0.9535572 -0.9592540
-\end{verbatim}
-
-We first fit the latent variable model using, as usual, the \texttt{estimate}
-function:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-m <- lvm(c(Y1,Y2,Y3,Y4)~eta, eta~X1+X2)
-e <- estimate(m, data = df.data)
-\end{lstlisting}
-
-We can extract the Wald tests based on the traditional approach using
-\texttt{summary}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   Z-value      P-value
-Y2     0.2335412  0.2448593 0.9537775 0.3401962906
-Y3     0.5114275  0.1785886 2.8637186 0.0041869974
-Y2~eta 0.9192847  0.2621248 3.5070497 0.0004531045
-Y3~eta 0.2626930  0.1558978 1.6850339 0.0919820326
-eta~X1 0.5150072  0.2513393 2.0490515 0.0404570768
-eta~X2 0.6212222  0.2118930 2.9317729 0.0033703310
-\end{verbatim}
-
-As explained at the beginning of this section, \textbf{lavaSearch2} implements
-two corrections that can be directly applied by calling the \texttt{summary2}
-method:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value     P-value        df
-Y2     0.2335412  0.2518218 0.9274067 0.371516094 12.328385
-Y3     0.5114275  0.1828716 2.7966475 0.009848769 24.707696
-Y2~eta 0.9192847  0.2653220 3.4647887 0.031585600  3.515034
-Y3~eta 0.2626930  0.1562776 1.6809386 0.143826633  5.993407
-eta~X1 0.5150072  0.2642257 1.9491180 0.065414617 20.044312
-eta~X2 0.6212222  0.2221293 2.7966698 0.009275494 27.718363
-\end{verbatim}
-
-To use the Satterthwaite correction alone, set the argument
-\texttt{bias.correct} to \texttt{FALSE}:
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e, bias.correct = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value     P-value        df
-Y2     0.2335412  0.2448593 0.9537775 0.357711941 12.911877
-Y3     0.5114275  0.1785886 2.8637186 0.008210968 25.780552
-Y2~eta 0.9192847  0.2621248 3.5070497 0.028396459  3.674640
-Y3~eta 0.2626930  0.1558978 1.6850339 0.141185621  6.222912
-eta~X1 0.5150072  0.2513393 2.0490515 0.052814794 21.571210
-eta~X2 0.6212222  0.2118930 2.9317729 0.006351686 30.370334
-\end{verbatim}
-
-When using the Satterthwaite correction alone, the standard errors are
-left unchanged compared to the original lava output. The only change
-is how the p-values are computed, i.e. based on the quantiles of a
-Student's \(t\)-distribution instead of a Gaussian distribution.
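-
-As a quick check (an illustration for this overview, not package code),
-this p-value can be recovered from the uncorrected t-value and the
-degrees of freedom reported above:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## eta~X1: t-value and Satterthwaite df taken from the previous output
-2 * pt(2.0490515, df = 21.571210, lower.tail = FALSE) * 2/2 * 1 ## placeholder
-\end{lstlisting}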
-
-To only use the bias correction, set the argument \texttt{df} to \texttt{FALSE}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e, df = FALSE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value      P-value  df
-Y2     0.2335412  0.2518218 0.9274067 0.3537154044 Inf
-Y3     0.5114275  0.1828716 2.7966475 0.0051635832 Inf
-Y2~eta 0.9192847  0.2653220 3.4647887 0.0005306482 Inf
-Y3~eta 0.2626930  0.1562776 1.6809386 0.0927748494 Inf
-eta~X1 0.5150072  0.2642257 1.9491180 0.0512813393 Inf
-eta~X2 0.6212222  0.2221293 2.7966698 0.0051632271 Inf
-\end{verbatim}
-
-
-\subsection{Saving computation time with \texttt{sCorrect}}
-\label{sec:org86bc5ce}
-For each call to \texttt{summary2} the small sample correction(s) will
-be recalculated. However, the calculation of the correction(s)
-can be time consuming.
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-system.time(
-	res <- summary2(e, bias.correct = FALSE)
-)
-\end{lstlisting}
-
-\begin{verbatim}
-user  system elapsed 
-0.25    0.00    0.25
-\end{verbatim}
-
-In such a case one can pre-compute the main terms of the correction
-(e.g. the derivative of the variance-covariance matrix) once and for all
-using the \texttt{sCorrect} method (\texttt{sCorrect} stands for Satterthwaite
-correction). When calling \texttt{sCorrect}, the right-hand side indicates
-whether the bias correction should be used (equivalent to the
-\texttt{bias.correct} argument described previously):
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e2 <- e
-sCorrect(e2) <- TRUE
-\end{lstlisting}
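-
-The Satterthwaite approximation alone can be requested with the same
-syntax (a sketch, assuming the right-hand side mirrors the
-\texttt{bias.correct} argument as stated above):
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e3 <- e
-sCorrect(e3) <- FALSE ## degrees of freedom, but no bias correction
-\end{lstlisting}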
-
-\texttt{sCorrect} automatically stores the pre-computed terms in the \texttt{sCorrect}
-slot of the object. It also adds the class \texttt{lvmfit2} to the object:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-class(e2)
-\end{lstlisting}
-
-\begin{verbatim}
-[1] "lvmfit2" "lvmfit"
-\end{verbatim}
-
-The p-values computed using the small sample correction can then be
-obtained by calling \texttt{summary2}, as usual:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e2)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate Std. Error   t-value     P-value        df
-Y2     0.2335412  0.2518218 0.9274067 0.371516094 12.328385
-Y3     0.5114275  0.1828716 2.7966475 0.009848769 24.707696
-Y2~eta 0.9192847  0.2653220 3.4647887 0.031585600  3.515034
-Y3~eta 0.2626930  0.1562776 1.6809386 0.143826633  5.993407
-eta~X1 0.5150072  0.2642257 1.9491180 0.065414617 20.044312
-eta~X2 0.6212222  0.2221293 2.7966698 0.009275494 27.718363
-\end{verbatim}
-
-The \texttt{summary2} method now takes approximately the same time as the
-usual \texttt{summary} method:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-system.time(
-	summary2(e2)
-)
-\end{lstlisting}
-
-\begin{verbatim}
-user  system elapsed 
-0.19    0.00    0.19
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-system.time(
-	summary(e2)
-)
-\end{lstlisting}
-
-\begin{verbatim}
-user  system elapsed 
-0.15    0.00    0.16
-\end{verbatim}
-
-\subsection{Single multivariate Wald test}
-\label{sec:org84008c5}
-
-The function \texttt{compare} from the lava package can be used to perform
-multivariate Wald tests, i.e. to simultaneously test several linear
-combinations of the coefficients. \texttt{compare} uses a contrast matrix
-whose rows encode which linear combinations of the coefficients should
-be tested. For instance, if we want to simultaneously test whether all
-the mean coefficients are 0, we can create a contrast matrix using
-\texttt{createContrast}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resC <- createContrast(e2, par = c("Y2=0","Y2~eta=0","eta~X1=0"))
-resC
-\end{lstlisting}
-
-\begin{verbatim}
-$contrast
-             Y2 Y3 Y4 eta Y2~eta Y3~eta Y4~eta eta~X1 eta~X2 Y1~~Y1 Y2~~Y2 Y3~~Y3 Y4~~Y4
-[Y2] = 0      1  0  0   0      0      0      0      0      0      0      0      0      0
-[Y2~eta] = 0  0  0  0   0      1      0      0      0      0      0      0      0      0
-[eta~X1] = 0  0  0  0   0      0      0      0      1      0      0      0      0      0
-             eta~~eta
-[Y2] = 0            0
-[Y2~eta] = 0        0
-[eta~X1] = 0        0
-
-$null
-    [Y2] = 0 [Y2~eta] = 0 [eta~X1] = 0 
-           0            0            0 
-
-$Q
-[1] 3
-\end{verbatim}
-
-We can then test the linear hypothesis by specifying in \texttt{compare} the
-left-hand side of the hypothesis (argument \texttt{contrast}) and the
-right-hand side (argument \texttt{null}):
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest0 <- lava::compare(e2, contrast = resC$contrast, null = resC$null)
-resTest0
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-chisq = 21.332, df = 3, p-value = 8.981e-05
-sample estimates:
-          Estimate   Std.Err       2.5%     97.5%
-[Y2]     0.2335412 0.2448593 -0.2463741 0.7134566
-[Y2~eta] 0.9192847 0.2621248  0.4055295 1.4330399
-[eta~X1] 0.5150072 0.2513393  0.0223912 1.0076231
-\end{verbatim}
-
-\texttt{compare} uses a chi-squared distribution to compute the p-values.
-As with the Gaussian approximation, this procedure is asymptotically
-valid but may not provide a very accurate control of the type 1 error
-rate in small samples. Fortunately, the correction proposed for the
-univariate Wald statistic can be adapted to the multivariate Wald
-statistic. This is achieved by \texttt{compare2}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest1 <- compare2(e2, contrast = resC$contrast, null = resC$null)
-resTest1
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 6.7118, df1 = 3, df2 = 11.1, p-value = 0.007596
-sample estimates:
-              Estimate   Std.Err        df       2.5%     97.5%
-[Y2] = 0     0.2335412 0.2518218 12.328385 -0.3135148 0.7805973
-[Y2~eta] = 0 0.9192847 0.2653220  3.515034  0.1407653 1.6978041
-[eta~X1] = 0 0.5150072 0.2642257 20.044312 -0.0360800 1.0660943
-\end{verbatim}
-
-The same result could have been obtained using the \texttt{par} argument to
-define the linear hypothesis:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest2 <- compare2(e2, par = c("Y2","Y2~eta","eta~X1"))
-identical(resTest1,resTest2)
-\end{lstlisting}
-
-\begin{verbatim}
-[1] TRUE
-\end{verbatim}
-
-Now an F-distribution is used to compute the p-values. As before, one
-can set the argument \texttt{bias.correct} to \texttt{FALSE} to use the Satterthwaite
-approximation alone:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest3 <- compare2(e, bias.correct = FALSE, 
-					  contrast = resC$contrast, null = resC$null)
-resTest3
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 7.1107, df1 = 3, df2 = 11.13, p-value = 0.006182
-sample estimates:
-              Estimate   Std.Err       df         2.5%     97.5%
-[Y2] = 0     0.2335412 0.2448593 12.91188 -0.295812256 0.7628948
-[Y2~eta] = 0 0.9192847 0.2621248  3.67464  0.165378080 1.6731913
-[eta~X1] = 0 0.5150072 0.2513393 21.57121 -0.006840023 1.0368543
-\end{verbatim}
-
-In this case the F-statistic of \texttt{compare2} is the same as the
-chi-squared statistic of \texttt{compare} divided by the rank of the contrast matrix:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resTest0$statistic/qr(resC$contrast)$rank
-\end{lstlisting}
-
-\begin{verbatim}
-   chisq 
-7.110689
-\end{verbatim}
-
-\subsection{Robust Wald tests}
-\label{sec:org67840d2}
-
-When one does not want to assume normally distributed residuals,
-robust standard errors can be used instead of the model-based standard
-errors. They can be obtained by setting the argument \texttt{robust} to \texttt{TRUE}
-when computing univariate Wald tests:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary2(e, robust = TRUE)$coef[c("Y2","Y3","Y2~eta","Y3~eta","eta~X1","eta~X2"), ]
-\end{lstlisting}
-
-\begin{verbatim}
-        Estimate robust SE   t-value      P-value       df
-Y2     0.2335412 0.2353245 0.9924222 0.3340117610 18.18841
-Y3     0.5114275 0.1897160 2.6957535 0.0099985389 42.79555
-Y2~eta 0.9192847 0.1791240 5.1321150 0.0002361186 12.19058
-Y3~eta 0.2626930 0.1365520 1.9237585 0.0653095551 26.20919
-eta~X1 0.5150072 0.2167580 2.3759546 0.0315112789 14.74859
-eta~X2 0.6212222 0.2036501 3.0504389 0.0035239307 54.54181
-\end{verbatim}
-
-or multivariate Wald tests:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-compare2(e2, robust = TRUE, par = c("Y2","Y2~eta","eta~X1"))
-\end{lstlisting}
-
-\begin{verbatim}
-	- Wald test -
-
-	Null Hypothesis:
-	[Y2] = 0
-	[Y2~eta] = 0
-	[eta~X1] = 0
-
-data:  
-F-statistic = 12.526, df1 = 3, df2 = 23.97, p-value = 3.981e-05
-sample estimates:
-              Estimate robust SE       df        2.5%     97.5%
-[Y2] = 0     0.2335412 0.2353245 18.18841 -0.26049031 0.7275728
-[Y2~eta] = 0 0.9192847 0.1791240 12.19058  0.52968275 1.3088867
-[eta~X1] = 0 0.5150072 0.2167580 14.74859  0.05231154 0.9777028
-\end{verbatim}
-
-Only the standard errors are affected by the argument \texttt{robust}; the
-degrees of freedom are those of the model-based standard errors. It
-may be surprising that the (corrected) robust standard errors are (in
-this example) smaller than the (corrected) model-based ones. This is
-also the case for the uncorrected ones:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-rbind(robust = diag(crossprod(iid(e2))),
-	  model = diag(vcov(e2)))
-\end{lstlisting}
-
-\begin{verbatim}
-               Y2         Y3         Y4        eta     Y2~eta     Y3~eta     Y4~eta
-robust 0.04777252 0.03325435 0.03886706 0.06011727 0.08590732 0.02179453 0.02981895
-model  0.05995606 0.03189389 0.04644303 0.06132384 0.06870941 0.02430412 0.03715633
-           eta~X1     eta~X2    Y1~~Y1    Y2~~Y2     Y3~~Y3     Y4~~Y4  eta~~eta
-robust 0.05166005 0.05709393 0.2795272 0.1078948 0.03769614 0.06923165 0.3198022
-model  0.06317144 0.04489865 0.1754744 0.1600112 0.05112998 0.10152642 0.2320190
-\end{verbatim}
-
-This may be explained by the fact that the robust standard error tends
-to be liberal in small samples (see, e.g., Kauermann and Carroll 2001,
-``A Note on the Efficiency of Sandwich Covariance Matrix Estimation'').
-
-\subsection{Assessing the type 1 error of the testing procedure}
-\label{sec:orgf5e63db}
-
-The function \texttt{calibrateType1} can be used to assess the type 1 error
-rate of a Wald statistic on a specific example. This however assumes
-that the estimated model is correctly specified. Let's consider an
-example; first we simulate some data:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-set.seed(10)
-m.generative <- lvm(Y ~ X1 + X2 + Gene)
-categorical(m.generative, labels = c("ss","ll")) <- ~Gene
-d <- lava::sim(m.generative, n = 50, latent = FALSE)
-\end{lstlisting}
-
-Let's now imagine that we want to analyze the relationship between
-Y and Gene using the following dataset:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-head(d)
-\end{lstlisting}
-
-\begin{verbatim}
-            Y         X1         X2 Gene
-1 -1.14369572 -0.4006375 -0.7618043   ss
-2 -0.09943370 -0.3345566  0.4193754   ss
-3 -0.04331996  1.3679540 -1.0399434   ll
-4  2.25017335  2.1377671  0.7115740   ss
-5  0.16715138  0.5058193 -0.6332130   ss
-6  1.73931135  0.7863424  0.5631747   ss
-\end{verbatim}
-
-For this we define a LVM:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-myModel <- lvm(Y ~ X1 + X2 + Gene)
-\end{lstlisting}
-
-and estimate the coefficients of the model using \texttt{estimate}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e <- estimate(myModel, data = d)
-e
-\end{lstlisting}
-
-\begin{verbatim}
-                    Estimate Std. Error  Z-value  P-value
-Regressions:                                             
-   Y~X1              1.02349    0.12017  8.51728   <1e-12
-   Y~X2              0.91519    0.12380  7.39244   <1e-12
-   Y~Genell          0.48035    0.23991  2.00224  0.04526
-Intercepts:                                              
-   Y                -0.11221    0.15773 -0.71141   0.4768
-Residual Variances:                                      
-   Y                 0.67073    0.13415  5.00000
-\end{verbatim}
-
-We can now use \texttt{calibrateType1} to perform a simulation study. We just
-need to define the null hypotheses (i.e. which coefficients should be
-set to 0 when generating the data) and the number of simulations:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-mySimulation <- calibrateType1(e, 
-							   param = "Y~Genell",
-							   n.rep = 50, 
-							   trace = FALSE, seed = 10)
-\end{lstlisting}
-
-To save time we only run 50 simulations, but many more are necessary
-to reliably assess the type 1 error rate. We can then use the \texttt{summary}
-method to display the results:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(mySimulation)
-\end{lstlisting}
-
-\begin{verbatim}
-Estimated type 1 error rate [95% confidence interval] 
-  > sample size: 50 | number of simulations: 50
-     link statistic correction type1error                  CI
- Y~Genell      Wald       Gaus       0.12 [0.05492 ; 0.24242]
-                          Satt       0.10 [0.04224 ; 0.21869]
-                           SSC       0.10 [0.04224 ; 0.21869]
-                    SSC + Satt       0.08 [0.03035 ; 0.19456]
-
-Corrections: Gaus = Gaussian approximation 
-             SSC  = small sample correction 
-             Satt = Satterthwaite approximation
-\end{verbatim}
-
-
-\clearpage
-
-\section{Adjustment for multiple comparisons}
-\label{sec:org4823f66}
-\subsection{Univariate Wald test, single model}
-\label{sec:org4c37542}
-
-When performing multiple tests, adjustment for multiple comparisons
-is necessary in order to control the type 1 error rate, i.e. to
-provide interpretable p-values. The \textbf{multcomp} package can perform
-such an adjustment when all the tests come from the same \texttt{lvmfit} object:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(Y ~ 0.25 * X1 + 0.3 * X2 + 0.35 * X3 + 0.4 * X4 + 0.45 * X5 + 0.5 * X6)
-set.seed(10)
-df.data <- sim(mSim, n = 4e1)
-
-## fit lvm
-e.lvm <- estimate(lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-name.coef <- names(coef(e.lvm))
-n.coef <- length(name.coef)
-
-## Create contrast matrix
-resC <- createContrast(e.lvm, par = paste0("Y~X",1:6), rowname.rhs = FALSE)
-resC$contrast
-\end{lstlisting}
-
-\begin{verbatim}
-     Y Y~X1 Y~X2 Y~X3 Y~X4 Y~X5 Y~X6 Y~~Y
-Y~X1 0    1    0    0    0    0    0    0
-Y~X2 0    0    1    0    0    0    0    0
-Y~X3 0    0    0    1    0    0    0    0
-Y~X4 0    0    0    0    1    0    0    0
-Y~X5 0    0    0    0    0    1    0    0
-Y~X6 0    0    0    0    0    0    1    0
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e.glht <- multcomp::glht(e.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(e.glht)
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error z value Pr(>|z|)   
-Y~X1 == 0   0.3270     0.1589   2.058  0.20725   
-Y~X2 == 0   0.4025     0.1596   2.523  0.06611 . 
-Y~X3 == 0   0.5072     0.1383   3.669  0.00144 **
-Y~X4 == 0   0.3161     0.1662   1.902  0.28582   
-Y~X5 == 0   0.3875     0.1498   2.586  0.05554 . 
-Y~X6 == 0   0.3758     0.1314   2.859  0.02482 * 
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- single-step method)
-\end{verbatim}
-
-Note that this correction relies on the Gaussian approximation. To use
-the small sample corrections implemented in \textbf{lavaSearch2}, just call
-\texttt{glht2} instead of \texttt{glht}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-e.glht2 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(e.glht2)
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error t value Pr(>|t|)  
-Y~X1 == 0   0.3270     0.1750   1.869   0.3290  
-Y~X2 == 0   0.4025     0.1757   2.291   0.1482  
-Y~X3 == 0   0.5072     0.1522   3.333   0.0123 *
-Y~X4 == 0   0.3161     0.1830   1.727   0.4128  
-Y~X5 == 0   0.3875     0.1650   2.349   0.1315  
-Y~X6 == 0   0.3758     0.1447   2.597   0.0762 .
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- single-step method)
-\end{verbatim}
-
-The single-step method is the appropriate correction when one wants to
-report the most significant p-value relative to a set of
-hypotheses. If the second most significant p-value is also to be
-reported, then the ``free'' method is more efficient:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(e.glht2, test = multcomp::adjusted("free"))
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Fit: estimate.lvm(x = lvm(Y ~ X1 + X2 + X3 + X4 + X5 + X6), data = df.data)
-
-Linear Hypotheses:
-          Estimate Std. Error t value Pr(>|t|)  
-Y~X1 == 0   0.3270     0.1750   1.869   0.1291  
-Y~X2 == 0   0.4025     0.1757   2.291   0.0913 .
-Y~X3 == 0   0.5072     0.1522   3.333   0.0123 *
-Y~X4 == 0   0.3161     0.1830   1.727   0.1291  
-Y~X5 == 0   0.3875     0.1650   2.349   0.0913 .
-Y~X6 == 0   0.3758     0.1447   2.597   0.0645 .
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Adjusted p values reported -- free method)
-\end{verbatim}
-
-See the book ``Multiple Comparisons Using R'' by Frank Bretz, Torsten
-Hothorn, and Peter Westfall (2011, CRC Press) for details about the
-theory underlying the \textbf{multcomp} package.
-
-\subsection{Univariate Wald test, multiple models}
-\label{sec:orgec7907f}
-
-Pipper et al., in ``A Versatile Method for Confirmatory Evaluation of
-the Effects of a Covariate in Multiple Models'' (2012, Journal of the
-Royal Statistical Society, Series C), developed a method to assess the
-effect of an exposure on several outcomes when a different model is
-fitted for each outcome. This method has been implemented in the \texttt{mmm}
-function from the \textbf{multcomp} package for \texttt{glm} and Cox
-models. \textbf{lavaSearch2} extends it to \texttt{lvm} objects.
-
-Let's consider an example where we wish to assess the treatment effect
-on three outcomes X, Y, and Z. We have at hand three measurements
-relative to outcome Z for each individual:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-mSim <- lvm(X ~ Age + 0.5*Treatment,
-			Y ~ Gender + 0.25*Treatment,
-			c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
-			Age[40:5]~1)
-latent(mSim) <- ~eta
-categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
-categorical(mSim, labels = c("male","female")) <- ~Gender
-
-n <- 5e1
-set.seed(10)
-df.data <- sim(mSim, n = n, latent = FALSE)
-head(df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-         X      Age Treatment          Y Gender         Z1         Z2          Z3
-1 39.12289 39.10415   placebo  0.6088958 female  1.8714112  2.2960633 -0.09326935
-2 39.56766 39.25191      SSRI  1.0001325 female  0.9709943  0.6296226  1.31035910
-3 41.68751 43.05884   placebo  2.1551047 female -1.1634011 -0.3332927 -1.30769267
-4 44.68102 44.78019      SSRI  0.3852728 female -1.0305476  0.6678775  0.99780139
-5 41.42559 41.13105   placebo -0.8666783   male -1.6342816 -0.8285492  1.20450488
-6 42.64811 41.75832      SSRI -1.0710170 female -1.2198019 -1.9602130 -1.85472132
-   treatment
-1  1.1639675
-2 -1.5233846
-3 -2.5183351
-4 -0.7075292
-5 -0.2874329
-6 -0.4353083
-\end{verbatim}
-
-We fit a model specific to each outcome:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-lmX <- lm(X ~ Age + Treatment, data = df.data)
-lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
-lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ 1*eta, eta ~ -1 + Treatment), 
-				 data = df.data)
-\end{lstlisting}
-
-and combine them into a list of \texttt{lvmfit} objects:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-mmm.lvm <- multcomp::mmm(X = lmX, Y = lvmY, Z = lvmZ)
-\end{lstlisting}
-
-We can then generate a contrast matrix to test each coefficient
-related to the treatment:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resC <- createContrast(mmm.lvm, var.test = "Treatment", add.variance = TRUE)
-resC$contrast
-\end{lstlisting}
-
-\begin{verbatim}
-                     X: (Intercept) X: Age X: TreatmentSSRI X: sigma2 Y: Y
-X: TreatmentSSRI                  0      0                1         0    0
-Y: Y~TreatmentSSRI                0      0                0         0    0
-Z: eta~TreatmentSSRI              0      0                0         0    0
-                     Y: Y~Genderfemale Y: Y~TreatmentSSRI Y: Y~~Y Z: Z1 Z: Z2 Z: Z3
-X: TreatmentSSRI                     0                  0       0     0     0     0
-Y: Y~TreatmentSSRI                   0                  1       0     0     0     0
-Z: eta~TreatmentSSRI                 0                  0       0     0     0     0
-                     Z: eta~TreatmentSSRI Z: Z1~~Z1 Z: Z2~~Z2 Z: Z3~~Z3 Z: eta~~eta
-X: TreatmentSSRI                        0         0         0         0           0
-Y: Y~TreatmentSSRI                      0         0         0         0           0
-Z: eta~TreatmentSSRI                    1         0         0         0           0
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-lvm.glht2 <- glht2(mmm.lvm, linfct = resC$contrast, rhs = resC$null)
-summary(lvm.glht2)
-\end{lstlisting}
-
-\begin{verbatim}
-
-	 Simultaneous Tests for General Linear Hypotheses
-
-Linear Hypotheses:
-                          Estimate Std. Error t value Pr(>|t|)
-X: TreatmentSSRI == 0       0.4661     0.2533   1.840    0.187
-Y: Y~TreatmentSSRI == 0    -0.5421     0.2613  -2.074    0.117
-Z: eta~TreatmentSSRI == 0  -0.6198     0.4404  -1.407    0.393
-(Adjusted p values reported -- single-step method)
-\end{verbatim}
-
-This can be compared to the unadjusted p-values:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-summary(lvm.glht2, test = multcomp::univariate())
-\end{lstlisting}
-
-\begin{verbatim}
-	 Simultaneous Tests for General Linear Hypotheses
-
-Linear Hypotheses:
-                          Estimate Std. Error t value Pr(>|t|)  
-X: TreatmentSSRI == 0       0.4661     0.2533   1.840   0.0720 .
-Y: Y~TreatmentSSRI == 0    -0.5421     0.2613  -2.074   0.0435 *
-Z: eta~TreatmentSSRI == 0  -0.6198     0.4404  -1.407   0.1659  
----
-Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
-(Univariate p values reported)
-\end{verbatim}
-
-
-\clearpage 
-
-\section{Model diagnostic}
-\label{sec:org1c04a31}
-\subsection{Detection of local dependencies}
-\label{sec:org6d7e4f5}
-
-The \texttt{modelsearch} function of \textbf{lava} is a diagnostic tool for latent
-variable models. It makes it possible to search for local dependencies
-(i.e. model misspecifications) and to add them to the model. Obviously
-it is a data-driven procedure and its usefulness can be debated,
-especially in small samples:
-\begin{itemize}
-\item the procedure is unstable, i.e. it is likely to lead to two
-different models when applied to two different datasets sampled from
-the same generative model.
-\item it is hard to define a meaningful significance threshold since
-p-values should be adjusted for multiple comparisons and sequential
-testing. However, traditional methods like Bonferroni-Holm tend to
-over-correct and therefore reduce the power of the procedure, since
-they assume that the tests are independent.
-\end{itemize}
-
-The function \texttt{modelsearch2} in \textbf{lavaSearch2} partially solves the
-second issue by adjusting the p-values for multiple testing. Let's see
-an example:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-mSim <- lvm(c(y1,y2,y3)~u, u~x1+x2)
-latent(mSim) <- ~u
-covariance(mSim) <- y2~y3
-transform(mSim, Id~u) <- function(x){1:NROW(x)}
-set.seed(10)
-df.data <- lava::sim(mSim, n = 125, latent = FALSE)
-head(df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-          y1           y2         y3         x1         x2 Id
-1  5.5071523  4.883752014  6.2928016  0.8694750  2.3991549  1
-2 -0.6398644  0.025832617  0.5088030 -0.6800096 -0.0898721  2
-3 -2.5835495 -2.616715027 -2.8982645  0.1732145 -0.8216484  3
-4 -2.5312637 -2.518185427 -2.9015033 -0.1594380 -0.2869618  4
-5  1.6346220 -0.001877577  0.3705181  0.7934994  0.1312789  5
-6  0.4939972  1.759884014  1.5010499  1.6943505 -1.0620840  6
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## fit model
-m <- lvm(c(y1,y2,y3)~u, u~x1)
-latent(m) <- ~u
-addvar(m) <- ~x2 
-e.lvm <- estimate(m, data = df.data)
-\end{lstlisting}
-
-\texttt{modelsearch2} can be used to sequentially apply the \texttt{modelsearch}
-function with a given correction for the p-values:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resScore <- modelsearch2(e.lvm, alpha = 0.1, trace = FALSE)
-displayScore <- summary(resScore)
-\end{lstlisting}
-
-\begin{verbatim}
-Sequential search for local dependence using the score statistic 
-The variable selection procedure retained 2 variables:
-    link statistic      p.value adjusted.p.value dp.Info selected nTests
-1   u~x2  6.036264 1.577228e-09     5.008615e-08       1     TRUE     10
-2 y2~~y3  2.629176 8.559198e-03     6.055947e-02       1     TRUE      9
-3  y3~x1  1.770997 7.656118e-02     2.814424e-01       1    FALSE      8
-Confidence level: 0.9 (two sided, adjustement: fastmax)
-\end{verbatim}
-
-This indeed matches the highest score statistic found by
-\texttt{modelsearch}:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resScore0 <- modelsearch(e.lvm, silent = TRUE)
-c(statistic = sqrt(max(resScore0$test[,"Test Statistic"])), 
-  p.value = min(resScore0$test[,"P-value"]))
-\end{lstlisting}
-
-\begin{verbatim}
-   statistic      p.value 
-6.036264e+00 1.577228e-09
-\end{verbatim}
-
-We can compare the adjustment using the max distribution to the Bonferroni adjustment:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-data.frame(link = displayScore$table[,"link"],
-		   none = displayScore$table[,"p.value"],
-		   bonferroni = displayScore$table[,"p.value"]*displayScore$table[1,"nTests"],
-		   max = displayScore$table[,"adjusted.p.value"])
-\end{lstlisting}
-
-\begin{verbatim}
-    link         none   bonferroni          max
-1   u~x2 1.577228e-09 1.577228e-08 5.008615e-08
-2 y2~~y3 8.559198e-03 8.559198e-02 6.055947e-02
-3  y3~x1 7.656118e-02 7.656118e-01 2.814424e-01
-\end{verbatim}
-
-In theory, the correction based on the max statistic should give a
-p-value that is smaller than or equal to the p-value adjusted using
-Bonferroni. However, for very small p-values, the max-correction can
-be numerically inaccurate and result in p-values that are slightly
-larger. The evolution of the estimate of a given coefficient across
-the sequential search can be displayed using \texttt{autoplot}:
-
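-For instance (a sketch, assuming the default arguments of the
-\texttt{autoplot} method for \texttt{modelsearch2} objects):
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-autoplot(resScore)
-\end{lstlisting}
-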
-\begin{center}
-\includegraphics[width=.9\linewidth]{./modelsearch.png}
-\end{center}
-
-In many cases, not all links are plausible, so the user should
-indicate which links should be investigated by \texttt{modelsearch2}. This
-can be done via the argument \texttt{link}:
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-resRed <- modelsearch2(e.lvm, link = c("y1~~y2","y1~~y3","y2~~y3"), trace = FALSE)
-print(resRed)
-\end{lstlisting}
-
-\begin{verbatim}
-Sequential search for local dependence using the score statistic 
-The variable selection procedure did not retain any variable 
-    link statistic    p.value adjusted.p.value dp.Info selected nTests
-1 y1~~y3  1.754102 0.07941299        0.1818963       1    FALSE      3
-Confidence level: 0.95 (two sided, adjustement: fastmax)
-\end{verbatim}
-
-The function \texttt{findNewLink} can help the user to identify the set of
-relevant links:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-findNewLink(e.lvm$model, type = "covariance")$link
-\end{lstlisting}
-
-\begin{verbatim}
-[1] "y1~~y2" "y1~~y3" "y2~~y3"
-\end{verbatim}
-
-\subsection{Checking that the names of the variables in the model match those of the data}
-\label{sec:org30549f2}
-
-When estimating latent variable models using \textbf{lava}, it sometimes
-happens that the model does not converge:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-set.seed(10)
-df.data <- sim(lvm(Y~X1+X2), 1e2)
-
-## fit model
-mWrong <- lvm(Y ~ X + X2)
-eWrong <- estimate(mWrong, data = df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-Warning messages:
-1: In estimate.lvm(mWrong, data = df.data) :
-  Lack of convergence. Increase number of iteration or change starting values.
-2: In sqrt(diag(asVar)) : NaNs produced
-\end{verbatim}
-
-This can have several causes:
-\begin{itemize}
-\item the model is not identifiable.
-\item the optimization routine did not manage to find a local
-optimum. This may happen for complex latent variable models where the
-objective function is not convex, even locally.
-\item the user has made a mistake when defining the model or has not
-given the appropriate dataset.
-\end{itemize}
-
-The \texttt{checkData} function makes it possible to check the last point.
-It compares the observed variables defined in the model with those
-available in the dataset. In case of a mismatch it returns a message:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-checkData(mWrong, df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-Missing variable in data: X
-\end{verbatim}
-
-In the presence of latent variables, the user needs to explicitly
-declare them in the model, otherwise \texttt{checkData} will flag them as an
-issue:
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-## simulate data
-set.seed(10)
-mSim <- lvm(c(Y1,Y2,Y3)~eta)
-latent(mSim) <- ~eta
-df.data <- sim(mSim, n = 1e2, latent = FALSE)
-
-## fit model
-m <- lvm(c(Y1,Y2,Y3)~eta)
-checkData(m, data = df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-Missing variable in data: eta
-\end{verbatim}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-latent(m) <- ~eta
-checkData(m, data = df.data)
-\end{lstlisting}
-
-\begin{verbatim}
-No issue detected
-\end{verbatim}
-
-
-\clearpage
-
-\section{Information about the R session used for this document}
-\label{sec:orgae25241}
-
-\lstset{language=r,label= ,caption= ,captionpos=b,numbers=none}
-\begin{lstlisting}
-sessionInfo()
-\end{lstlisting}
-
-\begin{verbatim}
-R version 3.5.1 (2018-07-02)
-Platform: x86_64-w64-mingw32/x64 (64-bit)
-Running under: Windows 7 x64 (build 7601) Service Pack 1
-
-Matrix products: default
-
-locale:
-[1] LC_COLLATE=Danish_Denmark.1252  LC_CTYPE=Danish_Denmark.1252   
-[3] LC_MONETARY=Danish_Denmark.1252 LC_NUMERIC=C                   
-[5] LC_TIME=Danish_Denmark.1252    
-
-attached base packages:
-[1] stats     graphics  grDevices utils     datasets  methods   base     
-
-other attached packages:
-[1] lavaSearch2_1.5.1 lava_1.6.4        ggplot2_3.1.0    
-
-loaded via a namespace (and not attached):
- [1] Rcpp_1.0.0        pillar_1.3.1      compiler_3.5.1    plyr_1.8.4       
- [5] bindr_0.1.1       tools_3.5.1       tibble_2.0.1      gtable_0.2.0     
- [9] lattice_0.20-35   pkgconfig_2.0.2   rlang_0.3.1       Matrix_1.2-14    
-[13] parallel_3.5.1    mvtnorm_1.0-8     bindrcpp_0.2.2    withr_2.1.2      
-[17] dplyr_0.7.8       stringr_1.3.1     grid_3.5.1        tidyselect_0.2.5 
-[21] glue_1.3.0        R6_2.3.0          survival_2.42-6   multcomp_1.4-8   
-[25] TH.data_1.0-9     purrr_0.3.0       reshape2_1.4.3    magrittr_1.5     
-[29] scales_1.0.0      codetools_0.2-15  MASS_7.3-50       splines_3.5.1    
-[33] assertthat_0.2.0  colorspace_1.3-2  numDeriv_2016.8-1 labeling_0.3     
-[37] sandwich_2.5-0    stringi_1.2.4     lazyeval_0.2.1    munsell_0.5.0    
-[41] crayon_1.3.4      zoo_1.8-4
-\end{verbatim}
-\end{document}
\ No newline at end of file
diff --git a/inst/doc/overview.pdf b/inst/doc/overview.pdf
index f795e3f..933ed64 100644
Binary files a/inst/doc/overview.pdf and b/inst/doc/overview.pdf differ
diff --git a/inst/doc/overview.pdf.asis b/inst/doc/overview.pdf.asis
new file mode 100644
index 0000000..ff145cc
--- /dev/null
+++ b/inst/doc/overview.pdf.asis
@@ -0,0 +1,5 @@
+%\VignetteIndexEntry{lavaSearch2: overview}
+%\VignetteEngine{R.rsp::asis}
+%\VignetteKeyword{PDF}
+%\VignetteKeyword{vignette}
+%\VignetteKeyword{package}
\ No newline at end of file
diff --git a/inst/implementationScheme.png b/inst/implementationScheme.png
deleted file mode 100644
index cd10348..0000000
Binary files a/inst/implementationScheme.png and /dev/null differ
diff --git a/inst/implementationScheme.svg b/inst/implementationScheme.svg
deleted file mode 100644
index 88eb0b2..0000000
--- a/inst/implementationScheme.svg
+++ /dev/null
@@ -1,936 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="998.85199mm"
-   height="320.72757mm"
-   viewBox="0 0 3539.2393 1136.4363"
-   id="svg2"
-   version="1.1"
-   inkscape:version="0.91 r13725"
-   sodipodi:docname="implementationScheme.svg"
-   inkscape:export-filename="C:\Users\hpl802\Documents\GitHub\lavaSearch2\inst\implementationScheme.png"
-   inkscape:export-xdpi="200"
-   inkscape:export-ydpi="200">
-  <defs
-     id="defs4">
-    <marker
-       inkscape:stockid="EmptyTriangleInL"
-       orient="auto"
-       refY="0.0"
-       refX="0.0"
-       id="EmptyTriangleInL"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path4843"
-         d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
-         style="fill-rule:evenodd;fill:#FFFFFF;stroke:#000000;stroke-width:1.0pt"
-         transform="scale(-0.8) translate(-6,0)" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker4440"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend">
-      <path
-         inkscape:connector-curvature="0"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path4442" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker9638"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path9640"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker9242"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path9244"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker8826"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path8828"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker8354"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path8356"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker8138"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend"
-       inkscape:collect="always">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path8140"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker8016"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path8018"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker7128"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path7130"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker6948"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path6950"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker6382"
-       style="overflow:visible"
-       inkscape:isstock="true"
-       inkscape:collect="always">
-      <path
-         id="path6384"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker5858"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend"
-       inkscape:collect="always">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path5860"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker5568"
-       style="overflow:visible"
-       inkscape:isstock="true"
-       inkscape:collect="always">
-      <path
-         id="path5570"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker5344"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path5346"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker5276"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend"
-       inkscape:collect="always">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path5278"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lstart"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="Arrow1Lstart"
-       style="overflow:visible"
-       inkscape:isstock="true">
-      <path
-         id="path4266"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(0.8,0,0,0.8,10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="marker5162"
-       style="overflow:visible"
-       inkscape:isstock="true"
-       inkscape:collect="always">
-      <path
-         id="path5164"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker5044"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend"
-       inkscape:collect="always">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path5046"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:isstock="true"
-       style="overflow:visible"
-       id="marker4628"
-       refX="0"
-       refY="0"
-       orient="auto"
-       inkscape:stockid="Arrow1Lend"
-       inkscape:collect="always">
-      <path
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         id="path4630"
-         inkscape:connector-curvature="0" />
-    </marker>
-    <marker
-       inkscape:stockid="Arrow1Lend"
-       orient="auto"
-       refY="0"
-       refX="0"
-       id="Arrow1Lend"
-       style="overflow:visible"
-       inkscape:isstock="true"
-       inkscape:collect="always">
-      <path
-         id="path4269"
-         d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
-         style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1pt;stroke-opacity:1"
-         transform="matrix(-0.8,0,0,-0.8,-10,0)"
-         inkscape:connector-curvature="0" />
-    </marker>
-  </defs>
-  <sodipodi:namedview
-     id="base"
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1.0"
-     inkscape:pageopacity="0.0"
-     inkscape:pageshadow="2"
-     inkscape:zoom="0.24748737"
-     inkscape:cx="2558.4954"
-     inkscape:cy="746.51653"
-     inkscape:document-units="px"
-     inkscape:current-layer="layer1"
-     showgrid="false"
-     inkscape:window-width="1920"
-     inkscape:window-height="1058"
-     inkscape:window-x="-8"
-     inkscape:window-y="-8"
-     inkscape:window-maximized="1"
-     showguides="false"
-     fit-margin-top="0"
-     fit-margin-left="0"
-     fit-margin-right="0"
-     fit-margin-bottom="0">
-    <sodipodi:guide
-       position="808.80734,1592.0252"
-       orientation="-0.70710678,0.70710678"
-       id="guide7508" />
-  </sodipodi:namedview>
-  <metadata
-     id="metadata7">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-        <dc:title />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <g
-     inkscape:label="Layer 1"
-     inkscape:groupmode="layer"
-     id="layer1"
-     transform="translate(839.1119,229.51122)">
-    <flowRoot
-       xml:space="preserve"
-       id="flowRoot3336"
[… the remaining ~556 lines of the deleted SVG markup, through the closing </svg> tag, are elided.
The removed figure was a flowchart of the package's small-sample adjustment: a fitted lvm object
is passed to sCorrect(), the user interface of the correction, and the result feeds the
user-facing functions summary2(), compare2(), iid2(), residuals2() and leverage().
Internally, .sCorrect() and .estimate2() call conditionalMoment(), which uses skeleton(),
skeletonDtheta() and skeletonD2theta() to rebuild the matrices of coefficients (Λ, K, B, …)
and to compute the first and second partial derivatives of the conditional mean and variance
(∂μ/∂Θ, ∂Ω/∂Θ, ∂²μ/∂Θ², ∂²Ω/∂Θ²); on .lvm objects this builds a template, on .lvmfit objects
the template is filled with the fitted coefficient values. .adjustResiduals(), .adjustLeverage()
and .adjustMoment() then compute the corrected residuals εᶜ, the leverage ∂Ŷ/∂Y, and the adjusted
parameters and moments (∂μᶜ/∂Θ, ∂Ωᶜ/∂Θ), from which .information2(), .score2() and
.d2Information() derive the corrected information matrix Iᶜ, the corrected score, and the
derivatives of the information matrix.]
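
For orientation, here is a minimal sketch of the workflow the deleted diagram depicted, written
against the user-facing function names shown in the diagram (sCorrect, summary2, iid2,
residuals2); the model, data and exact call signatures are illustrative assumptions, not code
taken from this diff:

## Minimal sketch (R), assuming the lava/lavaSearch2 API named in the diagram.
library(lava)
library(lavaSearch2)

## A small measurement model: three indicators of one latent variable.
m <- lvm(c(Y1, Y2, Y3) ~ eta)
latent(m) <- ~eta

set.seed(10)
d <- lava::sim(m, n = 30)        # small sample, where the correction matters
d <- d[, c("Y1", "Y2", "Y3")]    # keep only the observed variables

e  <- estimate(m, data = d)      # fitted lvmfit object
e2 <- sCorrect(e)                # small-sample correction (user interface)

summary2(e2)                     # corrected coefficient table
head(iid2(e2))                   # corrected influence functions
head(residuals2(e2))             # corrected residuals

The corrected summary reports small-sample adjusted standard errors and degrees of freedom in
place of the plain asymptotic ones, which is the point of the sCorrect() step in the diagram.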
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.aux b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.aux
deleted file mode 100644
index 5726732..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.aux
+++ /dev/null
@@ -1,29 +0,0 @@
-\relax 
-\providecommand\hyper@newdestlabel[2]{}
-\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
-\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
-\global\let\oldcontentsline\contentsline
-\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
-\global\let\oldnewlabel\newlabel
-\gdef\newlabel#1#2{\newlabelxx{#1}#2}
-\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
-\AtEndDocument{\ifx\hyper@anchor\@undefined
-\let\contentsline\oldcontentsline
-\let\newlabel\oldnewlabel
-\fi}
-\fi}
-\global\let\hyper@last\relax 
-\gdef\HyperFirstAtBeginDocument#1{#1}
-\providecommand\HyField@AuxAddToFields[1]{}
-\providecommand\HyField@AuxAddToCoFields[2]{}
-\providecommand \oddpage@label [2]{}
-\@writefile{toc}{\contentsline {section}{\numberline {1}Likelihood}{1}{section.1}}
-\newlabel{sec:org20faa29}{{1}{1}{Likelihood}{section.1}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {2}Partial derivative for the conditional mean and variance}{2}{section.2}}
-\newlabel{sec:orgef944c3}{{2}{2}{Partial derivative for the conditional mean and variance}{section.2}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {3}First derivative: score}{4}{section.3}}
-\newlabel{sec:orga852a0f}{{3}{4}{First derivative: score}{section.3}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {4}Second derivative: Hessian and expected information}{4}{section.4}}
-\newlabel{SM:Information}{{4}{4}{Second derivative: Hessian and expected information}{section.4}{}}
-\@writefile{toc}{\contentsline {section}{\numberline {5}First derivatives of the information matrix}{6}{section.5}}
-\newlabel{sec:org27a8d69}{{5}{6}{First derivatives of the information matrix}{section.5}{}}
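
The table of contents above is the substantive trace of the deleted note, which derived the
likelihood, the score, the Hessian and expected information, and the first derivatives of the
information matrix for a latent variable model. For reference, with Gaussian moments μ(θ) and
Ω(θ) the standard per-observation quantities covered by its sections 1, 3 and 4 read as follows
(a textbook restatement, not text recovered from the deleted file):

% Gaussian log-likelihood, score and expected information for
% Y_i ~ N(mu(theta), Omega(theta)); the deleted note specializes
% these formulas to the LVM parameterization.
\begin{align*}
\ell_i(\theta) &= -\tfrac{p}{2}\log(2\pi)
  - \tfrac{1}{2}\log\lvert\Omega\rvert
  - \tfrac{1}{2}(Y_i-\mu)^\top \Omega^{-1} (Y_i-\mu),\\
\frac{\partial \ell_i}{\partial \theta_j} &=
  \frac{\partial \mu^\top}{\partial \theta_j}\,\Omega^{-1}(Y_i-\mu)
  - \tfrac{1}{2}\operatorname{tr}\!\Big(\Omega^{-1}\tfrac{\partial \Omega}{\partial \theta_j}\Big)
  + \tfrac{1}{2}(Y_i-\mu)^\top \Omega^{-1}\tfrac{\partial \Omega}{\partial \theta_j}\Omega^{-1}(Y_i-\mu),\\
\mathcal{I}_{jk}(\theta) &= n\Big[
  \frac{\partial \mu^\top}{\partial \theta_j}\,\Omega^{-1}\,\frac{\partial \mu}{\partial \theta_k}
  + \tfrac{1}{2}\operatorname{tr}\!\Big(\Omega^{-1}\tfrac{\partial \Omega}{\partial \theta_j}
      \Omega^{-1}\tfrac{\partial \Omega}{\partial \theta_k}\Big)\Big].
\end{align*}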
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fdb_latexmk b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fdb_latexmk
deleted file mode 100644
index 752fa87..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fdb_latexmk
+++ /dev/null
@@ -1,177 +0,0 @@
-# Fdb version 3
-["pdflatex"] 1576138825 "c:/Users/hpl802/Documents/GitHub/lavaSearch2/inst/likelihood-derivatives-LVM.tex" "likelihood-derivatives-LVM.pdf" "likelihood-derivatives-LVM" 1576138830
-  "C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-ec.enc" 1254935040 2375 baa924870cfb487815765f9094cf3728 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathex.enc" 1254935040 3486 c7eadf5dcc57b3b2d11736679f6636ba ""
-  "C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathit.enc" 1254935040 2405 5dcf2c1b967ee25cc46c58cd52244aed ""
-  "C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathsy.enc" 1254935040 2840 216e6e45ad352e2456e1149f28885bee ""
-  "C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-rm.enc" 1254935040 2327 9d6df24f9c4f7368395224341a95523a ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/jknappen/ec/ecrm1000.tfm" 993058522 3148 7e594c8240c71e432203f65ab5f95a51 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam10.tfm" 1233951854 916 f87d7c45f9c908e672703b83b72241a3 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam5.tfm" 1233951854 924 9904cf1d39e9767e7a3622f2a125a565 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam7.tfm" 1233951854 928 2dc8d444221b7a635bb58038579b861a ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm10.tfm" 1233951854 908 2921f8a10601f252058503cc6570e581 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm5.tfm" 1233951854 940 75ac932a52f80982a9f8ea75d03a34cf ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm7.tfm" 1233951854 940 228d6584342e91276bf566bcf9716b83 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/latex-fonts/lasy6.tfm" 1136765053 520 4889cce2180234b97cad636b6039c722 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmbx12.tfm" 1254935040 12088 d750ac78274fa7c9f73ba09914c04f8a ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr10.tfm" 1254935040 12056 7e13df7fe4cbce21b072ba7c4f4deb6e ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr12.tfm" 1254935040 12092 7b1546e2d096cfd5dcbd4049b0b1ec2e ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr17.tfm" 1254935040 12156 ca1ae6a3c8564e89597f1f993fba1608 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmbsy10.tfm" 1254935040 1300 2df9da0fc09d4a8c772b3dd386a47c6a ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmbsy5.tfm" 1254935040 1304 2ff0a255ae754422adbc0e6519ed2658 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmbsy7.tfm" 1254935040 1304 535e0954c1961c817723e44bc6a9662c ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmex10.tfm" 1254935040 992 ce925c9346c7613270a79afbee98c070 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi10.tfm" 1254935040 1528 6d36b2385e0ca062a654de6ac59cb34f ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi12.tfm" 1254935040 1524 753b192b18f2991794f9d41a8228510b ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi5.tfm" 1254935040 1508 51526923e80e59e37a4e87448b5af6e0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi6.tfm" 1254935040 1512 94a3fd88c6f27dbd9ecb46987e297a4e ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi7.tfm" 1254935040 1528 d5b028dd23da623848ef0645c96a1ed7 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi8.tfm" 1254935040 1520 a3fe5596932db2db2cbda300920dd4e9 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmib10.tfm" 1254935040 1524 94d8ba2701edc3d8c3337e16e222f220 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmib5.tfm" 1254935040 1496 026e52505574e5c5b11a8037b8db27d0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmib7.tfm" 1254935040 1508 e1d41318430466dfe2207ded55ef3af5 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy10.tfm" 1254935040 1308 02cc510f9dd6012e5815d0c0ffbf6869 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy5.tfm" 1254935040 1296 54ed1a711e2303d5282575278e3620b0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy6.tfm" 1254935040 1300 b0605d44c16c22d99dc001808e4f24ea ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy7.tfm" 1254935040 1304 32f22a15acc296b2a4e15698403dcb88 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy8.tfm" 1254935040 1304 cdc9a17df9ef0d2dc320eff37bbab1c4 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmbx10.tfm" 1254935040 11880 35fcf136a2198418dfc53c83e9e2a07f ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmbx5.tfm" 1254935040 11828 9b1880528bdbe7e6035fd1b46bff1bbb ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmbx7.tfm" 1254935040 11864 44cdb751af976143ebc0bed7eb1df9f4 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr10.tfm" 1254935040 11868 4f81e9b6033c032bdaf9884f4d7ef412 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr12.tfm" 1254935040 11888 6841b91e46b65cf41a49b160e6e74130 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr5.tfm" 1254935040 11804 aefb10c002e6492c25236524a447f969 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr6.tfm" 1254935040 11836 e3b6ce3e601aec94f64a536e7f4224d5 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr7.tfm" 1254935040 11852 5a9022f105fd1ee2797df861e79ae9a0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr8.tfm" 1254935040 11864 309fd7f43e4a0ba39f6f7644d76e8edf ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/latxfont/line10.pfb" 1247593053 11493 4f5ed183a47d3197cf8cd322325db6de ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/symbols/msam7.pfb" 1247593073 33366 60462f3d16c3d0f024f1f856e64b09cd ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/symbols/msbm10.pfb" 1247593073 34694 870c211f62cb72718a00e353f14f254d ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmbx12.pfb" 1254935040 116908 9a7a12d1e4df280ea0ab4aad83cdef88 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmex10.pfb" 1254935040 23055 2e5b42921de910eaa97b85df04ca4891 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi10.pfb" 1254935040 30388 702fae6a5f0e6e9c48a1d872b442ffcf ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi5.pfb" 1254935040 31443 ba2241b179aa231f73e052863720cb42 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi7.pfb" 1254935040 30789 39c6b966e509dbc60e7a1aaaf85d08a0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmib10.pfb" 1254935040 30962 0fa65166a316c2ff6663754edfd267df ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr10.pfb" 1254935040 119235 05e5c8d6d60439433e50047898c2c0e0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr12.pfb" 1254935040 113634 9384aebdf26f9881e046a4b5574566db ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr17.pfb" 1254935040 119752 1bd8d06e4079df624bf59ce3ad7c9aa6 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr5.pfb" 1254935040 120387 4904c2679a7b90fe9e17171d6c7c9dd0 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr7.pfb" 1254935040 121145 00b210fbe9efd7ce850c04e64670de9a ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy10.pfb" 1254935040 27863 09ce3735688ffde955e72da27c95b61a ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy5.pfb" 1254935040 28210 b80965e85ec354118856303654d00ba5 ""
-  "C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy7.pfb" 1254935040 27941 d1f5d03f61a46c3fcc3a2ba904ddda52 ""
-  "C:/Program Files/MiKTeX 2.9/tex/context/base/supp-pdf.mkii" 1306914861 71592 fe353e1fbd982ffc647f3e98ff74da96 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/ifxetex/ifxetex.sty" 1284257189 1458 43ab4710dc82f3edeabecd0d099626b2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/etexcmds.sty" 1489934424 7612 729a8cc22a1ee0029997c7f74717ae05 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/gettitlestring.sty" 1489934424 8237 3b62ef1f7e2c23a328c814b3893bc11f ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-generic.sty" 1489934424 185082 1fb09d7d24834377f95006300bc91fd2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-hyperref.sty" 1489934424 70864 bcd5b216757bd619ae692a151d90085d ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/ifluatex.sty" 1489934424 7324 2310d1247db0114eb4726807c8837a0e ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/ifpdf.sty" 1489934424 1251 d170e11a3246c3392bc7f59595af42cb ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/ifvtex.sty" 1489934424 6797 90b7f83b0ad46826bc16058b1e3d48df ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/infwarerr.sty" 1489934424 8253 473e0e41f9adadb1977e8631b8f72ea6 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/kvsetkeys.sty" 1489934426 14040 ac8866aac45982ac84021584b0abb252 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/ltxcmds.sty" 1489934426 18425 5b3c0c59d76fac78978b5558e83c1f36 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/pdftexcmds.sty" 1489934426 20151 72b3c7cacb61f7dd527505c39a23f7c1 ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/xkeyval/xkeyval.tex" 1419270738 19853 26434a5656c684f5ffb1f26f98006baa ""
-  "C:/Program Files/MiKTeX 2.9/tex/generic/xkeyval/xkvutils.tex" 1419270738 7948 6f5ce7c1124cad7ec57d05b2562bd8fe ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/00miktex/hyperref.cfg" 1136242423 235 6031e5765137be07eed51a510b2b8fb7 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/algorithm2e/algorithm2e.sty" 1500388004 167160 d91cee26d3ef5727644d2110445741dd ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amscls/amsthm.sty" 1425932800 12225 3cca0d18522255979a1047206228b9d0 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/amsfonts.sty" 1358197772 5949 3f3fd50a8cc94c3d4cbf4fc66cd3df1c ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/amssymb.sty" 1358197772 13829 94730e64147574077f8ecfea9bb69af4 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/umsa.fd" 1358197772 961 6518c6525a34feb5e8250ffa91731cff ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/umsb.fd" 1358197772 961 d02606146ba5601b5645f987c92e6193 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsbsy.sty" 1492419549 2282 5c54ab129b848a5071554186d0168766 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsgen.sty" 1492419550 4296 c115536cf8d4ff25aa8c1c9bc4ecb79a ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsmath.sty" 1492419550 87150 81aa65c5042562f79cb421feff9b8bdc ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsopn.sty" 1492419550 4232 318a66090112f3aa3f415aeb6fe8540f ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amstext.sty" 1492419550 2507 fe3078ec12fc30287f568596f8e0b948 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/article.cls" 1492211338 19821 310da678527a7dfe2a02c88af38079b7 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/fontenc.sty" 1492211346 4571 13977df0eda144b93597fc709035ad1f ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/ifthen.sty" 1492211346 5159 a08c9bbd48fc492f15b22e458bef961f ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/inputenc.sty" 1492211346 4732 d63eda807ac82cca2ca8488efd31a966 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/omsenc.dfu" 1492211342 2004 ac51aeac484f08c01026120d62677eca ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/ot1enc.dfu" 1492211342 3181 1cb3e9ad01f4a01127b2ffd821bfeec7 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/size10.clo" 1492211338 8292 e897c12e1e886ce77fe26afc5d470886 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/t1enc.def" 1492211342 10006 a90ba4035cf778f32f424e297d92e235 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/t1enc.dfu" 1492211342 11255 9d97362866549d3d3c994b5f28d1b9b5 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/textcomp.sty" 1492211348 16154 f2c73e20ca771d534a8516c62c6b0eae ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/ts1cmr.fd" 1492211344 2217 d274654bda1292013bdf48d5f720a495 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/ts1enc.def" 1492211342 7767 aa88823823f5e767d79ea1166ab1ae74 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/ts1enc.dfu" 1492211342 4919 76510afd60e8282294f944c2f9f5103b ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/base/utf8.def" 1492211342 7784 325a2a09984cb5c4ff230f9867145ad3 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/booktabs/booktabs.sty" 1114280098 6557 989ecfb545a13610facc71ae7e40ced1 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/cancel/cancel.sty" 1388370247 7592 dd751af313a16a0308545d5bfd7aaaa2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/caption/caption.sty" 1463939504 66233 8b81cfab95a1f8fc2f0f6c89415b087a ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/caption/caption3.sty" 1463939504 64866 1ea74c5f2d1685881497f021b8f186b2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/caption/ltcaption.sty" 1463939504 7257 dd37a106002acb997b6c947cf18f297e ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/caption/subcaption.sty" 1463939504 5038 413302044f9700cf65465d590c2f4ad1 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/changes/changes.sty" 1432572028 22616 c5e991dc58c2315fc7cddd86fba19304 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/colortbl/colortbl.sty" 1353417692 10955 9635c8e2bffccf182429bfdea9b665a5 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/doublestroke/dsfont.sty" 1030466772 230 7bc61880b468bfd38aedc173be7c3486 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/geometry/geometry.cfg" 1284467836 993 8db2c8abcd1627733e569409f4331e11 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/geometry/geometry.sty" 1284467836 41632 e003406220954b0716679d7928aedd8a ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics-cfg/color.cfg" 1465890692 1213 620bba36b25224fa9b7e1ccb4ecb76fd ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics-cfg/graphics.cfg" 1465890692 1224 978390e9c2234eab29404bc21b268d1e ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics-def/pdftex.def" 1485033486 58250 3792a9d2d1d664ee8c742498e295b051 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/color.sty" 1492419583 7229 4ca2945c73eec4e726c0da188fbbf50f ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/graphics.sty" 1492419583 15094 b288c52bd5d46d593af31dbc7e548236 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/graphicx.sty" 1492419583 8369 557ab9f1bfa80d369fb45a914aa8a3b4 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/keyval.sty" 1492419583 2681 d18d5e19aa8239cf867fa670c556d2e9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/lscape.sty" 1492419583 1818 94e21888eaf2391b7bbc441045c70c12 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/rotating.sty" 1492419583 7246 f1843f5f0545182bf419df38f75e3930 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/graphics/trig.sty" 1492419583 4097 0a268fbfda01e381fa95821ab13b6aee ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/hpdftex.def" 1489917524 51983 9a4f683a2a7b213874a28bc4008b84d9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/hyperref.sty" 1489917524 233808 b63d91422c362e723c6e8b1a2fffcba5 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/nameref.sty" 1489917524 12949 81e4e808884a8f0e276b69410e234656 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/pd1enc.def" 1489917524 14098 7631f11156e5f9cd76010dbd230aa268 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/ifmtarg/ifmtarg.sty" 1278747487 438 4c0f57b7cba6e6ca9226b32da26f82e9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/ifoddpage/ifoddpage.sty" 1461432684 2148 0426cd8bb94163c1e23726d0c15e2c21 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/listings/listings.cfg" 1446815279 1876 d72ad54409ca5c1068a1939c63441bd2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/listings/listings.sty" 1446815279 82579 ff90c926c3d7bfdaa3d80ca57123b0bb ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/listings/lstmisc.sty" 1446815279 79112 c3eb00afb55a32bc13ca8da7f5234377 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/lm/lmodern.sty" 1256929440 1606 c17281c7cff2bbd7ff0173e1433487ec ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/lm/omllmm.fd" 1256929440 888 44447a3a3af84a22454ef89500942d93 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/lm/omslmsy.fd" 1256929440 805 af340a8260c447aa315cfc740ff0152f ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/lm/omxlmex.fd" 1256929440 566 a94661f7b66063f191960bb7935b6ba2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/lm/ot1lmr.fd" 1256929440 1880 bae7b659316f7344a86218ad38b01d91 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/lm/t1lmr.fd" 1256929440 1865 afbfccbe7fda9c2dc5078ad7c486bbed ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/ltxmisc/relsize.sty" 1068046500 16969 b414bd18cf77ed8750f7d9fe3c40418e ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/multirow/multirow.sty" 1480361749 5169 cd8d25106b58dfc16af57528a07d0bc9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/natbib/natbib.sty" 1292249081 46712 1c8843383c0bd05870c45fa0ebea6cc2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/auxhook.sty" 1489934430 3834 4363110eb0ef1eb2b71c8fcbcdb6c357 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/epstopdf-base.sty" 1489934432 12095 5337833c991d80788a43d3ce26bd1c46 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/epstopdf.sty" 1489934432 4167 fc50b4e5b185008bb9308b1ea4ce64d4 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/grfext.sty" 1489934432 7075 2fe3d848bba95f139de11ded085e74aa ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/grffile.sty" 1489934432 13538 be77aa888ffba3c8d06db69357d0b1ed ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/kvoptions.sty" 1489934432 22417 1d9df1eb66848aa31b18a593099cf45c ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/pdfcolmk.sty" 1489934434 11548 5747071fb49b31177f7d600e84775322 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/pdflscape.sty" 1489934434 6688 c84de1eae6cda82865a6d3d09e339ec9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/rerunfilecheck.sty" 1489934434 9581 023642318cef9f4677efe364de1e2a27 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/tools/array.sty" 1492419593 12755 d41f82b039f900e95f351e54ae740f31 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/tools/calc.sty" 1492419594 10503 d03d065f799d54f6b7e9b175f8d84279 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/tools/enumerate.sty" 1492419594 3555 d39e40f25d5fd2b3b49a58d155063dd6 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/tools/longtable.sty" 1492419594 12529 80916157594a8e4354985aaefae4f367 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/tools/multicol.sty" 1492419594 31235 4d723b83c66873ba53f72c4a8730d6d3 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/tools/xspace.sty" 1492419594 4696 8b8f59969458e1c25e0559e8e0ced1a4 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/truncate/truncate.sty" 998349558 6657 bba86b8809c129cc7edd6da64c0c1e43 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/ulem/ulem.sty" 1338506272 23756 854c01b779030ff5b2aad88ba7a119f2 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/url/url.sty" 1388490452 12796 8edb7d69a20b857904dd0ea757c14ec9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/wrapfig/wrapfig.sty" 1044403200 26220 3701aebf80ccdef248c0c20dd062fea9 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/xcolor/xcolor.sty" 1463131981 57049 34128738f682d033422ca125f82e5d62 ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/xifthen/xifthen.sty" 1448274436 5689 1bfa68243e89a54479e7d0580773c57e ""
-  "C:/Program Files/MiKTeX 2.9/tex/latex/xkeyval/xkeyval.sty" 1419270738 5114 9c1069474ff71dbc47d5006555e352d3 ""
-  "C:/Users/hpl802/AppData/Local/MiKTeX/2.9/miktex/data/le/pdftex/pdflatex.fmt" 1533727005 4060061 5261e98a887502f86f6ada574bd5f174 ""
-  "C:/Users/hpl802/AppData/Local/MiKTeX/2.9/pdftex/config/pdftex.map" 1567165148 103739 916732b1001256fe005645b1f622599d ""
-  "c:/Users/hpl802/Documents/GitHub/lavaSearch2/inst/likelihood-derivatives-LVM.tex" 1576138788 17975 a17d12d0bf45659692b5f5423cac0ec7 ""
-  "likelihood-derivatives-LVM.aux" 1576138830 1704 ec533e6fcbb4f3a57d5e68243623df49 ""
-  "likelihood-derivatives-LVM.out" 1576138830 363 f983158485294b61c27b6afd955d7f50 ""
-  "likelihood-derivatives-LVM.tex" 1576138788 17975 a17d12d0bf45659692b5f5423cac0ec7 ""
-  (generated)
-  "likelihood-derivatives-LVM.pdf"
-  "likelihood-derivatives-LVM.out"
-  "likelihood-derivatives-LVM.aux"
-  "likelihood-derivatives-LVM.soc"
-  "likelihood-derivatives-LVM.log"
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fls b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fls
deleted file mode 100644
index b52adf2..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.fls
+++ /dev/null
@@ -1,311 +0,0 @@
-PWD c:/Users/hpl802/Documents/GitHub/lavaSearch2/inst
-INPUT C:/Users/hpl802/AppData/Local/MiKTeX/2.9/miktex/data/le/pdftex/pdflatex.fmt
-INPUT c:/Users/hpl802/Documents/GitHub/lavaSearch2/inst/likelihood-derivatives-LVM.tex
-OUTPUT likelihood-derivatives-LVM.log
[… 125 deleted INPUT lines are elided here: pdflatex's file-access log of the MiKTeX 2.9
classes, packages, fonts and encodings loaded during the build; the deleted listing
continues below.]
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsmath.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amstext.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amstext.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsgen.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsgen.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsbsy.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsbsy.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsopn.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsmath/amsopn.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/doublestroke/dsfont.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/doublestroke/dsfont.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/hyperref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/hyperref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-hyperref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-hyperref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-hyperref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-generic.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/hobsub-generic.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/auxhook.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/auxhook.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/pd1enc.def
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/pd1enc.def
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/00miktex/hyperref.cfg
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/00miktex/hyperref.cfg
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/url/url.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/url/url.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/hpdftex.def
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/hpdftex.def
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/rerunfilecheck.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/rerunfilecheck.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/natbib/natbib.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/natbib/natbib.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/epstopdf.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/epstopdf.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/epstopdf-base.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/epstopdf-base.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/grfext.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/oberdiek/grfext.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/caption.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/caption.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/caption3.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/caption3.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/ltcaption.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/ltcaption.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/subcaption.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/caption/subcaption.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/booktabs/booktabs.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/booktabs/booktabs.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/algorithm2e/algorithm2e.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/algorithm2e/algorithm2e.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/ifoddpage/ifoddpage.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/ifoddpage/ifoddpage.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/tools/xspace.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/tools/xspace.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/ltxmisc/relsize.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/ltxmisc/relsize.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amscls/amsthm.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amscls/amsthm.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/cancel/cancel.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/cancel/cancel.sty
-INPUT likelihood-derivatives-LVM.aux
-INPUT likelihood-derivatives-LVM.aux
-OUTPUT likelihood-derivatives-LVM.aux
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/base/ts1cmr.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/base/ts1cmr.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/t1lmr.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/t1lmr.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/context/base/supp-pdf.mkii
-INPUT C:/Program Files/MiKTeX 2.9/tex/context/base/supp-pdf.mkii
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/nameref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/hyperref/nameref.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/gettitlestring.sty
-INPUT C:/Program Files/MiKTeX 2.9/tex/generic/oberdiek/gettitlestring.sty
-INPUT likelihood-derivatives-LVM.out
-INPUT likelihood-derivatives-LVM.out
-INPUT likelihood-derivatives-LVM.out
-INPUT likelihood-derivatives-LVM.out
-OUTPUT likelihood-derivatives-LVM.pdf
-INPUT ./likelihood-derivatives-LVM.out
-INPUT ./likelihood-derivatives-LVM.out
-OUTPUT likelihood-derivatives-LVM.out
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr17.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr12.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/ot1lmr.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/ot1lmr.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr12.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr8.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr6.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/omllmm.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/omllmm.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi12.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi8.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi6.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/omslmsy.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/omslmsy.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy8.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy6.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/omxlmex.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/lm/omxlmex.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmex10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/umsa.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/umsa.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/umsb.fd
-INPUT C:/Program Files/MiKTeX 2.9/tex/latex/amsfonts/umsb.fd
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmr12.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/ec-lmbx12.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmr5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmi5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmsy5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msam5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/amsfonts/symbols/msbm5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmbx10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmbx7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/rm-lmbx5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmib10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmib7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmmib5.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmbsy10.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmbsy7.tfm
-INPUT C:/Program Files/MiKTeX 2.9/fonts/tfm/public/lm/lmbsy5.tfm
-INPUT C:/Users/hpl802/AppData/Local/MiKTeX/2.9/pdftex/config/pdftex.map
-OUTPUT likelihood-derivatives-LVM.soc
-INPUT likelihood-derivatives-LVM.aux
-INPUT ./likelihood-derivatives-LVM.out
-INPUT ./likelihood-derivatives-LVM.out
-INPUT C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathsy.enc
-INPUT C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathit.enc
-INPUT C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-rm.enc
-INPUT C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathex.enc
-INPUT C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-ec.enc
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/latxfont/line10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/latxfont/line10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmbx12.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmbx12.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmex10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmex10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi5.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi5.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmib10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmib10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr12.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr12.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr17.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr17.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr5.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr5.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy5.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy5.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/symbols/msam7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/symbols/msam7.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/symbols/msbm10.pfb
-INPUT C:/Program Files/MiKTeX 2.9/fonts/type1/public/amsfonts/symbols/msbm10.pfb
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.log b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.log
deleted file mode 100644
index f03ac89..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.log
+++ /dev/null
@@ -1,1387 +0,0 @@
-This is pdfTeX, Version 3.14159265-2.6-1.40.18 (MiKTeX 2.9.6350 64-bit) (preloaded format=pdflatex 2018.8.8)  12 DEC 2019 09:20
[... package-loading portion of the deleted pdfTeX compilation log elided: LaTeX2e <2017-04-15> startup, article class, listings, inputenc/fontenc UTF-8 mapping tables, lmodern, textcomp, color/graphics drivers, changes, geometry, amsmath/amssymb, and the beginning of hyperref's setup ...]
-\Fld@menulength=\count143
-\Field@Width=\dimen148
-\Fld@charsize=\dimen149
-Package hyperref Info: Hyper figures OFF on input line 6358.
-Package hyperref Info: Link nesting OFF on input line 6363.
-Package hyperref Info: Hyper index ON on input line 6366.
-Package hyperref Info: backreferencing OFF on input line 6373.
-Package hyperref Info: Link coloring OFF on input line 6378.
-Package hyperref Info: Link coloring with OCG OFF on input line 6383.
-Package hyperref Info: PDF/A mode OFF on input line 6388.
-LaTeX Info: Redefining \ref on input line 6428.
-LaTeX Info: Redefining \pageref on input line 6432.
-\Hy@abspage=\count144
-\c@Item=\count145
-\c@Hfootnote=\count146
-)
-
-Package hyperref Message: Driver (autodetected): hpdftex.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\hyperref\hpdftex.def"
-File: hpdftex.def 2017/03/14 v6.85a Hyperref driver for pdfTeX
-\Fld@listcount=\count147
-\c@bookmark@seq@number=\count148
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\oberdiek\rerunfilecheck.sty"
-Package: rerunfilecheck 2016/05/16 v1.8 Rerun checks for auxiliary files (HO)
-Package uniquecounter Info: New unique counter `rerunfilecheck' on input line 2
-82.
-)
-\Hy@SectionHShift=\skip64
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\natbib\natbib.sty"
-Package: natbib 2010/09/13 8.31b (PWD, AO)
-\bibhang=\skip65
-\bibsep=\skip66
-LaTeX Info: Redefining \cite on input line 694.
-\c@NAT@ctr=\count149
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\oberdiek\epstopdf.sty"
-Package: epstopdf 2016/05/15 v2.6 Conversion with epstopdf on the fly (HO)
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\oberdiek\epstopdf-base.sty"
-Package: epstopdf-base 2016/05/15 v2.6 Base part for package epstopdf
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\oberdiek\grfext.sty"
-Package: grfext 2016/05/16 v1.2 Manage graphics extensions (HO)
-)
-Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4
-38.
-Package grfext Info: Graphics extension search list:
-(grfext)             [.png,.pdf,.jpg,.mps,.jpeg,.jbig2,.jb2,.PNG,.PDF,.JPG,.JPE
-G,.JBIG2,.JB2,.eps]
-(grfext)             \AppendGraphicsExtensions on input line 456.
-))
-("C:\Program Files\MiKTeX 2.9\tex\latex\caption\caption.sty"
-Package: caption 2016/02/21 v3.3-144 Customizing captions (AR)
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\caption\caption3.sty"
-Package: caption3 2016/05/22 v1.7-166 caption3 kernel (AR)
-Package caption3 Info: TeX engine: e-TeX on input line 67.
-\captionmargin=\dimen150
-\captionmargin@=\dimen151
-\captionwidth=\dimen152
-\caption@tempdima=\dimen153
-\caption@indent=\dimen154
-\caption@parindent=\dimen155
-\caption@hangindent=\dimen156
-)
-\c@ContinuedFloat=\count150
-Package caption Info: hyperref package is loaded.
-Package caption Info: listings package is loaded.
-Package caption Info: longtable package is loaded.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\caption\ltcaption.sty"
-Package: ltcaption 2013/06/09 v1.4-94 longtable captions (AR)
-)
-Package caption Info: rotating package is loaded.
-Package caption Info: wrapfig package is loaded.
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\caption\subcaption.sty"
-Package: subcaption 2016/05/22 v1.1-161 Sub-captions (AR)
-\c@subfigure=\count151
-\c@subtable=\count152
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\booktabs\booktabs.sty"
-Package: booktabs 2005/04/14 v1.61803 publication quality tables
-\heavyrulewidth=\dimen157
-\lightrulewidth=\dimen158
-\cmidrulewidth=\dimen159
-\belowrulesep=\dimen160
-\belowbottomsep=\dimen161
-\aboverulesep=\dimen162
-\abovetopsep=\dimen163
-\cmidrulesep=\dimen164
-\cmidrulekern=\dimen165
-\defaultaddspace=\dimen166
-\@cmidla=\count153
-\@cmidlb=\count154
-\@aboverulesep=\dimen167
-\@belowrulesep=\dimen168
-\@thisruleclass=\count155
-\@lastruleclass=\count156
-\@thisrulewidth=\dimen169
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\algorithm2e\algorithm2e.sty"
-Package: algorithm2e 2017/07/18 v5.2 algorithms environments
-\c@AlgoLine=\count157
-\algocf@hangindent=\skip67
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\ifoddpage\ifoddpage.sty"
-Package: ifoddpage 2016/04/23 v1.1 Conditionals for odd/even page detection
-\c@checkoddpage=\count158
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\tools\xspace.sty"
-Package: xspace 2014/10/28 v1.13 Space after command names (DPC,MH)
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\ltxmisc\relsize.sty"
-Package: relsize 2003/07/04 ver 3.1
-)
-\skiptotal=\skip68
-\skiplinenumber=\skip69
-\skiprule=\skip70
-\skiphlne=\skip71
-\skiptext=\skip72
-\skiplength=\skip73
-\algomargin=\skip74
-\skipalgocfslide=\skip75
-\algowidth=\dimen170
-\inoutsize=\dimen171
-\inoutindent=\dimen172
-\interspacetitleruled=\dimen173
-\interspacealgoruled=\dimen174
-\interspacetitleboxruled=\dimen175
-\algocf@ruledwidth=\skip76
-\algocf@inoutbox=\box65
-\algocf@inputbox=\box66
-\AlCapSkip=\skip77
-\AlCapHSkip=\skip78
-\algoskipindent=\skip79
-\algocf@nlbox=\box67
-\algocf@hangingbox=\box68
-\algocf@untilbox=\box69
-\algocf@skipuntil=\skip80
-\algocf@capbox=\box70
-\algocf@lcaptionbox=\skip81
-\algoheightruledefault=\skip82
-\algoheightrule=\skip83
-\algotitleheightruledefault=\skip84
-\algotitleheightrule=\skip85
-\c@algocfline=\count159
-\c@algocfproc=\count160
-\c@algocf=\count161
-\algocf@algoframe=\box71
-\algocf@algobox=\box72
-)
-("C:\Program Files\MiKTeX 2.9\tex\latex\amscls\amsthm.sty"
-Package: amsthm 2015/03/04 v2.20.2
-\thm@style=\toks35
-\thm@bodyfont=\toks36
-\thm@headfont=\toks37
-\thm@notefont=\toks38
-\thm@headpunct=\toks39
-\thm@preskip=\skip86
-\thm@postskip=\skip87
-\thm@headsep=\skip88
-\dth@everypar=\toks40
-)
-\c@lemma=\count162
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\cancel\cancel.sty"
-Package: cancel 2013/04/12 v2.2 Cancel math terms
-)
-Package hyperref Info: Option `colorlinks' set `true' on input line 123.
-
-(likelihood-derivatives-LVM.aux)
-\openout1 = `likelihood-derivatives-LVM.aux'.
-
-LaTeX Font Info:    Checking defaults for OML/cmm/m/it on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for T1/cmr/m/n on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for OT1/cmr/m/n on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for OMS/cmsy/m/n on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for OMX/cmex/m/n on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for U/cmr/m/n on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for TS1/cmr/m/n on input line 124.
-LaTeX Font Info:    Try loading font information for TS1+cmr on input line 124.
-
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\base\ts1cmr.fd"
-File: ts1cmr.fd 2014/09/29 v2.5h Standard LaTeX font definitions
-)
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Checking defaults for PD1/pdf/m/n on input line 124.
-LaTeX Font Info:    ... okay on input line 124.
-LaTeX Font Info:    Try loading font information for T1+lmr on input line 124.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\lm\t1lmr.fd"
-File: t1lmr.fd 2009/10/30 v1.6 Font defs for Latin Modern
-)
-\c@lstlisting=\count163
-
-("C:\Program Files\MiKTeX 2.9\tex\context\base\supp-pdf.mkii"
-[Loading MPS to PDF converter (version 2006.09.02).]
-\scratchcounter=\count164
-\scratchdimen=\dimen176
-\scratchbox=\box73
-\nofMPsegments=\count165
-\nofMParguments=\count166
-\everyMPshowfont=\toks41
-\MPscratchCnt=\count167
-\MPscratchDim=\dimen177
-\MPnumerator=\count168
-\makeMPintoPDFobject=\count169
-\everyMPtoPDFconversion=\toks42
-)
-*geometry* driver: auto-detecting
-*geometry* detected driver: pdftex
-*geometry* verbose mode - [ preamble ] result:
-* driver: pdftex
-* paper: <default>
-* layout: <same size as paper>
-* layoutoffset:(h,v)=(0.0pt,0.0pt)
-* modes: 
-* h-part:(L,W,R)=(108.405pt, 415.5525pt, 90.3375pt)
-* v-part:(T,H,B)=(85.35826pt, 624.25346pt, 85.35826pt)
-* \paperwidth=614.295pt
-* \paperheight=794.96999pt
-* \textwidth=415.5525pt
-* \textheight=624.25346pt
-* \oddsidemargin=36.13501pt
-* \evensidemargin=36.13501pt
-* \topmargin=-23.91173pt
-* \headheight=12.0pt
-* \headsep=25.0pt
-* \topskip=10.0pt
-* \footskip=30.0pt
-* \marginparwidth=65.0pt
-* \marginparsep=11.0pt
-* \columnsep=10.0pt
-* \skip\footins=9.0pt plus 4.0pt minus 2.0pt
-* \hoffset=0.0pt
-* \voffset=0.0pt
-* \mag=1000
-* \@twocolumnfalse
-* \@twosidefalse
-* \@mparswitchfalse
-* \@reversemarginfalse
-* (1in=72.27pt=25.4mm, 1cm=28.453pt)
-
-\AtBeginShipoutBox=\box74
-Package hyperref Info: Link coloring ON on input line 124.
-("C:\Program Files\MiKTeX 2.9\tex\latex\hyperref\nameref.sty"
-Package: nameref 2016/05/21 v2.44 Cross-referencing by name of section
-
-("C:\Program Files\MiKTeX 2.9\tex\generic\oberdiek\gettitlestring.sty"
-Package: gettitlestring 2016/05/16 v1.5 Cleanup title references (HO)
-)
-\c@section@level=\count170
-)
-LaTeX Info: Redefining \ref on input line 124.
-LaTeX Info: Redefining \pageref on input line 124.
-LaTeX Info: Redefining \nameref on input line 124.
-
-(likelihood-derivatives-LVM.out) (likelihood-derivatives-LVM.out)
-\@outlinefile=\write3
-\openout3 = `likelihood-derivatives-LVM.out'.
-
-Package caption Info: Begin \AtBeginDocument code.
-Package caption Info: End \AtBeginDocument code.
-LaTeX Font Info:    Try loading font information for OT1+lmr on input line 127.
-
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\lm\ot1lmr.fd"
-File: ot1lmr.fd 2009/10/30 v1.6 Font defs for Latin Modern
-)
-LaTeX Font Info:    Try loading font information for OML+lmm on input line 127.
-
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\lm\omllmm.fd"
-File: omllmm.fd 2009/10/30 v1.6 Font defs for Latin Modern
-)
-LaTeX Font Info:    Try loading font information for OMS+lmsy on input line 127
-.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\lm\omslmsy.fd"
-File: omslmsy.fd 2009/10/30 v1.6 Font defs for Latin Modern
-)
-LaTeX Font Info:    Try loading font information for OMX+lmex on input line 127
-.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\lm\omxlmex.fd"
-File: omxlmex.fd 2009/10/30 v1.6 Font defs for Latin Modern
-)
-LaTeX Font Info:    External font `lmex10' loaded for size
-(Font)              <12> on input line 127.
-LaTeX Font Info:    External font `lmex10' loaded for size
-(Font)              <8> on input line 127.
-LaTeX Font Info:    External font `lmex10' loaded for size
-(Font)              <6> on input line 127.
-LaTeX Font Info:    Try loading font information for U+msa on input line 127.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\amsfonts\umsa.fd"
-File: umsa.fd 2013/01/14 v3.01 AMS symbols A
-)
-LaTeX Font Info:    Try loading font information for U+msb on input line 127.
-
-("C:\Program Files\MiKTeX 2.9\tex\latex\amsfonts\umsb.fd"
-File: umsb.fd 2013/01/14 v3.01 AMS symbols B
-)
-LaTeX Font Info:    External font `lmex10' loaded for size
-(Font)              <10> on input line 135.
-LaTeX Font Info:    External font `lmex10' loaded for size
-(Font)              <7> on input line 135.
-LaTeX Font Info:    External font `lmex10' loaded for size
-(Font)              <5> on input line 135.
- [1
-
-{C:/Users/hpl802/AppData/Local/MiKTeX/2.9/pdftex/config/pdftex.map}]
-Overfull \hbox (7.9696pt too wide) in paragraph at lines 207--207
-[] 
- []
-
-[2] [3] [4] [5
-
-]
-\Changes@OutFile=\write4
-\openout4 = `likelihood-derivatives-LVM.soc'.
-
-Package atveryend Info: Empty hook `BeforeClearDocument' on input line 305.
- [6]
-Package atveryend Info: Empty hook `AfterLastShipout' on input line 305.
- (likelihood-derivatives-LVM.aux)
-Package atveryend Info: Executing hook `AtVeryEndDocument' on input line 305.
-Package atveryend Info: Executing hook `AtEndAfterFileList' on input line 305.
-Package rerunfilecheck Info: File `likelihood-derivatives-LVM.out' has not chan
-ged.
-(rerunfilecheck)             Checksum: A8F3224F7456B3594EAF827253E8B27A;363.
-Package atveryend Info: Empty hook `AtVeryVeryEnd' on input line 305.
- ) 
-Here is how much of TeX's memory you used:
- 13119 strings out of 493323
- 191986 string characters out of 3139064
- 325936 words of memory out of 3000000
- 16359 multiletter control sequences out of 15000+200000
- 54590 words of font info for 61 fonts, out of 3000000 for 9000
- 1141 hyphenation exceptions out of 8191
- 53i,24n,92p,395b,338s stack positions out of 5000i,500n,10000p,200000b,50000s
-{C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathsy.enc}{C:/Program Fil
-es/MiKTeX 2.9/fonts/enc/dvips/lm/lm-mathit.enc}{C:/Program Files/MiKTeX 2.9/fon
-ts/enc/dvips/lm/lm-rm.enc}{C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-ma
-thex.enc}{C:/Program Files/MiKTeX 2.9/fonts/enc/dvips/lm/lm-ec.enc}<C:/Program 
-Files/MiKTeX 2.9/fonts/type1/public/amsfonts/latxfont/line10.pfb><C:/Program Fi
-les/MiKTeX 2.9/fonts/type1/public/lm/lmbx12.pfb><C:/Program Files/MiKTeX 2.9/fo
-nts/type1/public/lm/lmex10.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/public/
-lm/lmmi10.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi5.pfb><C:/
-Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmmi7.pfb><C:/Program Files/MiKT
-eX 2.9/fonts/type1/public/lm/lmmib10.pfb><C:/Program Files/MiKTeX 2.9/fonts/typ
-e1/public/lm/lmr10.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr12
-.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmr17.pfb><C:/Program F
-iles/MiKTeX 2.9/fonts/type1/public/lm/lmr5.pfb><C:/Program Files/MiKTeX 2.9/fon
-ts/type1/public/lm/lmr7.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/
-lmsy10.pfb><C:/Program Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy5.pfb><C:/Pro
-gram Files/MiKTeX 2.9/fonts/type1/public/lm/lmsy7.pfb><C:/Program Files/MiKTeX 
-2.9/fonts/type1/public/amsfonts/symbols/msam7.pfb><C:/Program Files/MiKTeX 2.9/
-fonts/type1/public/amsfonts/symbols/msbm10.pfb>
-Output written on likelihood-derivatives-LVM.pdf (6 pages, 216351 bytes).
-PDF statistics:
- 129 PDF objects out of 1000 (max. 8388607)
- 12 named destinations out of 1000 (max. 500000)
- 41 words of extra memory for PDF output out of 10000 (max. 10000000)
-
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.org b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.org
deleted file mode 100644
index ac53753..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.org
+++ /dev/null
@@ -1,306 +0,0 @@
-#+TITLE: Likelihood, first and second order derivatives in a LVM
-#+AUTHOR: Brice Ozenne
-#+DATE: 
-
-In this document, we give the expressions of the likelihood, its first
-two derivatives, the information matrix, and the first derivative of
-the information matrix.
-
-* Likelihood
-
-At the individual level, the measurement and structural models can be written:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\VY_i &= \nu + \Veta_i \Lambda + \VX_i K + \Vvarepsilon_i \\
-\Veta_i &= \alpha + \Veta_i B + \VX_i \Gamma + \boldsymbol{\zeta}_i 
-\end{align*}
-\begin{tabular}{lll}
-with & \(\Sigma_{\epsilon}\)   &the variance-covariance matrix of the residuals \(\Vvarepsilon_i\)\\
-     & \(\Sigma_{\zeta}\) & the variance-covariance matrix of the residuals \(\boldsymbol{\zeta}_i\). \\
-\end{tabular}
-#+END_EXPORT
-
-\bigskip
-
-By combining the previous equations, we can get an expression for
-\(\VY_i\) that does not depend on \(\Veta_i\):
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\VY_i &= \nu + \left(\boldsymbol{\zeta}_i + \alpha + \VX_i \Gamma \right) (I-B)^{-1} \Lambda + \VX_i K + \Vvarepsilon_i 
-\end{align*}
-#+END_EXPORT
-Since \(\Var[Ax] = A \Var[x] \trans{A}\), and therefore \(\Var[xA] =
-\trans{A} \Var[x] A\), we have the following expressions for the
-conditional mean and variance of \(\VY_i\):
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
- \Vmu(\Vparam,\VX_i) &= \Esp[\VY_i|\VX_i] = \nu + (\alpha + \VX_i \Gamma) (1-B)^{-1} \Lambda + \VX_i K \\
-\Omega(\Vparam) &= \Var[\VY_i|\VX_i] = \Lambda^t (1-B)^{-t}  \Sigma_{\zeta} (1-B)^{-1} \Lambda + \Sigma_{\varepsilon} 
-\end{align*}
-#+END_EXPORT
-
-\bigskip
- 
-where \(\Vparam\) is the collection of all parameters. The
-log-likelihood can be written:
- #+BEGIN_EXPORT LaTeX
-\begin{align*}
-l(\Vparam|\VY,\VX) &= \sum_{i=1}^n l(\Vparam|\VY_i,\VX_i) \\
-&= \sum_{i=1}^{n} - \frac{p}{2} log(2\pi) - \frac{1}{2} log|\Omega(\Vparam)| - \frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}
-\end{align*}
- #+END_EXPORT
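-
-As a rough illustration, these two moments and the individual
-log-likelihood can be computed directly in R. This sketch is not part
-of the package and all parameter values are made up:
-#+BEGIN_SRC R
-## toy model: 3 outcomes, 1 latent variable, 1 covariate
-nu     <- c(0, 0, 0)                     ## measurement intercepts
-Lambda <- matrix(1, nrow = 1, ncol = 3)  ## factor loadings
-K      <- matrix(0, nrow = 1, ncol = 3)  ## direct covariate effects
-alpha  <- 0                              ## latent intercept
-B      <- matrix(0, 1, 1)                ## regressions among latent variables
-Gamma  <- matrix(1, 1, 1)                ## covariate effect on the latent variable
-Psi    <- matrix(1, 1, 1)                ## variance of zeta
-Sigma  <- diag(3)                        ## variance of epsilon
-
-iIB   <- solve(diag(NROW(B)) - B)        ## (I-B)^{-1}
-Xi    <- matrix(0.5, 1, 1)               ## covariate value of subject i
-mu    <- nu + (alpha + Xi %*% Gamma) %*% iIB %*% Lambda + Xi %*% K
-Omega <- t(Lambda) %*% t(iIB) %*% Psi %*% iIB %*% Lambda + Sigma
-
-Yi <- c(1, 0.5, 0)                       ## outcome of subject i (row vector)
-r  <- Yi - mu                            ## residual
-ll.i <- - 3/2 * log(2*pi) - 1/2 * log(det(Omega)) - 1/2 * r %*% solve(Omega) %*% t(r)
-ll.i                                     ## log-likelihood of subject i
-#+END_SRC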
-
-* Partial derivative for the conditional mean and variance
-
-In the following, we denote by \(\delta_{\sigma \in \Sigma}\) the
-indicator matrix taking value 1 at the position of \(\sigma\) in the
-matrix \(\Sigma\). For instance:
-#+BEGIN_EXPORT latex
-\begin{align*}
-\Sigma =
-\begin{bmatrix}
- \sigma_{1,1} & \sigma_{1,2} & \sigma_{1,3} \\
- \sigma_{1,2} & \sigma_{2,2} & \sigma_{2,3} \\
- \sigma_{1,3} & \sigma_{2,3} & \sigma_{3,3} \\
-\end{bmatrix}
- \qquad 
-\delta_{\sigma_{1,2} \in \Sigma} =
-\begin{bmatrix}
-0 & 1 & 0 \\
-1 & 0 & 0 \\
-0 & 0 & 0 \\
-\end{bmatrix}
-\end{align*}
-#+END_EXPORT
-The same goes for \(\delta_{\lambda \in \Lambda}\), \(\delta_{b \in
-B}\), and \(\delta_{\psi \in \Psi}\), where \(\Psi = \Sigma_{\zeta}\)
-denotes the variance-covariance matrix of the residuals \(\boldsymbol{\zeta}_i\).
-
-\bigskip
-
-First order derivatives:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \nu} &= 1 \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial K} &= \VX_i \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \alpha} &= (1-B)^{-1}\Lambda \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \Gamma} &= \VX_i(1-B)^{-1}\Lambda \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \lambda} &= (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{\lambda \in \Lambda} \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial b} &= (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{b \in B}(1-B)^{-1}\Lambda \\
- &\\
- \frac{\partial \Omega(\Vparam)}{\partial \psi} &= \Lambda^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \Lambda \\
- \frac{\partial \Omega(\Vparam)}{\partial \sigma} &= \delta_{\sigma \in \Sigma} \\
- \frac{\partial \Omega(\Vparam)}{\partial \lambda} &= \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial \Omega(\Vparam)}{\partial b} &= \Lambda^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda\\
-\end{align*}
-#+END_EXPORT
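-
-These closed forms can be sanity-checked by finite differences. A
-purely illustrative sketch for \(\dpartial{\Omega(\Vparam)}{\lambda}\),
-with made-up parameter values (this is not package code):
-#+BEGIN_SRC R
-Lambda <- matrix(1, 1, 3); Psi <- matrix(1, 1, 1)  ## made-up values
-Sigma  <- diag(3); iIB <- matrix(1, 1, 1)          ## (I-B)^{-1} with B = 0
-Omega.fun <- function(Lambda) {
-    t(Lambda) %*% t(iIB) %*% Psi %*% iIB %*% Lambda + Sigma
-}
-dLambda <- matrix(0, 1, 3); dLambda[1, 2] <- 1     ## indicator of lambda_2
-## analytic derivative (first-order formula above)
-dOmega <- t(dLambda) %*% t(iIB) %*% Psi %*% iIB %*% Lambda +
-    t(Lambda) %*% t(iIB) %*% Psi %*% iIB %*% dLambda
-## numerical derivative
-h <- 1e-6
-range(dOmega - (Omega.fun(Lambda + h * dLambda) - Omega.fun(Lambda)) / h)
-#+END_SRC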
-
-Second order derivatives:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \alpha \partial b} &= \delta_{\alpha} (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \alpha \partial \lambda} &= \delta_{\alpha} (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \Gamma \partial b} &= \VX_i (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \Gamma \partial \lambda} &= \VX_i (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \lambda \partial b } &=  (\alpha + \VX_i \Gamma)(1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial b \partial b'} &= (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{b' \in B}(1-B)^{-1}\delta_{b \in B}(1-B)^{-1}\Lambda \\
-& + (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{b \in B}(1-B)^{-1}\delta_{b' \in B}(1-B)^{-1}\Lambda  \\
-& \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \psi \partial \lambda} &=  \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \Lambda  \\
-& + \Lambda^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \delta_{\lambda \in \Lambda}  \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \psi \partial b} &= \Lambda^t (1-B)^{-t} \delta^t_{b \in B} (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda  \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \lambda \partial b} &= \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \delta^t_{b \in B} (1-B)^{-t} \Psi (1-B)^{-1} \Lambda \\
-& + \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta^t_{b \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta^t_{b \in B} (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
-& + \Lambda^t (1-B)^{-t}  \Psi (1-B)^{-1} \delta^t_{b \in B} (1-B)^{-1} \delta_{\lambda \in \Lambda}  \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \lambda \partial \lambda'} &= \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda' \in \Lambda} \\
-& + \delta_{\lambda' \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda \in \Lambda}   \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial b \partial b'} &= \Lambda^t (1-B)^{-t} \delta_{b' \in B}^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \delta_{b' \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b' \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{b' \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b' \in B} (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \delta_{b' \in B} (1-B)^{-1} \Lambda \\
-\end{align*}
-#+END_EXPORT
-* First derivative: score
-The individual score is obtained by differentiating the log-likelihood:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-   \Score(\param|\VY_i,\VX_i) =& \dpartial{l_i(\Vparam|\VY_i,\VX_i)}{\param}\\
- =& - \frac{1}{2} tr\left(\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param}\right) \\
- &+  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &+ \frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}
-\end{align*}
-#+END_EXPORT
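-
-This expression can be checked against a numerical gradient of the
-log-likelihood. The sketch below uses lava together with the suggested
-numDeriv package; the model and data are simulated ad hoc and are not
-part of the package:
-#+BEGIN_SRC R
-library(lava)
-set.seed(10)
-m <- lvm(c(Y1, Y2, Y3) ~ eta, eta ~ X)
-latent(m) <- ~eta
-d <- lava::sim(m, n = 50)
-e <- estimate(m, data = d)
-theta <- coef(e) + 0.1   ## move away from the MLE so the score is non-zero
-
-if (requireNamespace("numDeriv", quietly = TRUE)) {
-    num <- numDeriv::grad(function(p) as.numeric(logLik(e, p = p)), theta)
-    ana <- score(e, p = theta)       ## total analytic score at theta
-    range(num - as.numeric(ana))     ## should be numerically ~0
-}
-#+END_SRC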
-
-* Second derivative: Hessian and expected information
-:PROPERTIES:
-:CUSTOM_ID: SM:Information
-:END:
-The individual Hessian is obtained by differentiating the
-log-likelihood twice:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-   \Hessian_i(\param,\param') =& -\frac{1}{2} tr\left(-\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} + \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'}\right) \\
- &+  \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param'} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}  \\
- &-  (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &+ \frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
-\end{align*}
-#+END_EXPORT
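-
-Continuing the previous sketch, the second derivative can be obtained
-numerically; at the MLE, minus the numerical Hessian should be close
-(though, in finite samples, not identical) to the expected information
-computed by lava:
-#+BEGIN_SRC R
-if (requireNamespace("numDeriv", quietly = TRUE)) {
-    H <- numDeriv::hessian(function(p) as.numeric(logLik(e, p = p)), coef(e))
-    range(-H - information(e))  ## observed vs expected information: small, not 0
-}
-#+END_SRC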
-
-\clearpage
-
-Using that \(\Vmu(\Vparam,\VX_i)\) and \(\Omega(\Vparam)\) are deterministic quantities,
-we can then take the expectation to obtain:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\Esp\left[\Hessian_i(\param,\param')\right] =& -\frac{1}{2} tr\left(-\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} + \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'}\right) \\
- &+  \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \Ccancelto[red]{0}{\Esp\left[\trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right]} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \Ccancelto[red]{0}{\Esp\left[\trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right]} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param'} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param} \Omega(\Vparam)^{-1} \Ccancelto[red]{0}{\Esp\left[\trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right]}  \\
- &-  \Esp\left[(\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right] \\
- &+ \Esp \left[\frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right] \\
-\end{align*}
-#+END_EXPORT
-
-The last two expectations can be rewritten using that \(\Esp[x A \trans{x}] = tr\left(A\Var[x]\right) + \Esp[x] A \trans{\Esp[x]}\) for a random row vector \(x\):
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\Esp\left[\Hessian_i(\param,\param')\right] =& -\frac{1}{2} tr\left(-\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} + \Ccancel[red]{\Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'}}\right) \\
- &-  \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param} \Omega(\Vparam)^{-1} \trans{\frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param'}} \\
- &- tr\left(\Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \trans{\left(\Var\left[\VY_i-\Vmu(\Vparam,\VX_i)\right]\right)} \right) \\
- &+ \Ccancel[red]{\frac{1}{2} tr\left( \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'} \Ccancel[blue]{\Omega(\Vparam)^{-1}} \Ccancel[blue]{\trans{\left(\Var\left[\VY_i-\Vmu(\Vparam,\VX_i)\right]\right)}} \right)} \\
-\end{align*}
-#+END_EXPORT
-where we have used that \(\Var\left[\VY_i-\Vmu(\Vparam,\VX_i)\right] =
-\Var\left[\VY_i|\VX_i\right] = \Omega(\Vparam)\). Finally we get:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\Esp\left[\Hessian_i(\param,\param')\right] =& -\frac{1}{2} tr\left(\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param}\right) \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
-\end{align*}
-#+END_EXPORT
-Summing over the \(n\) independent observations and changing the sign,
-i.e. using \(\Information(\param,\param') = - \sum_{i=1}^n
-\Esp\left[\Hessian_i(\param,\param')\right]\), we deduce the expected
-information matrix:
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\Information(\param,\param') =& \frac{n}{2} tr\left(\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param}\right) 
- + \sum_{i=1}^n \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}}
-\end{align*}
-#+END_EXPORT
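-
-For instance, since \(\Vmu\) does not depend on \(\sigma_{1,1}\), only
-the trace term contributes to the \((\sigma_{1,1},\sigma_{1,1})\) entry
-of the information. An illustrative sketch with made-up values:
-#+BEGIN_SRC R
-n      <- 50
-Omega  <- matrix(1, 3, 3) + diag(3)          ## made-up variance matrix
-dOmega <- matrix(0, 3, 3); dOmega[1, 1] <- 1 ## indicator of sigma_{1,1}
-iOmega <- solve(Omega)
-n/2 * sum(diag(iOmega %*% dOmega %*% iOmega %*% dOmega))
-#+END_SRC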
-
-* First derivatives of the information matrix
-#+BEGIN_EXPORT LaTeX
-\begin{align*}
-\frac{\partial \Information(\param,\param')}{\partial \param''} 
-=& - \frac{n}{2} tr\left(\Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param''} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'}\right) \\
-& + \frac{n}{2} tr\left( \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial\param\partial\param''} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'}\right) \\
-& - \frac{n}{2} tr\left(\Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param''} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'}\right) \\
-& + \frac{n}{2} tr\left( \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial\param} \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param' \partial \param''}\right) \\
-& + \sum_{i=1}^n \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial\param\partial\param''} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
- & + \sum_{i=1}^n \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param} \Omega(\Vparam)^{-1} \trans{\ddpartial{\Vmu(\Vparam,\VX_i)}{\param'}{\param''}} \\
-& - \sum_{i=1}^n \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param''} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
-\end{align*}
-#+END_EXPORT
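-
-In the example above, \(\Omega\) is linear in \(\sigma_{1,1}\), so the
-second-derivative terms vanish and, for \(\param = \param' = \param''
-= \sigma_{1,1}\), the display reduces to \(-n \, tr\left(\Omega^{-1}
-\dpartial{\Omega}{\sigma_{1,1}} \Omega^{-1}
-\dpartial{\Omega}{\sigma_{1,1}} \Omega^{-1}
-\dpartial{\Omega}{\sigma_{1,1}}\right)\), which a finite difference
-confirms (illustrative sketch, made-up values):
-#+BEGIN_SRC R
-n <- 50
-I.fun <- function(s11) {  ## information entry as a function of sigma_{1,1}
-    Omega  <- matrix(1, 3, 3) + diag(c(s11, 1, 1))
-    dOmega <- matrix(0, 3, 3); dOmega[1, 1] <- 1
-    iOmega <- solve(Omega)
-    n/2 * sum(diag(iOmega %*% dOmega %*% iOmega %*% dOmega))
-}
-iOmega <- solve(matrix(1, 3, 3) + diag(3))
-dOmega <- matrix(0, 3, 3); dOmega[1, 1] <- 1
-ana <- -n * sum(diag(iOmega %*% dOmega %*% iOmega %*% dOmega %*% iOmega %*% dOmega))
-num <- (I.fun(1 + 1e-5) - I.fun(1)) / 1e-5
-c(analytic = ana, numeric = num)
-#+END_SRC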
-
-
-* Config                                                           :noexport:
-
-#+LANGUAGE:  en
-#+LaTeX_CLASS: org-article
-#+OPTIONS: author:t date:nil todo:t title:t toc:nil
-#+LaTeX_class_options: [table] 
-#+STARTUP: content
-
-** R code 
-#+PROPERTY: session *R*
-
-** Margins and space between lines
-# ## set margin
-#+LATEX_HEADER: \geometry{innermargin=1.5in,outermargin=1.25in,vmargin=3cm}
-# ## set space between lines
-#+LATEX_HEADER: \linespread{1.4}
-
-** Figures
-#+LATEX_HEADER: \usepackage{epstopdf} % to be able to convert .eps to .pdf image files
-#+LATEX_HEADER: \renewcommand{\thefigure}{S\arabic{figure}}
-#+LATEX_HEADER: \renewcommand{\thetable}{S\arabic{table}}
-#+LATEX_HEADER: \renewcommand{\theequation}{S\arabic{equation}}
-
-# # for figure S1
-#+LaTeX_HEADER: \usepackage{caption}
-#+LaTeX_HEADER: \usepackage[labelformat=simple]{subcaption}
-#+LaTeX_HEADER: \renewcommand{\thesubfigure}{Study \Alph{subfigure}}
-
-** Tables 
-#+LaTeX_HEADER: \usepackage{booktabs}
-
-** Algorithm
-#+LATEX_HEADER: \usepackage{algorithm2e}
-#+LaTeX_HEADER: \usepackage{amsthm}
- 
-** Math - shortcut
-#+LATEX_HEADER: \usepackage{amsthm,dsfont,amsmath}
-#+LaTeX_HEADER: \newtheorem{lemma}{Lemma}
-
-#+LaTeX_HEADER:\newcommand{\Vn}{\mathbf{n}}
-#+LaTeX_HEADER:\newcommand{\X}{X}
-#+LaTeX_HEADER:\newcommand{\VX}{\boldsymbol{X}}
-#+LaTeX_HEADER:\newcommand{\Y}{Y}
-#+LaTeX_HEADER:\newcommand{\VY}{\boldsymbol{Y}}
-#+LaTeX_HEADER:\newcommand{\Vy}{\boldsymbol{y}}
-#+LaTeX_HEADER:\newcommand{\VZ}{\boldsymbol{Z}}
-#+LaTeX_HEADER:\newcommand{\Veta}{\boldsymbol{\eta}}
-#+LaTeX_HEADER:\newcommand{\Vvarepsilon}{\boldsymbol{\varepsilon}}
-
-#+LaTeX_HEADER:\newcommand{\set}{\mathcal{S}}
-#+LaTeX_HEADER:\newcommand{\Vmu}{\boldsymbol{\mu}}
-#+LaTeX_HEADER:\newcommand{\Vxi}{\boldsymbol{\xi}}
-
-#+LaTeX_HEADER:\newcommand{\param}{\theta}
-#+LaTeX_HEADER:\newcommand{\paramHat}{\hat{\param}}
-#+LaTeX_HEADER:\newcommand{\Vparam}{\boldsymbol{\param}}
-#+LaTeX_HEADER:\newcommand{\VparamHat}{\boldsymbol{\paramHat}}
-
-#+LATEX_HEADER: \newcommand\Hessian{\mathcal{H}}
-#+LATEX_HEADER: \newcommand\Likelihood{\mathcal{L}}
-#+LATEX_HEADER: \newcommand\Information{\mathcal{I}}
-#+LATEX_HEADER: \newcommand\Score{\mathcal{U}}
-#+LATEX_HEADER: \newcommand\Hypothesis{\mathcal{H}}
-
-#+LATEX_HEADER: \newcommand\Real{\mathbb{R}}
-#+LaTeX_HEADER: \newcommand\half{\frac{1}{2}}
-
-** Math - operator
-
-#+LATEX_HEADER: \newcommand\Ind[1]{\mathds{1}_{#1}}
-#+LATEX_HEADER: \newcommand\dpartial[2]{\frac{\partial #1}{\partial #2}}
-#+LATEX_HEADER: \newcommand\ddpartial[3]{\frac{\partial^2 #1}{\partial #2 \partial #3}}
-
-#+LATEX_HEADER: \newcommand\Esp{\mathbb{E}}
-#+LATEX_HEADER: \newcommand\Var{\mathbb{V}ar}
-#+LATEX_HEADER: \newcommand\Cov{\mathbb{C}ov}
-#+LATEX_HEADER: \newcommand\Gaus{\mathcal{N}}
-
-#+LATEX_HEADER: \newcommand\trans[1]{{#1}^\intercal}%\newcommand\trans[1]{{\vphantom{#1}}^\top{#1}}
-
-#+LATEX_HEADER: \newcommand{\independent}{\mathrel{\text{\scalebox{1.5}{$\perp\mkern-10mu\perp$}}}}
-
-** Math - cancel
-#+LaTeX_HEADER: \RequirePackage[makeroom]{cancel} 
-#+LaTeX_HEADER: \newcommand\Ccancelto[3][black]{\renewcommand\CancelColor{\color{#1}}\cancelto{#2}{#3}}
-#+LaTeX_HEADER: \newcommand\Ccancel[2][black]{\renewcommand\CancelColor{\color{#1}}\cancel{#2}}
-
-** Local Words
-#  LocalWords:  REML JRSS Kenward bootstrapLavaan boldsymbol Veta VX
-#  LocalWords:  Vvarepsilon sim Gaus varepsilon eq notag frac mathcal
-#  LocalWords:  df qquad ldots nabla dpartial nablaFtheta ddpartial
-#  LocalWords:  biasVcov OmegaOmegahat infty eqref biasOmega VY BDNF
-#  LocalWords:  correctedN dScoredY RestyleAlgo boxruled textbf ATTR
-#  LocalWords:  leftarrow intercal widehat HTTLPR begingroup endgroup
-#  LocalWords:  renewcommand textwidth subfigure includegraphics
-#  LocalWords:  linewidth GraphSimul graphFactorModel orgmode
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.out b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.out
deleted file mode 100644
index 39caaff..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.out
+++ /dev/null
@@ -1,5 +0,0 @@
-\BOOKMARK [1][-]{section.1}{Likelihood}{}% 1
-\BOOKMARK [1][-]{section.2}{Partial derivative for the conditional mean and variance}{}% 2
-\BOOKMARK [1][-]{section.3}{First derivative: score}{}% 3
-\BOOKMARK [1][-]{section.4}{Second derivative: Hessian and expected information}{}% 4
-\BOOKMARK [1][-]{section.5}{First derivatives of the information matrix}{}% 5
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.pdf b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.pdf
deleted file mode 100644
index 29a751a..0000000
Binary files a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.pdf and /dev/null differ
diff --git a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.tex b/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.tex
deleted file mode 100644
index f787d09..0000000
--- a/inst/likelihood-derivatives-LVM/likelihood-derivatives-LVM.tex
+++ /dev/null
@@ -1,305 +0,0 @@
-% Created 2019-12-12 to 09:19
-% Intended LaTeX compiler: pdflatex
-\documentclass[table]{article}
-
-%%%% settings when exporting code %%%% 
-
-\usepackage{listings}
-\lstset{
-backgroundcolor=\color{white},
-basewidth={0.5em,0.4em},
-basicstyle=\ttfamily\small,
-breakatwhitespace=false,
-breaklines=true,
-columns=fullflexible,
-commentstyle=\color[rgb]{0.5,0,0.5},
-frame=single,
-keepspaces=true,
-keywordstyle=\color{black},
-literate={~}{$\sim$}{1},
-numbers=left,
-numbersep=10pt,
-numberstyle=\ttfamily\tiny\color{gray},
-showspaces=false,
-showstringspaces=false,
-stepnumber=1,
-stringstyle=\color[rgb]{0,.5,0},
-tabsize=4,
-xleftmargin=.23in,
-emph={anova,apply,class,coef,colnames,colNames,colSums,dim,dcast,for,ggplot,head,if,ifelse,is.na,lapply,list.files,library,logLik,melt,plot,require,rowSums,sapply,setcolorder,setkey,str,summary,tapply},
-emphstyle=\color{blue}
-}
-
-%%%% packages %%%%%
-
-\usepackage[utf8]{inputenc}
-\usepackage[T1]{fontenc}
-\usepackage{lmodern}
-\usepackage{textcomp}
-\usepackage{color}
-\usepackage{enumerate}
-\usepackage{graphicx}
-\usepackage{grffile}
-\usepackage{wrapfig}
-\usepackage{rotating}
-\usepackage{longtable}
-\usepackage{multirow}
-\usepackage{multicol}
-\usepackage{changes}
-\usepackage{pdflscape}
-\usepackage{geometry}
-\usepackage[normalem]{ulem}
-\usepackage{amssymb}
-\usepackage{amsmath}
-\usepackage{amsfonts}
-\usepackage{dsfont}
-\usepackage{array}
-\usepackage{ifthen}
-\usepackage{hyperref}
-\usepackage{natbib}
-\geometry{innermargin=1.5in,outermargin=1.25in,vmargin=3cm}
-\linespread{1.4}
-\usepackage{epstopdf} % to be able to convert .eps to .pdf image files
-\renewcommand{\thefigure}{S\arabic{figure}}
-\renewcommand{\thetable}{S\arabic{table}}
-\renewcommand{\theequation}{S\arabic{equation}}
-\usepackage{caption}
-\usepackage[labelformat=simple]{subcaption}
-\renewcommand{\thesubfigure}{Study \Alph{subfigure}}
-\usepackage{booktabs}
-\usepackage{algorithm2e}
-\usepackage{amsthm}
-\usepackage{amsthm,dsfont,amsmath}
-\newtheorem{lemma}{Lemma}
-\newcommand{\Vn}{\mathbf{n}}
-\newcommand{\X}{X}
-\newcommand{\VX}{\boldsymbol{X}}
-\newcommand{\Y}{Y}
-\newcommand{\VY}{\boldsymbol{Y}}
-\newcommand{\Vy}{\boldsymbol{y}}
-\newcommand{\VZ}{\boldsymbol{Z}}
-\newcommand{\Veta}{\boldsymbol{\eta}}
-\newcommand{\Vvarepsilon}{\boldsymbol{\varepsilon}}
-\newcommand{\set}{\mathcal{S}}
-\newcommand{\Vmu}{\boldsymbol{\mu}}
-\newcommand{\Vxi}{\boldsymbol{\xi}}
-\newcommand{\param}{\theta}
-\newcommand{\paramHat}{\hat{\param}}
-\newcommand{\Vparam}{\boldsymbol{\param}}
-\newcommand{\VparamHat}{\boldsymbol{\paramHat}}
-\newcommand\Hessian{\mathcal{H}}
-\newcommand\Likelihood{\mathcal{L}}
-\newcommand\Information{\mathcal{I}}
-\newcommand\Score{\mathcal{U}}
-\newcommand\Hypothesis{\mathcal{H}}
-\newcommand\Real{\mathbb{R}}
-\newcommand\half{\frac{1}{2}}
-\newcommand\Ind[1]{\mathds{1}_{#1}}
-\newcommand\dpartial[2]{\frac{\partial #1}{\partial #2}}
-\newcommand\ddpartial[3]{\frac{\partial^2 #1}{\partial #2 \partial #3}}
-\newcommand\Esp{\mathbb{E}}
-\newcommand\Var{\mathbb{V}ar}
-\newcommand\Cov{\mathbb{C}ov}
-\newcommand\Gaus{\mathcal{N}}
-\newcommand\trans[1]{{#1}^\intercal}%\newcommand\trans[1]{{\vphantom{#1}}^\top{#1}}
-\newcommand{\independent}{\mathrel{\text{\scalebox{1.5}{$\perp\mkern-10mu\perp$}}}}
-\RequirePackage[makeroom]{cancel}
-\newcommand\Ccancelto[3][black]{\renewcommand\CancelColor{\color{#1}}\cancelto{#2}{#3}}
-\newcommand\Ccancel[2][black]{\renewcommand\CancelColor{\color{#1}}\cancel{#2}}
-\author{Brice Ozenne}
-\date{}
-\title{Likelihood, first and second order derivatives in a LVM}
-\hypersetup{
- colorlinks=true,
- citecolor=[rgb]{0,0.5,0},
- urlcolor=[rgb]{0,0,0.5},
- linkcolor=[rgb]{0,0,0.5},
- pdfauthor={Brice Ozenne},
- pdftitle={Likelihood, first and second order derivatives in a LVM},
- pdfkeywords={},
- pdfsubject={},
- pdfcreator={Emacs 25.2.1 (Org mode 9.0.4)},
- pdflang={English}
- }
-\begin{document}
-
-\maketitle
-In this document, we give the expressions of the likelihood, its first
-two derivatives, the information matrix, and the first derivative of
-the information matrix.
-
-\section{Likelihood}
-\label{sec:org20faa29}
-
-At the individual level, the measurement and structural models can be written:
-\begin{align*}
-\VY_i &= \nu + \Veta_i \Lambda + \VX_i K + \Vvarepsilon_i \\
-\Veta_i &= \alpha + \Veta_i B + \VX_i \Gamma + \boldsymbol{\zeta}_i 
-\end{align*}
-\begin{tabular}{lll}
-with & \(\Sigma_{\epsilon}\)   &the variance-covariance matrix of the residuals \(\Vvarepsilon_i\)\\
-     & \(\Sigma_{\zeta}\) & the variance-covariance matrix of the residuals \(\boldsymbol{\zeta}_i\). \\
-\end{tabular}
-
-\bigskip
-
-By combining the previous equations, we can get an expression for
-\(\VY_i\) that does not depend on \(\Veta_i\):
-\begin{align*}
-\VY_i &= \nu + \left(\boldsymbol{\zeta}_i + \alpha + \VX_i \Gamma \right) (I-B)^{-1} \Lambda + \VX_i K + \Vvarepsilon_i 
-\end{align*}
-Since \(\Var[Ax] = A \Var[x] \trans{A}\), and therefore \(\Var[xA] =
-\trans{A} \Var[x] A\), we have the following expressions for the
-conditional mean and variance of \(\VY_i\):
-\begin{align*}
- \Vmu(\Vparam,\VX_i) &= \Esp[\VY_i|\VX_i] = \nu + (\alpha + \VX_i \Gamma) (1-B)^{-1} \Lambda + \VX_i K \\
-\Omega(\Vparam) &= \Var[\VY_i|\VX_i] = \Lambda^t (1-B)^{-t}  \Sigma_{\zeta} (1-B)^{-1} \Lambda + \Sigma_{\varepsilon} 
-\end{align*}
-
-\bigskip
-
-where \(\Vparam\) is the collection of all parameters. The
-log-likelihood can be written:
-\begin{align*}
-l(\Vparam|\VY,\VX) &= \sum_{i=1}^n l(\Vparam|\VY_i,\VX_i) \\
-&= \sum_{i=1}^{n} - \frac{p}{2} log(2\pi) - \frac{1}{2} log|\Omega(\Vparam)| - \frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}
-\end{align*}
-
-\section{Partial derivative for the conditional mean and variance}
-\label{sec:orgef944c3}
-
-In the following, we denote by \(\delta_{\sigma \in \Sigma}\) the
-indicator matrix taking value 1 at the position of \(\sigma\) in the
-matrix \(\Sigma\). For instance:
-\begin{align*}
-\Sigma =
-\begin{bmatrix}
- \sigma_{1,1} & \sigma_{1,2} & \sigma_{1,3} \\
- \sigma_{1,2} & \sigma_{2,2} & \sigma_{2,3} \\
- \sigma_{1,3} & \sigma_{2,3} & \sigma_{3,3} \\
-\end{bmatrix}
- \qquad 
-\delta_{\sigma_{1,2} \in \Sigma} =
-\begin{bmatrix}
-0 & 1 & 0 \\
-1 & 0 & 0 \\
-0 & 0 & 0 \\
-\end{bmatrix}
-\end{align*}
-The same goes for \(\delta_{\lambda \in \Lambda}\), \(\delta_{b \in
-B}\), and \(\delta_{\psi \in \Psi}\), where \(\Psi = \Sigma_{\zeta}\)
-denotes the variance-covariance matrix of the residuals \(\boldsymbol{\zeta}_i\).
-
-\bigskip
-
-First order derivatives:
-\begin{align*}
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \nu} &= 1 \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial K} &= \VX_i \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \alpha} &= (1-B)^{-1}\Lambda \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \Gamma} &= \VX_i(1-B)^{-1}\Lambda \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \lambda} &= (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{\lambda \in \Lambda} \\
- \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial b} &= (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{b \in B}(1-B)^{-1}\Lambda \\
- &\\
- \frac{\partial \Omega(\Vparam)}{\partial \psi} &= \Lambda^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \Lambda \\
- \frac{\partial \Omega(\Vparam)}{\partial \sigma} &= \delta_{\sigma \in \Sigma} \\
- \frac{\partial \Omega(\Vparam)}{\partial \lambda} &= \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial \Omega(\Vparam)}{\partial b} &= \Lambda^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda\\
-\end{align*}
-
-Second order derivatives:
-\begin{align*}
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \alpha \partial b} &= \delta_{\alpha} (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \alpha \partial \lambda} &= \delta_{\alpha} (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \Gamma \partial b} &= \VX_i (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \Gamma \partial \lambda} &= \VX_i (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \lambda \partial b } &=  (\alpha + \VX_i \Gamma)(1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
- \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial b \partial b'} &= (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{b' \in B}(1-B)^{-1}\delta_{b \in B}(1-B)^{-1}\Lambda \\
-& + (\alpha + \VX_i \Gamma)(1-B)^{-1}\delta_{b \in B}(1-B)^{-1}\delta_{b' \in B}(1-B)^{-1}\Lambda  \\
-& \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \psi \partial \lambda} &=  \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \Lambda  \\
-& + \Lambda^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \delta_{\lambda \in \Lambda}  \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \psi \partial b} &= \Lambda^t (1-B)^{-t} \delta^t_{b \in B} (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{\psi \in \Psi} (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda  \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \lambda \partial b} &= \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \delta^t_{b \in B} (1-B)^{-t} \Psi (1-B)^{-1} \Lambda \\
-& + \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta^t_{b \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta^t_{b \in B} (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda \in \Lambda} \\
-& + \Lambda^t (1-B)^{-t}  \Psi (1-B)^{-1} \delta^t_{b \in B} (1-B)^{-1} \delta_{\lambda \in \Lambda}  \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial \lambda \partial \lambda'} &= \delta_{\lambda \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda' \in \Lambda} \\
-& + \delta_{\lambda' \in \Lambda}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{\lambda \in \Lambda}   \\
- \frac{\partial^2 \Omega(\Vparam)}{\partial b \partial b'} &= \Lambda^t (1-B)^{-t} \delta_{b' \in B}^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \delta_{b' \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{b \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b' \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \delta_{b' \in B}^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b' \in B} (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \Lambda \\
-& + \Lambda^t (1-B)^{-t} \Psi (1-B)^{-1} \delta_{b \in B} (1-B)^{-1} \delta_{b' \in B} (1-B)^{-1} \Lambda \\
-\end{align*}
-\section{First derivative: score}
-\label{sec:orga852a0f}
-The individual score is obtained by differentiating the log-likelihood:
-\begin{align*}
-   \Score(\param|\VY_i,\VX_i) =& \dpartial{l_i(\Vparam|\VY_i,\VX_i)}{\param}\\
- =& - \frac{1}{2} tr\left(\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param}\right) \\
- &+  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &+ \frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}
-\end{align*}
-
-\section{Second derivative: Hessian and expected information}
-\label{SM:Information}
-The individual Hessian is obtained by differentiating the
-log-likelihood twice:
-\begin{align*}
-   \Hessian_i(\param,\param') =& -\frac{1}{2} tr\left(-\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} + \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'}\right) \\
- &+  \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param'} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}  \\
- &-  (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
- &+ \frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))} \\
-\end{align*}
-
-\clearpage
-
-Using that \(\Vmu(\Vparam,\VX_i)\) and \(\Omega(\Vparam)\) are deterministic quantities,
-we can then take the expectation to obtain:
-\begin{align*}
-\Esp\left[\Hessian_i(\param,\param')\right] =& -\frac{1}{2} tr\left(-\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} + \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'}\right) \\
- &+  \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \Ccancelto[red]{0}{\Esp\left[\trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right]} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \Ccancelto[red]{0}{\Esp\left[\trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right]} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param'} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param} \Omega(\Vparam)^{-1} \Ccancelto[red]{0}{\Esp\left[\trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right]}  \\
- &-  \Esp\left[(\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right] \\
- &+ \Esp \left[\frac{1}{2} (\VY_i-\Vmu(\Vparam,\VX_i)) \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'} \Omega(\Vparam)^{-1} \trans{(\VY_i-\Vmu(\Vparam,\VX_i))}\right] \\
-\end{align*}
-
-The last two expectations can be rewritten using that \(\Esp[x A \trans{x}] = tr\left(A\Var[x]\right) + \Esp[x] A \trans{\Esp[x]}\) for a random row vector \(x\):
-\begin{align*}
-\Esp\left[\Hessian_i(\param,\param')\right] =& -\frac{1}{2} tr\left(-\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} + \Ccancel[red]{\Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'}}\right) \\
- &-  \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param} \Omega(\Vparam)^{-1} \trans{\frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param'}} \\
- &- tr\left(\Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \trans{\left(\Var\left[\VY_i-\Vmu(\Vparam,\VX_i)\right]\right)} \right) \\
- &+ \Ccancel[red]{\frac{1}{2} tr\left( \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param \partial \param'} \Ccancel[blue]{\Omega(\Vparam)^{-1}} \Ccancel[blue]{\trans{\left(\Var\left[\VY_i-\Vmu(\Vparam,\VX_i)\right]\right)}} \right)} \\
-\end{align*}
-where we have used that \(\Var\left[\VY_i-\Vmu(\Vparam,\VX_i)\right] =
-\Var\left[\VY_i|\VX_i\right] = \Omega(\Vparam)\). Finally we get:
-\begin{align*}
-\Esp\left[\Hessian_i(\param,\param')\right] =& -\frac{1}{2} tr\left(\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param}\right) \\
- &-  \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
-\end{align*}
-Summing over the \(n\) independent observations and changing the sign,
-i.e. using \(\Information(\param,\param') = - \sum_{i=1}^n
-\Esp\left[\Hessian_i(\param,\param')\right]\), we deduce the expected
-information matrix:
-\begin{align*}
-\Information(\param,\param') =& \frac{n}{2} tr\left(\Omega(\Vparam)^{-1} \dpartial{\Omega(\Vparam)}{\param'} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param}\right) 
- + \sum_{i=1}^n \dpartial{\Vmu(\Vparam,\VX_i)}{\param} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}}
-\end{align*}
-
-\section{First derivatives of the information matrix}
-\label{sec:org27a8d69}
-\begin{align*}
-\frac{\partial \Information(\param,\param')}{\partial \param''} 
-=& - \frac{n}{2} tr\left(\Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param''} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'}\right) \\
-& + \frac{n}{2} tr\left( \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial\param\partial\param''} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'}\right) \\
-& - \frac{n}{2} tr\left(\Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param''} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param'}\right) \\
-& + \frac{n}{2} tr\left( \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial\param} \Omega(\Vparam)^{-1} \frac{\partial^2 \Omega(\Vparam)}{\partial \param' \partial \param''}\right) \\
-& + \sum_{i=1}^n \frac{\partial^2 \Vmu(\Vparam,\VX_i)}{\partial\param\partial\param''} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
- & + \sum_{i=1}^n \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param} \Omega(\Vparam)^{-1} \trans{\ddpartial{\Vmu(\Vparam,\VX_i)}{\param'}{\param''}} \\
-& - \sum_{i=1}^n \frac{\partial \Vmu(\Vparam,\VX_i)}{\partial \param} \Omega(\Vparam)^{-1} \frac{\partial \Omega(\Vparam)}{\partial \param''} \Omega(\Vparam)^{-1} \trans{\dpartial{\Vmu(\Vparam,\VX_i)}{\param'}} \\
-\end{align*}
-\end{document}
\ No newline at end of file
diff --git a/man/autoplot.modelsearch2.Rd b/man/autoplot.modelsearch2.Rd
index 0263549..cb1c8c2 100644
--- a/man/autoplot.modelsearch2.Rd
+++ b/man/autoplot.modelsearch2.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/methods-modelsearch2.R
+% Please edit documentation in R/modelsearch2-methods.R
 \name{autplot-modelsearch2}
 \alias{autplot-modelsearch2}
 \alias{autoplot.modelsearch2}
diff --git a/man/calcDistMax.Rd b/man/calcDistMax.Rd
index 02de12c..db1d2c1 100644
--- a/man/calcDistMax.Rd
+++ b/man/calcDistMax.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/calcDistMax.R
+% Please edit documentation in R/modelsearch2-calcDistMax.R
 \name{calcDistMax}
 \alias{calcDistMax}
 \alias{calcDistMaxIntegral}
diff --git a/man/calibrateType1.Rd b/man/calibrateType1.Rd
index 52624af..1976b74 100644
--- a/man/calibrateType1.Rd
+++ b/man/calibrateType1.Rd
@@ -98,7 +98,7 @@ Can also be \code{NULL}: in such a case the results are not exported.}
 
 \item{label.file}{[character] element to include in the file name.}
 
-\item{seed}{[integer, >0] seed value that will be set at the beginning of the simulation to enable eproducibility of the results.
+\item{seed}{[integer, >0] value that will be set before adjustment for multiple comparisons to ensure reproducible results.
 Can also be \code{NULL}: in such a case no seed is set.}
 
 \item{cpus}{[integer >0] the number of processors to use.
diff --git a/man/coef2-internal.Rd b/man/coef2-internal.Rd
deleted file mode 100644
index 702b1ca..0000000
--- a/man/coef2-internal.Rd
+++ /dev/null
@@ -1,41 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-nlme.R
-\name{coef2-internal}
-\alias{coef2-internal}
-\alias{.coef2}
-\alias{.coef2.lm}
-\alias{.coef2.gls}
-\alias{.coef2.lme}
-\title{Export Mean and Variance Coefficients}
-\usage{
-.coef2(object)
-
-\method{.coef2}{lm}(object)
-
-\method{.coef2}{gls}(object)
-
-\method{.coef2}{lme}(object)
-}
-\arguments{
-\item{object}{a \code{lm}, \code{gls} or \code{lme} object.}
-
-\item{name.Y}{[character] the name of the endogenous variable. Used to name certain variance parameters.}
-}
-\value{
-A numeric vector named with the names of the coefficient with three attributes:
-\itemize{
-\item mean.coef: the name of the mean coefficients.
-\item var.coef: the name of the variance coefficients.
-\item cor.coef:  the name of the correlation coefficients.
-}
-}
-\description{
-Export mean and variance coefficients
-from a \code{lm}, \code{gls}, or \code{lme} object.
-}
-\details{
-The variance coefficients that are exported are the residual variance of each outcome. 
-This is \eqn{\sigma^2} for the first one and \eqn{k^2 \sigma^2} for the remaining ones.
-}
-\concept{extractor}
-\keyword{internal}
diff --git a/man/coef2.Rd b/man/coef2.Rd
new file mode 100644
index 0000000..7b182c3
--- /dev/null
+++ b/man/coef2.Rd
@@ -0,0 +1,51 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-coef2.R
+\name{coef2}
+\alias{coef2}
+\alias{coef2.lvmfit}
+\title{Model Coefficients With Small Sample Correction}
+\usage{
+coef2(object, as.lava, ...)
+
+\method{coef2}{lvmfit}(object, as.lava = TRUE, ssc = lava.options()$ssc, ...)
+}
+\arguments{
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{as.lava}{[logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+}
+\value{
+A numeric vector named with the names of the coefficients.
+}
+\description{
+Extract the coefficients from a latent variable model.
+Similar to \code{lava::compare} but with small sample correction.
+}
+\details{
+When the argument \code{object} is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the model coefficients.
+}
+\examples{
+#### simulate data ####
+set.seed(10)
+dW <- sampleRepeated(10, format = "wide")
+set.seed(10)
+dL <- sampleRepeated(10, format = "long")
+dL$time2 <- paste0("visit",dL$time)
+
+#### latent variable models ####
+e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+coef(e.lvm)
+coef2(e.lvm)
+coef2(e.lvm, as.lava = FALSE)
+}
+\seealso{
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+}
+\concept{extractor}
+\keyword{smallSampleCorrection}
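
A complementary sketch (not in the .Rd): with the correction disabled via the documented ssc value "none", coef2 should reduce to the uncorrected ML estimates.

coef2(e.lvm, ssc = "none")  # should agree with coef(e.lvm) up to naming
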
diff --git a/man/combination.Rd b/man/combination.Rd
index 0cdcdbe..0b55488 100644
--- a/man/combination.Rd
+++ b/man/combination.Rd
@@ -1,14 +1,16 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/skeleton.R
+% Please edit documentation in R/Utils.R
 \name{combination}
 \alias{combination}
 \alias{.combination}
 \title{Form all Unique Combinations Between two Vectors}
 \usage{
-.combination(...)
+.combination(..., levels = FALSE)
 }
 \arguments{
 \item{...}{[vectors] elements to be combined.}
+
+\item{levels}{[logical] should a label for each combination be output as an attribute named \code{levels}?}
 }
 \value{
 A matrix, each row being a different combination.
@@ -24,6 +26,8 @@ Form all unique combinations between two vectors (removing symmetric combination
 .combination(c(1:2,1:2),1:2)
 
 .combination(alpha = 1:2, beta = 3:4)
+.combination(alpha = 1:2, beta = 3:4, gamma = 1:4)
+.combination(alpha = 1:3, beta = 1:3, gamma = 1:3)
 
 }
 \keyword{internal}
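
A short sketch of the new levels argument; the attribute name follows the argument description above and is otherwise an assumption:

out <- lavaSearch2:::.combination(alpha = 1:2, beta = 3:4, levels = TRUE)
attr(out, "levels")  # assumed attribute name, per the argument description
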
diff --git a/man/combineFormula.Rd b/man/combineFormula.Rd
index fe00a64..42910e1 100644
--- a/man/combineFormula.Rd
+++ b/man/combineFormula.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-formula.R
+% Please edit documentation in R/Utils.R
 \name{combineFormula}
 \alias{combineFormula}
 \title{Combine formula}
diff --git a/man/compare2.Rd b/man/compare2.Rd
index 82e3d21..a890609 100644
--- a/man/compare2.Rd
+++ b/man/compare2.Rd
@@ -1,90 +1,101 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/compare2.R
+% Please edit documentation in R/sCorrect-compare2.R
 \name{compare2}
 \alias{compare2}
-\alias{compare2.lm}
-\alias{compare2.gls}
-\alias{compare2.lme}
 \alias{compare2.lvmfit}
-\alias{compare2.lm2}
-\alias{compare2.gls2}
-\alias{compare2.lme2}
 \alias{compare2.lvmfit2}
-\alias{.compare2}
-\title{Test Linear Hypotheses with small sample correction}
+\alias{compare.lvmfit2}
+\title{Test Linear Hypotheses With Small Sample Correction}
 \usage{
-compare2(object, df, bias.correct, ...)
-
-\method{compare2}{lm}(object, df = TRUE, bias.correct = TRUE, ...)
-
-\method{compare2}{gls}(object, df = TRUE, bias.correct = TRUE, cluster = NULL, ...)
-
-\method{compare2}{lme}(object, df = TRUE, bias.correct = TRUE, ...)
-
-\method{compare2}{lvmfit}(object, df = TRUE, bias.correct = TRUE, cluster = NULL, ...)
-
-\method{compare2}{lm2}(object, ...)
-
-\method{compare2}{gls2}(object, ...)
+compare2(
+  object,
+  linfct,
+  rhs,
+  robust,
+  cluster,
+  as.lava,
+  F.test,
+  conf.level,
+  ...
+)
 
-\method{compare2}{lme2}(object, ...)
+\method{compare2}{lvmfit}(
+  object,
+  linfct = NULL,
+  rhs = NULL,
+  robust = FALSE,
+  cluster = NULL,
+  as.lava = TRUE,
+  F.test = TRUE,
+  conf.level = 0.95,
+  ssc = lava.options()$ssc,
+  df = lava.options()$df,
+  ...
+)
 
-\method{compare2}{lvmfit2}(object, ...)
+\method{compare2}{lvmfit2}(
+  object,
+  linfct = NULL,
+  rhs = NULL,
+  robust = FALSE,
+  cluster = NULL,
+  as.lava = TRUE,
+  F.test = TRUE,
+  conf.level = 0.95,
+  ...
+)
 
-.compare2(
+\method{compare}{lvmfit2}(
   object,
-  par = NULL,
-  contrast = NULL,
-  null = NULL,
+  linfct = NULL,
   rhs = NULL,
   robust = FALSE,
   cluster = NULL,
-  df = object$sCorrect$args$df,
   as.lava = TRUE,
   F.test = TRUE,
-  level = 0.95
+  conf.level = 0.95,
+  ...
 )
 }
 \arguments{
-\item{object}{an object that inherits from lm/gls/lme/lvmfit.}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
-\item{df}{[logical] should the degree of freedoms of the Wald statistic be computed using the Satterthwaite correction?
-Otherwise the degree of freedoms are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.}
+\item{linfct}{[matrix or vector of character] the linear hypotheses to be tested. Same as the argument \code{linfct} of \code{\link{createContrast}}.}
 
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias? Argument passed to \code{sCorrect}.}
+\item{rhs}{[vector] the right hand side of the linear hypotheses to be tested.}
 
-\item{...}{[internal] only used by the generic method.}
+\item{robust}{[logical] should the robust standard errors be used instead of the model based standard errors?}
 
 \item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
 
-\item{par}{[vector of characters] expression defining the linear hypotheses to be tested.
-See the examples section.}
-
-\item{contrast}{[matrix] a contrast matrix defining the left hand side of the linear hypotheses to be tested.}
+\item{as.lava}{[logical] should the output be similar to the one returned by \code{lava::compare}?}
 
-\item{null, rhs}{[vector] the right hand side of the linear hypotheses to be tested.}
+\item{F.test}{[logical] should a joint test be performed?}
 
-\item{robust}{[logical] should the robust standard errors be used instead of the model based standard errors?}
+\item{conf.level}{[numeric 0-1] level of the confidence intervals.}
 
-\item{as.lava}{[logical] should the output be similar to the one return by \code{lava::compare}?}
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 
-\item{F.test}{[logical] should a joint test be performed?}
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
 
-\item{level}{[numeric 0-1] the confidence level of the confidence interval.}
+\item{df}{[character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+Only relevant when using a \code{lvmfit} object.}
 }
 \value{
 If \code{as.lava=TRUE} an object of class \code{htest}.
 Otherwise a \code{data.frame} object.
 }
 \description{
-Test Linear Hypotheses using a multivariate Wald statistic.
+Test Linear Hypotheses using Wald statistics in a latent variable model.
 Similar to \code{lava::compare} but with small sample correction.
 }
 \details{
-The \code{par} argument or the arguments \code{contrast} and \code{null} (or equivalenty \code{rhs})
-specify the set of linear hypotheses to be tested. They can be written:
+The \code{linfct} and \code{rhs} arguments specify the set of linear hypotheses to be tested. These hypotheses can be written:
 \deqn{
-  contrast * \theta = null
+  linfct * \theta = rhs
 }
 where \eqn{\theta} is the vector of the model coefficients. \cr
 The \code{linfct} argument must contain expression(s) involving the model coefficients.
@@ -94,10 +105,7 @@ A contrast matrix and the right hand side will be generated inside the function.
 When directly specified, the contrast matrix must contain as many columns as there are coefficients in the model (mean and variance coefficients).
 Each hypothesis corresponds to a row in the contrast matrix. \cr
 
-The null vector should contain as many elements as there are row in the contrast matrix. \cr
-
-Argument rhs and null are equivalent.
-This redondance enable compatibility between \code{lava::compare}, \code{compare2}, \code{multcomp::glht}, and \code{glht2}.
+The \code{rhs} vector should contain as many elements as there are rows in the contrast matrix. \cr
 }
 \examples{
 #### simulate data ####
@@ -107,40 +115,17 @@ categorical(mSim, labels = c("a","b","c")) <- ~X1
 transform(mSim, Id~Y) <- function(x){1:NROW(x)}
 df.data <- lava::sim(mSim, 1e2)
 
-#### with lm ####
-## direct use of compare2
-e.lm <- lm(Y~X1+X2, data = df.data)
-anova(e.lm)
-compare2(e.lm, par = c("X1b=0","X1c=0"))
-
-## or first compute the derivative of the information matrix
-sCorrect(e.lm) <- TRUE
-
-## and define the contrast matrix
-C <- createContrast(e.lm, par = c("X1b=0","X1c=0"), add.variance = TRUE)
-
-## run compare2
-compare2(e.lm, contrast = C$contrast, null = C$null)
-compare2(e.lm, contrast = C$contrast, null = C$null, robust = TRUE)
-
-#### with gls ####
-library(nlme)
-e.gls <- gls(Y~X1+X2, data = df.data, method = "ML")
-
-## first compute the derivative of the information matrix
-sCorrect(e.gls, cluster = 1:NROW(df.data)) <- TRUE
-
-compare2(e.gls, par = c("5*X1b+2*X2 = 0","(Intercept) = 0"))
-
 #### with lvm ####
 m <- lvm(Y~X1+X2)
 e.lvm <- estimate(m, df.data)
 
-compare2(e.lvm, par = c("-Y","Y~X1b+Y~X1c"))
-compare2(e.lvm, par = c("-Y","Y~X1b+Y~X1c"), robust = TRUE)
+compare2(e.lvm, linfct = c("Y~X1b","Y~X1c","Y~X2"))
+compare2(e.lvm, linfct = c("Y~X1b","Y~X1c","Y~X2"), robust = TRUE)
+
 }
 \seealso{
 \code{\link{createContrast}} to create contrast matrices. \cr
-\code{\link{sCorrect}} to pre-compute quantities for the small sample correction.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{small sample inference}
+\concept{inference}
+\keyword{smallSampleCorrection}
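
Since linfct also accepts a contrast matrix, a hedged sketch continuing the example above; it assumes createContrast() still returns a list with elements contrast and null, as in earlier releases:

C <- createContrast(e.lvm, linfct = c("Y~X1b", "Y~X1c"))
compare2(e.lvm, linfct = C$contrast, rhs = C$null)  # same tests, explicit form
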
diff --git a/man/conditionalMoment.Rd b/man/conditionalMoment.Rd
deleted file mode 100644
index 24f32c3..0000000
--- a/man/conditionalMoment.Rd
+++ /dev/null
@@ -1,122 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/conditionalMoment.R
-\name{conditionalMoment}
-\alias{conditionalMoment}
-\alias{conditionalMoment.lm}
-\alias{conditionalMoment.gls}
-\alias{conditionalMoment.lme}
-\alias{conditionalMoment.lvm}
-\alias{conditionalMoment.lvmfit}
-\title{Prepare the Computation of score2}
-\usage{
-conditionalMoment(object, ...)
-
-\method{conditionalMoment}{lm}(
-  object,
-  data,
-  param,
-  name.endogenous,
-  first.order,
-  second.order,
-  ...
-)
-
-\method{conditionalMoment}{gls}(
-  object,
-  data,
-  formula,
-  param,
-  attr.param,
-  ref.group,
-  first.order,
-  second.order,
-  index.Omega,
-  vec.OmegaMat,
-  cluster,
-  n.cluster,
-  name.endogenous,
-  n.endogenous,
-  ...
-)
-
-\method{conditionalMoment}{lme}(object, attr.param, ...)
-
-\method{conditionalMoment}{lvm}(
-  object,
-  data,
-  first.order,
-  second.order,
-  name.endogenous,
-  name.latent,
-  ...
-)
-
-\method{conditionalMoment}{lvmfit}(object, data, param, first.order, second.order, usefit, ...)
-}
-\arguments{
-\item{object, x}{a latent variable model.}
-
-\item{...}{[internal] only used by the generic method or by the <- methods.}
-
-\item{data}{[data.frame] data set.}
-
-\item{param, p}{[numeric vector] the fitted coefficients.}
-
-\item{name.endogenous}{[character vector, optional] name of the endogenous variables}
-
-\item{second.order}{[logical] should the terms relative to the third derivative of the likelihood be be pre-computed?}
-
-\item{formula}{[formula] two-sided linear formula.}
-
-\item{attr.param}{[character vector] the type of each coefficient
-(e.g. mean or variance coefficient).}
-
-\item{ref.group}{[character vector] the levels of the variable defining the variance component in a generic covariance matrix.}
-
-\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
-
-\item{n.cluster}{[integer >0] the number of i.i.d. observations.}
-
-\item{n.endogenous}{[integer >0] the number of outcomes.}
-
-\item{name.latent}{[character vector, optional] name of the latent variables}
-
-\item{usefit, value}{[logical] If TRUE the coefficients estimated by the model are used to pre-compute quantities. Only for lvmfit objects.}
-}
-\description{
-Compute the conditional mean and variance,
-and their first and second derivative regarding the model parameters.
-}
-\details{
-For lvmfit objects, there are two levels of pre-computation:
-\itemize{
-\item a basic one that do no involve the model coefficient (\code{conditionalMoment.lvm}).
-\item an advanced one that require the model coefficients (\code{conditionalMoment.lvmfit}). 
-}
-}
-\examples{
-m <- lvm(Y1~eta,Y2~eta,Y3~eta)
-latent(m) <- ~eta
-
-d <- lava::sim(m,1e2)
-e <- estimate(m, d)
-
-## basic pre-computation
-res1 <- conditionalMoment(e, data = d,
-                         first.order = FALSE, second.order = FALSE,
-                         name.endogenous = endogenous(e),
-                         name.latent = latent(e), usefit = FALSE)
-res1$skeleton$Sigma
-
-## full pre-computation
-res2 <- conditionalMoment(e, param = coef(e), data = d,
-                         first.order = FALSE, second.order = FALSE,
-                         name.endogenous = endogenous(e),
-                         name.latent = latent(e), usefit = TRUE
-)
-res2$value$Sigma
-
-}
-\concept{derivative of the score equation}
-\concept{small sample inference}
-\keyword{internal}
diff --git a/man/confint2.Rd b/man/confint2.Rd
new file mode 100644
index 0000000..67d96cd
--- /dev/null
+++ b/man/confint2.Rd
@@ -0,0 +1,80 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Scorrect-confint2.R, R/Scorrect-model.tables.R
+\name{confint2}
+\alias{confint2}
+\alias{confint2.lvmfit}
+\alias{model.tables2}
+\title{Confidence Intervals With Small Sample Correction}
+\usage{
+confint2(object, robust, cluster, transform, as.lava, conf.level, ...)
+
+\method{confint2}{lvmfit}(
+  object,
+  robust = FALSE,
+  cluster = NULL,
+  transform = NULL,
+  as.lava = TRUE,
+  conf.level = 0.95,
+  ssc = lava.options()$ssc,
+  df = lava.options()$df,
+  ...
+)
+
+model.tables2(object, robust, cluster, transform, as.lava, conf.level, ...)
+}
+\arguments{
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{robust}{[logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.}
+
+\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
+
+\item{transform}{[function] transformation to be applied.}
+
+\item{as.lava}{[logical] when \code{TRUE} uses the same names as when using \code{stats::coef}.}
+
+\item{conf.level}{[numeric, 0-1] level of the confidence intervals.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{df}{[character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+Only relevant when using a \code{lvmfit} object.}
+}
+\value{
+A data.frame with a row per coefficient.
+
+A data.frame with a row per coefficient.
+}
+\description{
+Extract confidence intervals of the coefficients from a latent variable model.
+Similar to \code{lava::confint} but with small sample correction.
+
+Extract estimate, standard error, confidence intervals and p-values associated to each coefficient of a latent variable model.
+Similar to \code{lava::confint} but with small sample correction.
+}
+\details{
+When the argument \code{object} is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the confidence intervals.
+
+When the argument \code{object} is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the confidence intervals.
+}
+\examples{
+#### simulate data ####
+set.seed(10)
+dW <- sampleRepeated(10, format = "wide")
+set.seed(10)
+dL <- sampleRepeated(10, format = "long")
+dL$time2 <- paste0("visit",dL$time)
+
+#### latent variable models ####
+e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+confint(e.lvm)
+confint2(e.lvm)
+confint2(e.lvm, as.lava = FALSE)
+}
+\concept{extractor}
+\keyword{smallSampleCorrection}
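
A one-line sketch of the robust option documented above, continuing the same example:

confint2(e.lvm, robust = TRUE)  # sandwich standard errors instead of model-based ones
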
diff --git a/man/contrast2name.Rd b/man/contrast2name.Rd
index 01268b3..693f7be 100644
--- a/man/contrast2name.Rd
+++ b/man/contrast2name.Rd
@@ -5,12 +5,14 @@
 \alias{.contrast2name}
 \title{Create Rownames for a Contrast Matrix}
 \usage{
-.contrast2name(contrast, null = NULL)
+.contrast2name(contrast, null = NULL, sep = c("[", "]"))
 }
 \arguments{
 \item{contrast}{[matrix] a contrast matrix defining the left hand side of the linear hypotheses to be tested.}
 
 \item{null}{[vector, optional] the right hand side of the linear hypotheses to be tested.}
+
+\item{sep}{[character of length 2, optional] characters used in rownames to wrap the left hand side of the equation.}
 }
 \value{
 a character vector.
diff --git a/man/convFormulaCharacter.Rd b/man/convFormulaCharacter.Rd
index 2f50e2e..affce69 100644
--- a/man/convFormulaCharacter.Rd
+++ b/man/convFormulaCharacter.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-formula.R
+% Please edit documentation in R/Utils.R
 \name{convFormulaCharacter}
 \alias{convFormulaCharacter}
 \alias{formula2character}
diff --git a/man/createContrast.Rd b/man/createContrast.Rd
index 7e2329d..51ada36 100644
--- a/man/createContrast.Rd
+++ b/man/createContrast.Rd
@@ -3,56 +3,38 @@
 \name{createContrast}
 \alias{createContrast}
 \alias{createContrast.character}
-\alias{createContrast.lm}
-\alias{createContrast.gls}
-\alias{createContrast.lme}
 \alias{createContrast.lvmfit}
+\alias{createContrast.lvmfit2}
 \alias{createContrast.list}
 \alias{createContrast.mmm}
 \title{Create Contrast matrix}
 \usage{
 createContrast(object, ...)
 
-\method{createContrast}{character}(
-  object,
-  name.param,
-  diff.first = FALSE,
-  add.rowname = TRUE,
-  rowname.rhs = TRUE,
-  ...
-)
+\method{createContrast}{character}(object, ...)
 
-\method{createContrast}{lm}(object, par, add.variance, ...)
+\method{createContrast}{lvmfit}(object, linfct, ...)
 
-\method{createContrast}{gls}(object, par, add.variance, ...)
+\method{createContrast}{lvmfit2}(object, linfct, ...)
 
-\method{createContrast}{lme}(object, par, add.variance, ...)
+\method{createContrast}{list}(object, linfct = NULL, ...)
 
-\method{createContrast}{lvmfit}(object, par = NULL, var.test = NULL, ...)
-
-\method{createContrast}{list}(object, par = NULL, add.variance = NULL, var.test = NULL, ...)
-
-\method{createContrast}{mmm}(object, par = NULL, add.variance = NULL, var.test = NULL, ...)
+\method{createContrast}{mmm}(object, linfct = NULL, ...)
 }
 \arguments{
-\item{object}{a \code{ls.lvmfit} object.}
-
-\item{...}{[internal] Only used by the generic method.}
-
-\item{name.param}{[internal] the names of all the model coefficients.}
-
-\item{diff.first}{[logical] should the contrasts between the first and any of the other coefficients define the null hypotheses.}
-
-\item{add.rowname}{[internal] should a name be defined for each hypothesis.}
+\item{object}{a \code{lvmfit} object or a list of \code{lvmfit} objects.}
 
-\item{rowname.rhs}{should the right hand side of the null hypothesis be added to the name.}
-
-\item{par}{[vector of characters] expression defining the linear hypotheses to be tested. See the examples section.}
-
-\item{add.variance}{[logical] should the variance coefficients be considered as model coefficients?
-Required for lm, gls, and lme models.}
-
-\item{var.test}{[character] a regular expression that is used to identify the coefficients to be tested using \code{grep}. Each coefficient will be tested in a separate hypothesis. When this argument is used, the argument \code{par} is disregarded.}
+\item{...}{Arguments to be passed to \code{.createContrast}:
+\itemize{
+\item diff.first [logical] should the contrasts between the first and any of the other coefficients define the null hypotheses?
+\item add.rowname [logical] add rownames to the contrast matrix and names to the right-hand side.
+\item rowname.rhs [logical] when naming the hypotheses, add the right-hand side (i.e. "X1-X2=0" instead of "X1-X2").
+\item sep [character vector of length 2] characters surrounding the left part of the row names.
+}}
+
+\item{linfct}{[vector of characters] expression defining the linear hypotheses to be tested.
+Can also be a regular expression (of length 1) that is used to identify the coefficients to be tested using \code{grep}.
+See the examples section.}
 }
 \value{
 A list containing
@@ -69,10 +51,7 @@ Returns a contrast matrix corresponding an object.
 The contrast matrix will contains the hypotheses in rows and the model coefficients in columns.
 }
 \details{
-One can initialize an empty contrast matrix setting the argument\code{par} to \code{character(0)}. \cr \cr
-
-When using \code{multcomp::glht} one should set the argument \code{add.variance} to \code{FALSE}. \cr
-When using \code{lavaSearch2::glht2} one should set the argument \code{add.variance} to \code{TRUE}.
+One can initialize an empty contrast matrix by setting the argument \code{linfct} to \code{character(0)}. \cr \cr
 }
 \examples{
 ## Simulate data
@@ -94,20 +73,22 @@ lvmZ <- lava::estimate(lvm(c(Z1,Z2,Z3) ~ -1 + 1*eta, eta ~ -1 + Treatment),
                  data = df.data)
 
 ## Contrast matrix for a given model
-createContrast(lmX, par = "X~Age")
-createContrast(lmX, par = c("X~Age=0","X~Age+5*X~TreatmentSSRI=0"))
-createContrast(lmX, par = character(0))
+createContrast(lmX, linfct = "X~Age")
+createContrast(lmX, linfct = c("X~Age=0","X~Age+5*X~TreatmentSSRI=0"))
+createContrast(lmX, linfct = c("X~Age=0","X~Age+5*X~TreatmentSSRI=0"), sep = NULL)
+createContrast(lmX, linfct = character(0))
 
 ## Contrast matrix for the join model
 ls.lvm <- list(X = lmX, Y = lmY, Z = lvmZ)
-createContrast(ls.lvm, var.test = "Treatment", add.variance = FALSE)
-createContrast(ls.lvm, par = character(0), add.variance = FALSE)
+createContrast(ls.lvm, linfct = "TreatmentSSRI=0")
+createContrast(ls.lvm, linfct = "TreatmentSSRI=0", rowname.rhs = FALSE)
+createContrast(ls.lvm, linfct = character(0))
 
 ## Contrast for multigroup models
 m <- lava::lvm(Y~Age+Treatment)
 e <- lava::estimate(list(m,m), data = split(df.data, df.data$Gender))
 print(coef(e))
-createContrast(e, par = "Y~TreatmentSSRI@1 - Y~TreatmentSSRI@2 = 0")
-createContrast(e, par = "Y~TreatmentSSRI@2 - Y~TreatmentSSRI@1 = 0")
+createContrast(e, linfct = "Y~TreatmentSSRI@1 - Y~TreatmentSSRI@2 = 0")
+createContrast(e, linfct = "Y~TreatmentSSRI@2 - Y~TreatmentSSRI@1 = 0")
+
 }
-\concept{small sample inference}
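
The linfct description above also allows a length-1 regular expression matched with grep; a hedged sketch continuing the ls.lvm example (how the function distinguishes an expression from a pattern is not shown in this diff):

createContrast(ls.lvm, linfct = "Treatment")  # one hypothesis per matching coefficient
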
diff --git a/man/dfSigma.Rd b/man/dfSigma.Rd
index 5c1e605..35dbdbe 100644
--- a/man/dfSigma.Rd
+++ b/man/dfSigma.Rd
@@ -1,21 +1,34 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/compare2.R
+% Please edit documentation in R/sCorrect-compare2.R
 \name{dfSigma}
 \alias{dfSigma}
 \title{Degree of Freedom for the Chi-Square Test}
 \usage{
-dfSigma(contrast, vcov, dVcov, keep.param)
+dfSigma(contrast, score, vcov, rvcov, dVcov, dRvcov, keep.param, type)
 }
 \arguments{
 \item{contrast}{[numeric vector] the linear combination of parameters to test}
 
-\item{vcov}{[numeric matrix] the variance-covariance matrix of the parameters.}
+\item{score}{[numeric matrix] the individual score for each parameter.}
 
-\item{dVcov}{[numeric array] the first derivative of the variance-covariance matrix of the parameters.}
+\item{vcov}{[numeric matrix] the model-based variance-covariance matrix of the parameters.}
+
+\item{rvcov}{[numeric matrix] the robust variance-covariance matrix of the parameters.}
+
+\item{dVcov}{[numeric array] the first derivative of the model-based variance-covariance matrix of the parameters.}
+
+\item{dRvcov}{[numeric array] the first derivative of the robust variance-covariance matrix of the parameters.}
 
 \item{keep.param}{[character vector] the name of the parameters with non-zero first derivative of their variance parameter.}
+
+\item{type}{[integer] 1 corresponds to the Satterthwaite approximation of the degrees of freedom applied to the model-based variance,
+2 to the Satterthwaite approximation of the degrees of freedom applied to the robust variance,
+3 to the approximation described in (Pan, 2002), sections 2 and 3.1.}
 }
 \description{
 Computation of the degrees of freedom of the chi-squared distribution
 relative to the model-based variance
 }
+\references{
+Wei Pan and Melanie M. Wall, Small-sample adjustments in using the sandwich variance estimator in generalized estimating equations. Statistics in Medicine (2002) 21:1429-1441.
+}
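
For orientation, the Satterthwaite approximation that the arguments above (contrast, vcov, dVcov) suggest for type 1, written with the macros of the removed vignette; a hedged reconstruction from the argument list, not text from the package:

\begin{align*}
\nu(c) \approx \frac{2 \left(\trans{c} \Sigma c\right)^2}{\trans{\nabla} \Sigma \nabla}
\qquad \text{with} \qquad
\nabla_k = \trans{c} \dpartial{\Sigma}{\param_k} c
\end{align*}

where \Sigma is the variance-covariance matrix of the parameters (vcov) and \dpartial{\Sigma}{\param_k} its first derivative (dVcov).
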
diff --git a/man/dfSigmaRobust.Rd b/man/dfSigmaRobust.Rd
deleted file mode 100644
index 80b5754..0000000
--- a/man/dfSigmaRobust.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/compare2.R
-\name{dfSigmaRobust}
-\alias{dfSigmaRobust}
-\title{Degree of Freedom for the Robust Chi-Square Test}
-\usage{
-dfSigmaRobust(contrast, vcov, rvcov, score)
-}
-\arguments{
-\item{contrast}{[numeric vector] the linear combination of parameters to test}
-
-\item{vcov}{[numeric matrix] the variance-covariance matrix of the parameters.}
-
-\item{rvcov}{[numeric matrix] the robust variance-covariance matrix of the parameters.}
-
-\item{score}{[numeric matrix] the individual score for each parameter.}
-}
-\description{
-Computation of the degrees of freedom of the chi-squared distribution
-relative to the robust-based variance
-}
-\details{
-When contrast is the identity matrix, this function compute the moments of the sandwich estimator
-and the degrees of freedom of the approximate t-test as described in (Pan, 2002) section 2 and 3.1.
-}
-\references{
-Wei Pan and Melanie M. Wall, Small-sample adjustments in using the sandwich variance estiamtor in generalized estimating equations. Statistics in medicine (2002) 21:1429-1441.
-}
diff --git a/man/dInformation2-internal.Rd b/man/dot-dinformation2-internal.Rd
similarity index 54%
rename from man/dInformation2-internal.Rd
rename to man/dot-dinformation2-internal.Rd
index 43c1c9a..b0417a5 100644
--- a/man/dInformation2-internal.Rd
+++ b/man/dot-dinformation2-internal.Rd
@@ -1,30 +1,34 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/information2.R
-\name{dInformation2-internal}
-\alias{dInformation2-internal}
+% Please edit documentation in R/sCorrect-dInformation2.R
+\name{.dinformation2-internal}
+\alias{.dinformation2-internal}
 \alias{.dInformation2}
 \title{Compute the First Derivative of the Expected Information Matrix}
 \usage{
 .dInformation2(
   dmu,
-  d2mu,
   dOmega,
+  d2mu,
   d2Omega,
-  Omega,
   OmegaM1,
-  n.corrected,
-  index.Omega,
+  missing.pattern,
+  unique.pattern,
+  name.pattern,
+  grid.3varD1,
+  grid.2meanD1.1varD1,
+  grid.2meanD2.1meanD1,
+  grid.2varD2.1varD1,
+  name.param,
   leverage,
   n.cluster,
-  name.param,
-  name.3deriv
+  weights
 )
 }
 \description{
 Compute the first derivative of the expected information matrix.
 }
 \details{
-\code{.dInformation2} will perform the computation individually when the
+\code{calc_dinformation} will perform the computation individually when the
 argument \code{index.Omega} is not null.
 }
 \keyword{internal}
diff --git a/man/effects2.Rd b/man/effects2.Rd
index 7589fa5..bcc2733 100644
--- a/man/effects2.Rd
+++ b/man/effects2.Rd
@@ -1,33 +1,83 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/effects2.R
+% Please edit documentation in R/sCorrect-effects2.R
 \name{effects2}
 \alias{effects2}
 \alias{effects2.lvmfit}
 \alias{effects2.lvmfit2}
-\title{Effects from a fitted model}
+\alias{effects.lvmfit2}
+\title{Effects Through Pathways With Small Sample Correction}
 \usage{
-effects2(object, link, ...)
+effects2(object, linfct, robust, cluster, conf.level, ...)
 
-\method{effects2}{lvmfit}(object, link, df = TRUE, bias.correct = TRUE, ...)
+\method{effects2}{lvmfit}(
+  object,
+  linfct,
+  robust = FALSE,
+  cluster = NULL,
+  conf.level = 0.95,
+  to = NULL,
+  from = NULL,
+  df = lava.options()$df,
+  ssc = lava.options()$ssc,
+  ...
+)
 
-\method{effects2}{lvmfit2}(object, link, ...)
+\method{effects2}{lvmfit2}(
+  object,
+  linfct,
+  robust = FALSE,
+  cluster = NULL,
+  conf.level = 0.95,
+  to = NULL,
+  from = NULL,
+  ...
+)
+
+\method{effects}{lvmfit2}(
+  object,
+  linfct,
+  robust = FALSE,
+  cluster = NULL,
+  conf.level = 0.95,
+  to = NULL,
+  from = NULL,
+  ...
+)
 }
 \arguments{
-\item{object}{an object that inherits from lvmfit.}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
-\item{link}{[character vector] The path for which the effect should be assessed (e.g. \code{"A~B"}),
+\item{linfct}{[character vector] The path for which the effect should be assessed (e.g. \code{"A~B"}),
 i.e. the effect of the right variable (B) on the left variable (A).}
 
-\item{...}{[internal] only used by the generic method.}
+\item{robust}{[logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.}
+
+\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
+
+\item{conf.level}{[numeric, 0-1] level of the confidence intervals.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 
-\item{df}{[logical] should the degree of freedoms of the Wald statistic be computed using the Satterthwaite correction?
-Otherwise the degree of freedoms are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.}
+\item{from, to}{alternative to argument \code{linfct}. See \code{lava::effects}.}
 
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias? Argument passed to \code{sCorrect}.}
+\item{df}{[character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+Only relevant when using a \code{lvmfit} object.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+}
+\value{
+A data.frame with a row per path.
 }
 \description{
 Test whether a path in the latent variable model corresponds to a null effect.
-Similar to \code{lava::effects} but with small sample correction.
-So far it only work for paths composed of two edges.
+Similar to \code{lava::effects} but with small sample correction (if any).
+So far it only works for a single path relating two variables through one or two edges.
+}
+\details{
+When the argument \code{object} is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the confidence intervals.
 }
-\concept{small sample inference}
+\concept{inference}
+\keyword{smallSampleCorrection}
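
The rewritten help page shows no examples section in this hunk; a hedged sketch of the documented interface, with illustrative names in the style of the other examples:

set.seed(10)
m <- lvm(c(Y1, Y2, Y3) ~ eta, eta ~ X1)
latent(m) <- ~eta
e <- estimate(m, lava::sim(m, 100, latent = FALSE))
effects2(e, linfct = "Y1~X1")  # effect of X1 on Y1, here carried through eta (two edges)
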
diff --git a/man/estfun.Rd b/man/estfun.Rd
deleted file mode 100644
index 3b34396..0000000
--- a/man/estfun.Rd
+++ /dev/null
@@ -1,62 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/multcomp.R
-\name{estfun}
-\alias{estfun}
-\alias{estfun.lvmfit}
-\alias{estfun.gls}
-\alias{estfun.lme}
-\title{Extract Empirical Estimating Functions (lvmfit Object)}
-\usage{
-\method{estfun}{lvmfit}(x, ...)
-
-\method{estfun}{gls}(x, ...)
-
-\method{estfun}{lme}(x, ...)
-}
-\arguments{
-\item{x}{an \code{lvmfit} object.}
-
-\item{...}{arguments passed to methods.}
-}
-\description{
-Extract the empirical estimating functions of a lvmfit object.
-This function is for internal use but need to be public to enable its use by \code{multcomp::glht}.
-}
-\details{
-This function enables to use the \code{glht} function with lvmfit object.
-Otherwise when calling \code{multcomp:::vcov.mmm} then \code{sandwich::sandwich} and then \code{sandwich::meat}, \code{sandwich::meat} will complain that \code{estfun} is not defined for \code{lvmfit} objects.
-}
-\examples{
-library(multcomp)
-
-#### generative model ####
-mSim <- lvm(X ~ Age + 0.5*Treatment,
-            Y ~ Gender + 0.25*Treatment,
-            c(Z1,Z2,Z3) ~ eta, eta ~ 0.75*treatment,
-            Age[40:5]~1)
-latent(mSim) <- ~eta
-categorical(mSim, labels = c("placebo","SSRI")) <- ~Treatment
-categorical(mSim, labels = c("male","female")) <- ~Gender
-
-#### simulate data ####
-n <- 5e1
-set.seed(10)
-df.data <- lava::sim(mSim, n = n, latent = FALSE)
-
-#### fit separate models ####
-lmX <- lm(X ~ Age + Treatment, data = df.data)
-lvmY <- estimate(lvm(Y ~ Gender + Treatment), data = df.data)
-lvmZ <- estimate(lvm(c(Z1,Z2,Z3) ~ eta, eta ~ Treatment), 
-                 data = df.data)
-
-#### create mmm object #### 
-e.mmm <- mmm(X = lmX, Y = lvmY, Z = lvmZ)
-
-#### create contrast matrix ####
-resC <- createContrast(e.mmm, var.test = "Treatment", add.variance = FALSE)
-
-#### adjust for multiple comparisons ####
-e.glht <- glht(e.mmm, linfct = resC$mlf)
-summary(e.glht)
-}
-\concept{multiple comparison}
diff --git a/man/estimate2.Rd b/man/estimate2.Rd
index b5864a9..bb86833 100644
--- a/man/estimate2.Rd
+++ b/man/estimate2.Rd
@@ -1,29 +1,116 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/estimate2.R
+% Please edit documentation in R/sCorrect-estimate2.R, R/sCorrect-sscResiduals.R
 \name{estimate2}
 \alias{estimate2}
-\alias{.estimate2}
-\title{Compute Bias Corrected Quantities.}
+\alias{estimate2.lvm}
+\alias{estimate2.lvmfit}
+\alias{estimate2.list}
+\alias{estimate2.mmm}
+\alias{.sscResiduals}
+\title{Satterthwaite Correction and Small Sample Correction}
 \usage{
-.estimate2(
+estimate2(
   object,
-  epsilon,
-  n.cluster,
-  name.param,
-  name.endogenous,
-  name.meanparam,
-  name.varparam,
-  index.Omega,
-  adjust.Omega,
-  adjust.n,
-  tol,
-  n.iter,
-  trace
+  param,
+  data,
+  ssc,
+  df,
+  derivative,
+  hessian,
+  dVcov.robust,
+  iter.max,
+  tol.max,
+  trace,
+  ...
 )
+
+\method{estimate2}{lvm}(
+  object,
+  param = NULL,
+  data = NULL,
+  ssc = lava.options()$ssc,
+  df = lava.options()$df,
+  derivative = "analytic",
+  hessian = FALSE,
+  dVcov.robust = FALSE,
+  iter.max = 100,
+  tol.max = 1e-06,
+  trace = 0,
+  ...
+)
+
+\method{estimate2}{lvmfit}(
+  object,
+  param = NULL,
+  data = NULL,
+  ssc = lava.options()$ssc,
+  df = lava.options()$df,
+  derivative = "analytic",
+  hessian = FALSE,
+  dVcov.robust = FALSE,
+  iter.max = 100,
+  tol.max = 1e-06,
+  trace = 0,
+  ...
+)
+
+\method{estimate2}{list}(object, ...)
+
+\method{estimate2}{mmm}(object, ...)
+
+.sscResiduals(object, ssc, algorithm = "2")
+}
+\arguments{
+\item{object}{a \code{lvm} object.}
+
+\item{param}{[numeric vector, optional] the values of the parameters at which to perform the correction.}
+
+\item{data}{[data.frame, optional] the dataset relative to which the correction should be performed.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{df}{[character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+Only relevant when using a \code{lvmfit} object.}
+
+\item{derivative}{[character] should the first derivative of the information matrix be computed using a formula (\code{"analytic"}) or numerical derivative (\code{"numeric"})?}
+
+\item{hessian}{[logical] should the hessian be stored? Can be \code{NULL}, in which case it is stored only when it has been computed during the small sample correction.}
+
+\item{dVcov.robust}{[logical] should the first derivative of robust variance-covariance matrix be stored?}
+
+\item{iter.max}{[integer >0] the maximum number of iterations used to estimate the bias correction.}
+
+\item{tol.max}{[numeric >0] the largest acceptable absolute difference between two successive estimates of the bias correction.}
+
+\item{trace}{[logical] should the execution of the function be traced?}
+
+\item{...}{arguments passed to \code{lava::estimate} when using a \code{lvm} object.}
 }
 \description{
+Correct the bias of the ML estimate of the variance and compute the first derivative of the information matrix.
+
 Compute the bias-corrected residual variance-covariance matrix
 and information matrix.
 Also provides the leverage values and corrected sample size when adjust.n is set to TRUE.
 }
+\details{
+The argument \code{value} is equivalent to the argument \code{bias.correct} of the function \code{summary2}.
+}
+\examples{
+#### simulate data ####
+set.seed(10)
+dW <- sampleRepeated(10, format = "wide")
+
+#### latent variable model ####
+m.lvm <- lvm(Y1~X1+X2+Z1)
+
+e2.lvm <- estimate2(m.lvm, data = dW)
+summary2(e2.lvm)
+
+}
+\concept{estimator}
 \keyword{internal}
+\keyword{smallSampleCorrection}
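
A sketch contrasting the documented ssc/df values, continuing the m.lvm/dW example above:

e.none <- estimate2(m.lvm, data = dW, ssc = "none", df = "none")
e.corr <- estimate2(m.lvm, data = dW, ssc = "residual", df = "satterthwaite")
summary2(e.none)  # ML estimates, Wald tests with Inf degrees of freedom
summary2(e.corr)  # bias-corrected variances, Satterthwaite degrees of freedom
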
diff --git a/man/evalInParentEnv.Rd b/man/evalInParentEnv.Rd
index 898ffed..cb657c4 100644
--- a/man/evalInParentEnv.Rd
+++ b/man/evalInParentEnv.Rd
@@ -1,16 +1,16 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/package-butils-evalInParentEnv.R
-\name{evalInParentEnv}
-\alias{evalInParentEnv}
-\title{Find Object in the Parent Environments}
-\usage{
-evalInParentEnv(name)
-}
-\arguments{
-\item{name}{[character] the name of the object to get.}
-}
-\description{
-Search an object in the parent environments. For internal use.
-}
-\concept{extractor}
-\keyword{internal}
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/evalInParentEnv.R
+\name{evalInParentEnv}
+\alias{evalInParentEnv}
+\title{Find Object in the Parent Environments}
+\usage{
+evalInParentEnv(name)
+}
+\arguments{
+\item{name}{[character] the name of the object to get.}
+}
+\description{
+Search an object in the parent environments. For internal use.
+}
+\concept{extractor}
+\keyword{internal}
diff --git a/man/extractData.Rd b/man/extractData.Rd
index 29bac61..b9e4529 100644
--- a/man/extractData.Rd
+++ b/man/extractData.Rd
@@ -1,57 +1,18 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/package-butils-extractData.R
+% Please edit documentation in R/sCorrect-extractData.R
 \name{extractData}
 \alias{extractData}
-\alias{extractData.lm}
-\alias{extractData.coxph}
-\alias{extractData.cph}
 \alias{extractData.lvmfit}
-\alias{extractData.gls}
-\alias{extractData.lme}
-\title{Extract Data From a Model}
+\title{Extract Data From a Latent Variable Model}
 \usage{
-extractData(object, design.matrix, as.data.frame, envir)
-
-\method{extractData}{lm}(
-  object,
-  design.matrix = FALSE,
-  as.data.frame = TRUE,
-  envir = environment()
-)
-
-\method{extractData}{coxph}(
-  object,
-  design.matrix = FALSE,
-  as.data.frame = TRUE,
-  envir = environment()
-)
-
-\method{extractData}{cph}(
-  object,
-  design.matrix = FALSE,
-  as.data.frame = TRUE,
-  envir = environment()
-)
+extractData(object, design.matrix, as.data.frame, envir, rm.na)
 
 \method{extractData}{lvmfit}(
   object,
   design.matrix = FALSE,
   as.data.frame = TRUE,
-  envir = environment()
-)
-
-\method{extractData}{gls}(
-  object,
-  design.matrix = FALSE,
-  as.data.frame = TRUE,
-  envir = environment()
-)
-
-\method{extractData}{lme}(
-  object,
-  design.matrix = FALSE,
-  as.data.frame = TRUE,
-  envir = environment()
+  envir = environment(),
+  rm.na = TRUE
 )
 }
 \arguments{
@@ -63,60 +24,32 @@ Otherwise the original data will be returned.}
 \item{as.data.frame}{[logical] should the output be converted into a \code{data.frame} object?}
 
 \item{envir}{[environment] the environment from which to search the data.}
+
+\item{rm.na}{[logical] should the lines containing missing values in the dataset be removed?}
 }
 \value{
 a dataset.
 }
 \description{
-Extract data from a model using \code{nlme::getData}, \code{riskRegression::coxDesign} or \code{model.frame}.. 
-If it fails it will try to extract it by its name according to \code{model$call$data}.
+Extract data from a latent variable model.
 }
 \examples{
+#### simulate data ####
 set.seed(10)
 n <- 101
 
-#### linear regression ####
 Y1 <- rnorm(n, mean = 0)
 Y2 <- rnorm(n, mean = 0.3)
 Id <- findInterval(runif(n), seq(0.1,1,0.1))
 data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
-           data.frame(Y=Y2,G="2",Id = Id)
+           data.frame(Y=Y2,G="2",Id = Id)       
            )
-m.lm <- lm(Y ~ G, data = data.df)
-a <- extractData(m.lm, design.matrix = TRUE)
-b <- extractData(m.lm, design.matrix = FALSE)
-
-library(nlme)
-m.gls <- gls(Y ~ G, weights = varIdent(form = ~ 1|Id), data = data.df)
-c <- extractData(m.gls)
-m.lme <- lme(Y ~ G, random = ~ 1|Id, data = data.df)
-d <- extractData(m.lme)
 
+#### latent variable model ####
 library(lava)
 e.lvm <- estimate(lvm(Y ~ G), data = data.df)
-e <- extractData(e.lvm)
-e <- extractData(e.lvm, design.matrix = TRUE)
-
-#### survival #### 
-library(survival)
+extractData(e.lvm)
+extractData(e.lvm, design.matrix = TRUE)
 
-\dontrun{
-  library(riskRegression) ## needs version >=1.4.3
-  dt.surv <- sampleData(n, outcome = "survival")
-  m.cox <- coxph(Surv(time, event) ~ X1 + X2, data = dt.surv, x = TRUE, y = TRUE)
-  f <- extractData(m.cox, design.matrix = FALSE)
-  f <- extractData(m.cox, design.matrix = TRUE)
-  m.cox <- coxph(Surv(time, event) ~ strata(X1) + X2, data = dt.surv, x = TRUE, y = TRUE)
-  f <- extractData(m.cox, design.matrix = TRUE)
-}
-
-#### nested fuuctions ####
-fct1 <- function(m){
-   fct2(m)
-}
-fct2 <- function(m){ 
-   extractData(m)
-}
-g <- fct1(m.gls)
 }
 \concept{extractor}
diff --git a/man/gaussian_weight.Rd b/man/gaussian_weight.Rd
new file mode 100644
index 0000000..424aa81
--- /dev/null
+++ b/man/gaussian_weight.Rd
@@ -0,0 +1,95 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Objective_gaussian_weight.R
+\docType{data}
+\name{gaussian_weight}
+\alias{gaussian_weight}
+\alias{gaussian_weight.estimate.hook}
+\alias{gaussian_weight_method.lvm}
+\alias{gaussian_weight_logLik.lvm}
+\alias{gaussian_weight_objective.lvm}
+\alias{gaussian_weight_score.lvm}
+\alias{gaussian_weight_gradient.lvm}
+\alias{gaussian_weight_hessian.lvm}
+\title{Estimate LVM With Weights}
+\format{
+An object of class \code{character} of length 1.
+}
+\usage{
+gaussian_weight.estimate.hook(x, data, estimator, ...)
+
+gaussian_weight_method.lvm
+
+gaussian_weight_logLik.lvm(object, type = "cond", p, data, weights, ...)
+
+gaussian_weight_objective.lvm(x, ...)
+
+gaussian_weight_score.lvm(
+  x,
+  data,
+  p,
+  S,
+  n,
+  mu = NULL,
+  weights = NULL,
+  debug = FALSE,
+  reindex = FALSE,
+  mean = TRUE,
+  constrain = TRUE,
+  indiv = FALSE,
+  ...
+)
+
+gaussian_weight_gradient.lvm(...)
+
+gaussian_weight_hessian.lvm(x, p, n, weights = NULL, ...)
+}
+\arguments{
+\item{x, object}{A latent variable model}
+
+\item{data}{dataset}
+
+\item{estimator}{name of the estimator to be used}
+
+\item{...}{passed to lower level functions.}
+
+\item{type}{must be "cond"}
+
+\item{p}{parameter value}
+
+\item{weights}{weight associated with each iid replicate.}
+
+\item{S}{empirical variance-covariance matrix between variables}
+
+\item{n}{number of iid replicates}
+
+\item{mu}{empirical mean}
+
+\item{debug, reindex, mean, constrain, indiv}{additional arguments not used}
+}
+\description{
+Estimate LVM with weights.
+}
+\examples{
+#### linear regression with weights ####
+
+## data
+df <- data.frame(Y = c(1,2,2,1,2),
+                 X = c(1,1,2,2,2),
+                 missing = c(0,0,0,0,1),
+                 weights = c(1,1,2,1,NA))
+
+## using lm
+e.lm.GS <- lm(Y~X, data = df)
+e.lm.test <- lm(Y~X, data = df[df$missing==0,], weights = df[df$missing==0,"weights"])
+
+## using lvm
+m <- lvm(Y~X)
+e.GS <- estimate(m, df)
+## e.lava.test <- estimate(m, df[df$missing==0,], weights = df[df$missing==0,"weights"])
+## warnings!!
+e.test <- estimate(m, data = df[df$missing==0,],
+                   weights = df[df$missing==0,"weights"],
+                   estimator = "gaussian_weight")
+
+}
+\keyword{datasets}
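
As a check on the example above, the weighted-lvm mean estimates should essentially match the weighted least-squares fit (a property of Gaussian ML with case weights, not a claim made by the help page):

coef(e.lm.test)  # weighted lm
coef(e.test)     # lvm with estimator = "gaussian_weight"
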
diff --git a/man/getCluster2-internal.Rd b/man/getCluster2-internal.Rd
deleted file mode 100644
index 23267b9..0000000
--- a/man/getCluster2-internal.Rd
+++ /dev/null
@@ -1,37 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-nlme.R
-\name{getCluster2-internal}
-\alias{getCluster2-internal}
-\alias{.getCluster2}
-\alias{.getCluster2.gls}
-\alias{.getCluster2.lme}
-\title{Reconstruct the Cluster Variable from a nlme Model}
-\usage{
-.getCluster2(object, ...)
-
-\method{.getCluster2}{gls}(object, cluster, data, ...)
-
-\method{.getCluster2}{lme}(object, ...)
-}
-\arguments{
-\item{object}{a \code{gls} or \code{lme} object.}
-
-\item{...}{[internal] Only used by the generic method.}
-
-\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.
-Only required for \code{gls} models with no correlation argument.}
-
-\item{data}{[data.frame] the data set.}
-}
-\value{
-A list containing:
-\itemize{
-\item cluster: the cluster index for each observation.
-\item n.cluster: the number of clusters.
-}
-}
-\description{
-Reconstruct the cluster variable from a nlme model.
-}
-\concept{extractor}
-\keyword{internal}
diff --git a/man/getIndexOmega.Rd b/man/getIndexOmega.Rd
new file mode 100644
index 0000000..52665fb
--- /dev/null
+++ b/man/getIndexOmega.Rd
@@ -0,0 +1,42 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-getIndexOmega.R
+\name{getIndexOmega}
+\alias{getIndexOmega}
+\alias{.getIndexOmega}
+\alias{.getIndexOmega.lvm}
+\alias{.getIndexOmega.lvmfit}
+\title{Identify the Endogenous Variables}
+\usage{
+.getIndexOmega(object, data, ...)
+
+\method{.getIndexOmega}{lvm}(object, data, ...)
+
+\method{.getIndexOmega}{lvmfit}(object, data, ...)
+}
+\arguments{
+\item{object}{a \code{lvmfit} object.}
+
+\item{data}{dataset.}
+
+\item{...}{[internal] Only used by the generic method.}
+}
+\description{
+Identify the endogenous variables, i.e., returns a vector with length the number of observations,
+whose values are the index of the repetitions.
+}
+\examples{
+\dontrun{
+#### simulate data ####
+set.seed(10)
+dW <- sampleRepeated(10, format = "wide")
+set.seed(10)
+dL <- sampleRepeated(10, format = "long")
+dL$time2 <- paste0("visit",dL$time)
+
+#### lvm model ####
+e.lvm <- estimate(lvm(c(Y1,Y2,Y3) ~ 1*eta + X1, eta ~ Z1), data = dW)
+## lavaSearch2:::.getIndexOmega(e.lvm, data = dW)
+}
+}
+\concept{extractor}
+\keyword{internal}
diff --git a/man/getIndexOmega2-internal.Rd b/man/getIndexOmega2-internal.Rd
deleted file mode 100644
index 8de38a3..0000000
--- a/man/getIndexOmega2-internal.Rd
+++ /dev/null
@@ -1,62 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-nlme.R
-\name{getIndexOmega2-internal}
-\alias{getIndexOmega2-internal}
-\alias{.getIndexOmega2}
-\alias{.getIndexOmega2.gls}
-\alias{.getIndexOmega2.lme}
-\title{Extract the name of the endogenous variables}
-\usage{
-.getIndexOmega2(object, ...)
-
-\method{.getIndexOmega2}{gls}(
-  object,
-  param,
-  attr.param,
-  name.Y,
-  cluster,
-  levels.cluster,
-  data
-)
-
-\method{.getIndexOmega2}{lme}(
-  object,
-  param,
-  attr.param,
-  name.Y,
-  cluster,
-  levels.cluster,
-  data
-)
-}
-\arguments{
-\item{object}{a \code{gls} or \code{lme} object.}
-
-\item{...}{[internal] Only used by the generic method.}
-
-\item{param}{[numeric vector] the mean and variance coefficients.}
-
-\item{attr.param}{[character vector] the type of each coefficients (mean or variance).}
-
-\item{name.Y}{[character] name of the endogenous variable.}
-
-\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.
-Only required for \code{gls} models with no correlation argument.}
-
-\item{data}{[data.frame] the data set.}
-}
-\value{
-A list containing:
-\itemize{
-\item index.Omega: [list of integer vector] For each cluster of observations,
-the index of the endogenous variable relative to each observation.
-\item n.endogenous: [integer] the number of endogenous variables.
-\item name.endogenous: [character vector] the name of the endogenous variables.
-\item ref.group: [character vector] the levels of the variable defining the variance component in a generic covariance matrix.
-}
-}
-\description{
-Extract the name of the endogenous variables from a nlme model.
-}
-\concept{extractor}
-\keyword{internal}
diff --git a/man/getNewLink.Rd b/man/getNewLink.Rd
index 1c90992..b557d81 100644
--- a/man/getNewLink.Rd
+++ b/man/getNewLink.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/methods-modelsearch2.R
+% Please edit documentation in R/modelsearch2-methods.R
 \name{getNewLink}
 \alias{getNewLink}
 \alias{getNewLink.modelsearch2}
diff --git a/man/getNewModel.Rd b/man/getNewModel.Rd
index 9b4ccac..476ff1e 100644
--- a/man/getNewModel.Rd
+++ b/man/getNewModel.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/methods-modelsearch2.R
+% Please edit documentation in R/modelsearch2-methods.R
 \name{getNewModel}
 \alias{getNewModel}
 \alias{getNewModel.modelsearch2}
diff --git a/man/getStep.Rd b/man/getStep.Rd
index 0c7a72c..2e74a53 100644
--- a/man/getStep.Rd
+++ b/man/getStep.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/methods-modelsearch2.R
+% Please edit documentation in R/modelsearch2-methods.R
 \name{getStep}
 \alias{getStep}
 \alias{getStep.modelsearch2}
diff --git a/man/getVarCov2-internal.Rd b/man/getVarCov2-internal.Rd
deleted file mode 100644
index ffd100a..0000000
--- a/man/getVarCov2-internal.Rd
+++ /dev/null
@@ -1,46 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-nlme.R
-\name{getVarCov2-internal}
-\alias{getVarCov2-internal}
-\alias{.getVarCov2}
-\alias{.getVarCov2.gls}
-\alias{.getVarCov2.lme}
-\title{Reconstruct the Marginal Variance Covariance Matrix from a nlme Model}
-\usage{
-.getVarCov2(object, ...)
-
-\method{.getVarCov2}{gls}(
-  object,
-  param,
-  attr.param,
-  name.endogenous,
-  n.endogenous,
-  ref.group,
-  ...
-)
-
-\method{.getVarCov2}{lme}(object, param, attr.param, ...)
-}
-\arguments{
-\item{object}{a \code{gls} or \code{lme} object}
-
-\item{...}{[internal] Only used by the generic method.}
-
-\item{param}{[numeric vector] the mean and variance coefficients.}
-
-\item{attr.param}{[character vector] the type of each coefficients (mean or variance).}
-
-\item{name.endogenous}{[character vector] name of each repetition of the endogenous variable.}
-
-\item{n.endogenous}{[integer >0] number of repetitions of the endogenous variable.}
-
-\item{ref.group}{[character vector] the levels of the variable defining the variance component in a generic covariance matrix.}
-}
-\value{
-[matrix] the marginal variance covariance matrix for a full sample.
-}
-\description{
-Reconstruct the marginal variance covariance matrix from a nlme model.
-}
-\concept{extractor}
-\keyword{internal}
diff --git a/man/getVarCov2.Rd b/man/getVarCov2.Rd
index 539f789..17c98d3 100644
--- a/man/getVarCov2.Rd
+++ b/man/getVarCov2.Rd
@@ -1,104 +1,53 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/getVarCov2.R
+% Please edit documentation in R/sCorrect-getVarCov2.R
 \name{getVarCov2}
 \alias{getVarCov2}
-\alias{getVarCov2.gls}
-\alias{getVarCov2.lme}
 \alias{getVarCov2.lvmfit}
-\title{Reconstruct the Conditional Variance Covariance Matrix}
+\alias{getVarCov2.lvmfit2}
+\title{Residual Variance-Covariance Matrix With Small Sample Correction.}
 \usage{
 getVarCov2(object, ...)
 
-\method{getVarCov2}{gls}(object, data = NULL, cluster, ...)
+\method{getVarCov2}{lvmfit}(object, ssc = lava.options()$ssc, ...)
 
-\method{getVarCov2}{lme}(object, data = NULL, cluster, ...)
-
-\method{getVarCov2}{lvmfit}(object, data = NULL, param = NULL, ...)
+\method{getVarCov2}{lvmfit2}(object, ...)
 }
 \arguments{
-\item{object}{a \code{gls} or \code{lme} object}
-
-\item{...}{[internal] only used by the generic method.}
-
-\item{data}{[data.frame] the data set.}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
-\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 
-\item{param}{[numeric vector] values for the model parameters.}
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
 }
 \value{
-A list containing the residual variance-covariance matrix in the element Omega.
+A matrix with as many rows and columns as the number of endogenous variables.
 }
 \description{
-Reconstruct the conditional variance covariance matrix from a nlme or lvm model.
-Only compatible with specific correlation and variance structure.
+Reconstruct the residual variance-covariance matrix from a latent variable model. 
+It is similar to \code{nlme::getVarCov} but with small sample correction.
 }
 \details{
-The compound symmetry variance-covariance matrix in a gls model is of the form:
-\tabular{cccc}{
-\eqn{\Sigma =} \tab \eqn{\sigma^2} \tab \eqn{\sigma^2 \rho} \tab \eqn{\sigma^2 \rho} \cr
-\tab . \tab \eqn{\sigma^2} \tab \eqn{\sigma^2 \rho} \cr
-\tab . \tab . \tab \eqn{\sigma^2}
-}
-
-The unstructured variance-covariance matrix in a gls model is of the form:
- \tabular{cccc}{
-\eqn{\Sigma =} \tab \eqn{\sigma^2} \tab \eqn{\sigma^2 \sigma_2 \rho_{1,2}} \tab \eqn{\sigma^2 \sigma_3 \rho_{1,3}} \cr
-\tab . \tab \eqn{\sigma^2 \sigma_2^2} \tab \eqn{\sigma^2 \sigma_2 \sigma_3 \rho_{2,3}} \cr
-\tab . \tab . \tab \eqn{\sigma^2 \sigma_3^2}
-}
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the residual variance-covariance matrix.
 }
 \examples{
-
-## simulate data 
-library(nlme)
-n <- 5e1
-mSim <- lvm(c(Y1~1*eta,Y2~1*eta,Y3~1*eta,eta~G))
-latent(mSim) <- ~eta
-transform(mSim,Id~Y1) <- function(x){1:NROW(x)}
+#### simulate data ####
 set.seed(10)
-dW <- lava::sim(mSim,n,latent = FALSE)
-dW <- dW[order(dW$Id),,drop=FALSE]
-dL <- reshape2::melt(dW,id.vars = c("G","Id"), variable.name = "time")
-dL <- dL[order(dL$Id),,drop=FALSE]
-dL$Z1 <- rnorm(NROW(dL))
-dL$time.num <- as.numeric(as.factor(dL$time))
-
-#### iid model #### 
-e1.gls <- nlme::gls(Y1 ~ G, data = dW, method = "ML")
-getVarCov2(e1.gls, cluster = 1:n)$Omega
-
-#### heteroschedasticity ####
-dW$group <- rbinom(n, size = 1, prob = 1/2)
-dW$repetition <- as.numeric(as.factor(dW$group))
-e2a.gls <- nlme::gls(Y1 ~ G, data = dW, method = "ML",
-                    weights = varIdent(form =~ repetition|group))
-getVarCov2(e2a.gls, cluster = 1:n)$Omega
-
-
-e2b.gls <- nlme::gls(value ~ 0+time + time:G,
-                   weight = varIdent(form = ~ time.num|time),
-                   data = dL, method = "ML")
-getVarCov2(e2b.gls, cluster = "Id")$Omega
-
-#### compound symmetry ####
-e3.gls <- nlme::gls(value ~ time + G,
-                   correlation = corCompSymm(form = ~1| Id),
-                   data = dL, method = "ML")
-getVarCov2(e3.gls)$Omega
+n <- 101
 
-#### unstructured ####
-e4.gls <- nlme::gls(value ~ time,
-                    correlation = corSymm(form = ~time.num| Id),
-                    weight = varIdent(form = ~ 1|time),
-                    data = dL, method = "ML")
-getVarCov2(e4.gls)$Omega
+Y1 <- rnorm(n, mean = 0)
+Y2 <- rnorm(n, mean = 0.3)
+Id <- findInterval(runif(n), seq(0.1,1,0.1))
+data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
+           data.frame(Y=Y2,G="2",Id = Id)
+           )
 
-#### lvm model ####
-m <- lvm(c(Y1~1*eta,Y2~1*eta,Y3~1*eta,eta~G))
-latent(m) <- ~eta
-e <- estimate(m, dW)
-getVarCov2(e)
+#### latent variable models ####
+library(lava)
+e.lvm <- estimate(lvm(Y ~ G), data = data.df)
+getVarCov2(e.lvm)
 
 }
 \concept{extractor}
+\keyword{smallSampleCorrection}
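For orientation, a minimal R sketch of the revised getVarCov2 interface shown in the usage block above; the one-factor model and simulated data are illustrative, not taken from the man page:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(c(Y1, Y2, Y3) ~ eta)            ## one-factor model
latent(m) <- ~eta
d <- lava::sim(m, 50, latent = FALSE)
e <- estimate(m, data = d)
getVarCov2(e)                             ## corrected residual variance-covariance (default ssc)
getVarCov2(e, ssc = "none")               ## uncorrected counterpart, for comparison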
diff --git a/man/glht2.Rd b/man/glht2.Rd
index ff39f17..1765ea6 100644
--- a/man/glht2.Rd
+++ b/man/glht2.Rd
@@ -1,55 +1,59 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/multcomp.R
+% Please edit documentation in R/sCorrect-glht2.R
 \name{glht2}
 \alias{glht2}
 \alias{glht2.lvmfit}
+\alias{glht2.lvmfit2}
 \alias{glht2.mmm}
-\title{General Linear Hypothesis}
+\alias{glht.lvmfit2}
+\title{General Linear Hypothesis Testing With Small Sample Correction}
 \usage{
-glht2(model, linfct, rhs, bias.correct, df, robust, cluster)
+glht2(object, ...)
 
 \method{glht2}{lvmfit}(
-  model,
+  object,
   linfct,
-  rhs = 0,
-  bias.correct = TRUE,
-  df = TRUE,
+  rhs = NULL,
   robust = FALSE,
-  cluster = NULL
+  cluster = NULL,
+  ssc = lava.options()$ssc,
+  df = lava.options()$df,
+  ...
 )
 
-\method{glht2}{mmm}(
-  model,
-  linfct,
-  rhs = 0,
-  bias.correct = TRUE,
-  df = TRUE,
-  robust = FALSE,
-  cluster = NULL
-)
+\method{glht2}{lvmfit2}(object, linfct, rhs = NULL, robust = FALSE, cluster = NULL, ...)
+
+\method{glht2}{mmm}(object, linfct, rhs = 0, robust = FALSE, cluster = NULL, ...)
+
+\method{glht}{lvmfit2}(model, linfct, rhs = NULL, robust = FALSE, cluster = NULL, ...)
 }
 \arguments{
-\item{model}{a \code{lvmfit} or \code{mmm} object.
-The \code{mmm} object can only contain lm/gls/lme/lvmfit objects.}
+\item{object, model}{a \code{lvmfit}, \code{lvmfit2}, or \code{mmm} object.}
+
+\item{...}{[logical] arguments passed to lower level methods.}
 
 \item{linfct}{[matrix or vector of character] the linear hypotheses to be tested. Same as the argument \code{par} of \code{\link{createContrast}}.}
 
 \item{rhs}{[vector] the right hand side of the linear hypotheses to be tested.}
 
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias?}
-
-\item{df}{[logical] should the degree of freedoms of the Wald statistic be computed using the Satterthwaite correction?}
-
 \item{robust}{[logical] should robust standard error be used? 
 Otherwise rescale the influence function with the standard error obtained from the information matrix.}
 
 \item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{df}{[character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite approximation (\code{"satterthwaite"}).
+Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+Only relevant when using a \code{lvmfit} object.}
 }
 \value{
 A \code{glht} object.
 }
 \description{
-Test general linear hypotheses and across latent variable models with small sample corrections.
+Test linear hypotheses on coefficients from one or several latent variable models, with small sample corrections.
 }
 \details{
 Whenever the argument linfct is not a matrix, it is passed to the function \code{createContrast} to generate the contrast matrix and, if not specified, rhs. \cr \cr
@@ -76,25 +80,25 @@ summary(glht2(e.lvm, linfct = c("Y1~E + Y1","Y1")))
 
 #### Inference on separate models ####
 ## fit separate models
-lmX <- lm(Z1 ~ E, data = df.data)
+lvmX <- estimate(lvm(Z1 ~ E), data = df.data)
 lvmY <- estimate(lvm(Z2 ~ E + Age), data = df.data)
 lvmZ <- estimate(lvm(c(Y1,Y2,Y3) ~ eta, eta ~ E), 
                  data = df.data)
 
 #### create mmm object #### 
-e.mmm <- mmm(X = lmX, Y = lvmY, Z = lvmZ)
+e.mmm <- mmm(X = lvmX, Y = lvmY, Z = lvmZ)
 
 #### create contrast matrix ####
-resC <- createContrast(e.mmm, var.test = "E", add.variance = TRUE)
+resC <- createContrast(e.mmm, linfct = "E")
 
 #### adjust for multiple comparisons ####
-e.glht2 <- glht2(e.mmm, linfct = resC$contrast, df = FALSE)
+e.glht2 <- glht2(e.mmm, linfct = c(X="E"), df = FALSE)
 summary(e.glht2)
 
 }
 \seealso{
 \code{\link{createContrast}} to create contrast matrices. \cr
-\code{\link{sCorrect}} to pre-compute quantities for the small sample correction.
+\code{\link{estimate2}} to pre-compute quantities for the small sample correction.
 }
 \concept{multiple comparison}
 \concept{multiple comparisons}
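A hedged sketch of the renamed interface (object/ssc/df replacing model/bias.correct/df); the model, data, and coefficient names below are illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1 + X2)
d <- lava::sim(m, 40)
e <- estimate(m, data = d)
## test both regression coefficients, adjusted for multiple comparisons
summary(glht2(e, linfct = c("Y~X1", "Y~X2")))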
diff --git a/man/hessian2-internal.Rd b/man/hessian2-internal.Rd
new file mode 100644
index 0000000..5b70c29
--- /dev/null
+++ b/man/hessian2-internal.Rd
@@ -0,0 +1,34 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-hessian2.R
+\name{hessian2-internal}
+\alias{hessian2-internal}
+\alias{.hessian2}
+\title{Compute the Hessian Matrix From the Conditional Moments}
+\usage{
+.hessian2(
+  dmu,
+  dOmega,
+  d2mu,
+  d2Omega,
+  epsilon,
+  OmegaM1,
+  missing.pattern,
+  unique.pattern,
+  name.pattern,
+  grid.mean,
+  grid.var,
+  grid.hybrid,
+  name.param,
+  leverage,
+  n.cluster,
+  weights
+)
+}
+\description{
+Compute the Hessian matrix from the conditional moments.
+}
+\details{
+\code{calc_hessian} will perform the computation individually when the
+argument \code{index.Omega} is not null.
+}
+\keyword{internal}
diff --git a/man/hessian2.Rd b/man/hessian2.Rd
new file mode 100644
index 0000000..f9d76f7
--- /dev/null
+++ b/man/hessian2.Rd
@@ -0,0 +1,68 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-hessian2.R
+\name{hessian2}
+\alias{hessian2}
+\alias{hessian2.lvmfit}
+\alias{hessian2.lvmfit2}
+\title{Hessian With Small Sample Correction.}
+\usage{
+hessian2(object, indiv, cluster, as.lava, ...)
+
+\method{hessian2}{lvmfit}(
+  object,
+  indiv = FALSE,
+  cluster = NULL,
+  as.lava = TRUE,
+  ssc = lava.options()$ssc,
+  ...
+)
+
+\method{hessian2}{lvmfit2}(object, indiv = FALSE, cluster = NULL, as.lava = TRUE, ...)
+}
+\arguments{
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{indiv}{[logical] If \code{TRUE}, the hessian relative to each observation is returned. Otherwise the total hessian is returned.}
+
+\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
+
+\item{as.lava}{[logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+}
+\value{
+An array containing the second derivative of the likelihood relative to each sample (dim 3)
+and each pair of model coefficients (dim 1,2).
+}
+\description{
+Extract the hessian from a latent variable model, with small sample correction.
+}
+\details{
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the hessian.
+}
+\examples{
+#### simulate data ####
+n <- 5e1
+p <- 3
+X.name <- paste0("X",1:p)
+link.lvm <- paste0("Y~",X.name)
+formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
+
+m <- lvm(formula.lvm)
+distribution(m,~Id) <- Sequence.lvm(0)
+set.seed(10)
+d <- lava::sim(m,n)
+
+#### latent variable models ####
+e.lvm <- estimate(lvm(formula.lvm),data=d)
+hessian2(e.lvm)
+
+}
+\seealso{
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+}
+\concept{small sample inference}
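A short sketch of the indiv argument on a plain regression-type lvm; the dimension comment restates the value section above, and the simulated data are illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1 + X2)
d <- lava::sim(m, 50)
e <- estimate(m, data = d)
H <- hessian2(e, indiv = TRUE)
dim(H)  ## n.coef x n.coef x n.observation, cf. the value section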
diff --git a/man/iid2.Rd b/man/iid2.Rd
index 4d4cfca..fd007cb 100644
--- a/man/iid2.Rd
+++ b/man/iid2.Rd
@@ -1,68 +1,54 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/iid2.R
+% Please edit documentation in R/sCorrect-iid2.R
 \name{iid2}
 \alias{iid2}
-\alias{iid2.lm}
-\alias{iid2.gls}
-\alias{iid2.lme}
 \alias{iid2.lvmfit}
-\alias{iid2.lm2}
-\alias{iid2.gls2}
-\alias{iid2.lme2}
 \alias{iid2.lvmfit2}
-\title{Extract corrected i.i.d. decomposition}
+\alias{iid.lvmfit2}
+\title{Influence Function With Small Sample Correction.}
 \usage{
 iid2(object, ...)
 
-\method{iid2}{lm}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{iid2}{gls}(
+\method{iid2}{lvmfit}(
   object,
+  robust = TRUE,
   cluster = NULL,
-  param = NULL,
-  data = NULL,
-  bias.correct = TRUE,
+  as.lava = TRUE,
+  ssc = lava.options()$ssc,
   ...
 )
 
-\method{iid2}{lme}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{iid2}{lvmfit}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{iid2}{lm2}(object, cluster = NULL, param = NULL, data = NULL, robust = TRUE, ...)
-
-\method{iid2}{gls2}(object, cluster = NULL, param = NULL, data = NULL, robust = TRUE, ...)
-
-\method{iid2}{lme2}(object, cluster = NULL, param = NULL, data = NULL, robust = TRUE, ...)
+\method{iid2}{lvmfit2}(object, robust = TRUE, cluster = NULL, as.lava = TRUE, ...)
 
-\method{iid2}{lvmfit2}(object, cluster = NULL, data = NULL, ...)
+\method{iid}{lvmfit2}(x, robust = TRUE, cluster = NULL, as.lava = TRUE, ...)
 }
 \arguments{
-\item{object}{a linear model or a latent variable model}
+\item{object, x}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
-\item{...}{arguments to be passed to \code{sCorrect}.}
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 
-\item{param}{[named numeric vector] the fitted parameters.}
-
-\item{data}{[data.frame] the data set.}
-
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias? Only relevant if the \code{sCorrect} function has not yet be applied to the object.}
+\item{robust}{[logical] if \code{FALSE}, the influence function is rescaled such that its squared sum equals the model-based standard error (instead of the robust standard error).
+It does not, however, match the model-based correlation.}
 
 \item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
 
-\item{robust}{[logical] if \code{FALSE}, the i.i.d. decomposition is rescaled such its the squared sum equals the model-based standard error (instead of the robust standard error).}
+\item{as.lava}{[logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients (\code{"none"}, \code{"residual"}, \code{"cox"}). Only relevant when using a \code{lvmfit} object.}
 }
 \value{
 A matrix containing the 1st order influence function relative to each sample (in rows)
 and each model coefficient (in columns).
 }
 \description{
-Extract corrected i.i.d. decomposition from a gaussian linear model.
+Extract the influence function from a latent variable model.
+It is similar to \code{lava::iid} but with small sample correction.
 }
 \details{
-If argument \code{p} or \code{data} is not null, then the small sample size correction is recomputed to correct the influence function.
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the influence function.
 }
 \examples{
+#### simulate data ####
 n <- 5e1
 p <- 3
 X.name <- paste0("X",1:p)
@@ -74,24 +60,14 @@ distribution(m,~Id) <- Sequence.lvm(0)
 set.seed(10)
 d <- sim(m,n)
 
-## linear model
-e.lm <- lm(formula.lvm,data=d)
-iid.tempo <- iid2(e.lm, bias.correct = FALSE)
-range(iid.tempo[,1:4]-iid(e.lm))
-
-## latent variable model
+#### latent variable model ####
 e.lvm <- estimate(lvm(formula.lvm),data=d)
-iid.tempo <- iid2(e.lvm, bias.correct = FALSE)
-range(iid.tempo-iid(e.lvm))
-## difference due to the use of the observed info matrix vs. the expected one.
+iid.tempo <- iid2(e.lvm)
 
-## rescale i.i.d using model-based standard error
-iid.tempo <- iid2(e.lvm, robust = FALSE, bias.correct = FALSE)
-diag(crossprod(iid.tempo))-diag(vcov(e.lvm))
 
 }
 \seealso{
-\code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{iid decomposition}
-\concept{small sample inference}
+\concept{extractor}
+\keyword{smallSampleCorrection}
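To illustrate the robust argument documented above, a minimal sketch; the near-equalities in the comments are approximate relationships, not guarantees:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1 + X2)
d <- lava::sim(m, 50)
e <- estimate(m, data = d)
IF <- iid2(e)                              ## influence function, robust scaling
crossprod(IF)                              ## approximates the robust vcov
diag(crossprod(iid2(e, robust = FALSE)))   ## rescaled toward model-based variances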
diff --git a/man/iid2plot.Rd b/man/iid2plot.Rd
new file mode 100644
index 0000000..5c7d1d6
--- /dev/null
+++ b/man/iid2plot.Rd
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-iid2.R
+\name{iid2plot}
+\alias{iid2plot}
+\title{Display the i.i.d. Decomposition}
+\usage{
+iid2plot(object, param)
+}
+\arguments{
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{param}{[character] name of one of the model parameters.}
+}
+\description{
+Extract the i.i.d. decomposition and display it along with the corresponding coefficient.
+}
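The man page has no example, so a hedged sketch may help; the coefficient name follows lava's link notation and is an assumption, not taken from the documentation:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1)
d <- lava::sim(m, 50)
e <- estimate(m, data = d)
iid2plot(e, param = "Y~X1")  ## i.i.d. decomposition for one coefficient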
diff --git a/man/iidJack.Rd b/man/iidJack.Rd
index c96aa57..2b2563d 100644
--- a/man/iidJack.Rd
+++ b/man/iidJack.Rd
@@ -48,53 +48,15 @@ Extract iid decomposition (i.e. influence function) from model object.
 }
 \examples{
 n <- 20
-
-#### glm ####
-set.seed(10)
-m <- lvm(y~x+z)
-distribution(m, ~y+z) <- binomial.lvm("logit")
-d <- lava::sim(m,n)
-g <- glm(y~x+z,data=d,family="binomial")
-iid1 <- iidJack(g, cpus = 1)
-iid2 <- lava::iid(g)
-quantile(iid1-iid2)
-vcov(g)
-colSums(iid2^2)
-colSums(iid1^2)
-
-#### Cox model ####
-\dontrun{
-library(survival)
-data(Melanoma, package = "riskRegression")
-m <- coxph(Surv(time,status==1)~ici+age, data = Melanoma, x = TRUE, y = TRUE)
-
-## require riskRegression > 1.4.3
-if(utils::packageVersion("riskRegression") > "1.4.3"){
-library(riskRegression)
-iid1 <- iidJack(m)
-iid2 <- iidCox(m)$IFbeta
-
-apply(iid1,2,sd)
-
-print(iid2)
-
-apply(iid2,2,sd)
-  }
-}
-
-#### LVM ####
-\dontrun{
 set.seed(10)
-
 mSim <- lvm(c(Y1,Y2,Y3,Y4,Y5) ~ 1*eta)
 latent(mSim) <- ~eta
 categorical(mSim, K=2) <- ~G
 transform(mSim, Id ~ eta) <- function(x){1:NROW(x)}
 dW <- lava::sim(mSim, n, latent = FALSE)
-dL <- reshape2::melt(dW, id.vars = c("G","Id"),
-                     variable.name = "time", value.name = "Y")
-dL$time <- gsub("Y","",dL$time)
 
+#### LVM ####
+\dontrun{
 m1 <- lvm(c(Y1,Y2,Y3,Y4,Y5) ~ 1*eta)
 latent(m1) <- ~eta
 regression(m1) <- eta ~ G
@@ -109,15 +71,6 @@ apply(iid2,2,sd)
 quantile(iid2 - iid1)
 }
 
-#### lme ####
-\dontrun{
-library(nlme)
-e2 <- lme(Y~G+time, random = ~1|Id, weights = varIdent(form =~ 1|Id), data = dL)
-e2 <- lme(Y~G, random = ~1|Id, data = dL)
-
-iid3 <- iidJack(e2)
-apply(iid3,2,sd)
-}
 
 }
 \concept{iid decomposition}
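Since the glm and lme examples were dropped, a compact sketch of the remaining use case; the comparison with lava::iid is illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X)
d <- lava::sim(m, 20)
e <- estimate(m, data = d)
iidJ <- iidJack(e, cpus = 1)    ## leave-one-out (jackknife) decomposition
quantile(iidJ - lava::iid(e))   ## differences should be small unless n is tiny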
diff --git a/man/information2-internal.Rd b/man/information2-internal.Rd
index 06674b3..849edfb 100644
--- a/man/information2-internal.Rd
+++ b/man/information2-internal.Rd
@@ -1,56 +1,30 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/information2.R
+% Please edit documentation in R/sCorrect-information2.R
 \name{information2-internal}
 \alias{information2-internal}
 \alias{.information2}
-\alias{.hessian2}
 \title{Compute the Expected Information Matrix From the Conditional Moments}
 \usage{
 .information2(
   dmu,
   dOmega,
-  Omega,
-  n.corrected,
-  index.Omega,
-  leverage,
-  n.cluster,
-  grid.meanparam,
-  n.grid.meanparam,
-  grid.varparam,
-  n.grid.varparam,
+  OmegaM1,
+  missing.pattern,
+  unique.pattern,
+  name.pattern,
+  grid.mean,
+  grid.var,
   name.param,
-  n.param
-)
-
-.hessian2(
-  dmu,
-  d2mu,
-  dOmega,
-  d2Omega,
-  Omega,
-  n.corrected,
-  index.Omega,
   leverage,
-  n.cluster,
-  grid.meanparam,
-  n.grid.meanparam,
-  grid.varparam,
-  n.grid.varparam,
-  name.param,
-  n.param,
-  residuals
+  weights = NULL,
+  n.cluster
 )
 }
 \description{
 Compute the expected information matrix from the conditional moments.
-
-Compute the Hessian matrix from the conditional moments.
 }
 \details{
-\code{.information2} will perform the computation individually when the
-argument \code{index.Omega} is not null.
-
-\code{.hessian} will perform the computation individually when the
+\code{calc_information} will perform the computation individually when the
 argument \code{index.Omega} is not null.
 }
 \keyword{internal}
diff --git a/man/information2.Rd b/man/information2.Rd
index e6e62c3..926089b 100644
--- a/man/information2.Rd
+++ b/man/information2.Rd
@@ -1,47 +1,43 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/information2.R
+% Please edit documentation in R/sCorrect-information2.R
 \name{information2}
 \alias{information2}
-\alias{information2.lm}
-\alias{information2.gls}
-\alias{information2.lme}
 \alias{information2.lvmfit}
-\alias{information2.lm2}
-\alias{information2.gls2}
-\alias{information2.lme2}
 \alias{information2.lvmfit2}
-\title{Extract The full Information Matrix}
+\alias{information.lvmfit2}
+\title{Expected Information With Small Sample Correction.}
 \usage{
-information2(object, ...)
+information2(object, as.lava, ssc, ...)
 
-\method{information2}{lm}(object, ...)
+\method{information2}{lvmfit}(object, as.lava = TRUE, ssc = lava.options()$ssc, ...)
 
-\method{information2}{gls}(object, ...)
+\method{information2}{lvmfit2}(object, as.lava = TRUE, ...)
 
-\method{information2}{lme}(object, ...)
-
-\method{information2}{lvmfit}(object, ...)
-
-\method{information2}{lm2}(object, ...)
-
-\method{information2}{gls2}(object, ...)
-
-\method{information2}{lme2}(object, ...)
-
-\method{information2}{lvmfit2}(object, ...)
+\method{information}{lvmfit2}(x, ...)
 }
 \arguments{
-\item{object}{a linear model or a latent variable model}
+\item{object, x}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{as.lava}{[logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.}
 
-\item{...}{arguments to be passed to \code{vcov2}.}
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 }
 \value{
-A matrix.
+A matrix with as many rows and columns as the number of coefficients.
 }
 \description{
-Extract the full information matrix from a Gaussian linear model.
+Extract the expected information matrix from a latent variable model.
+Similar to \code{lava::information} but with small sample correction.
+}
+\details{
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the information matrix.
 }
 \examples{
+#### simulate data ####
 n <- 5e1
 p <- 3
 X.name <- paste0("X",1:p)
@@ -53,18 +49,18 @@ distribution(m,~Id) <- Sequence.lvm(0)
 set.seed(10)
 d <- lava::sim(m,n)
 
-## linear model
+#### linear models ####
 e.lm <- lm(formula.lvm,data=d)
-info.tempo <- vcov2(e.lm, bias.correct = TRUE)
-info.tempo[names(coef(e.lm)),names(coef(e.lm))] - vcov(e.lm)
 
-## latent variable model
+#### latent variable models ####
 e.lvm <- estimate(lvm(formula.lvm),data=d)
-vcov.tempo <- vcov2(e.lvm, bias.correct = FALSE)
-round(vcov.tempo \%*\% information(e.lvm), 5)
+information(e.lvm)
+information2(e.lvm)
+information2(e.lvm)[1:4,1:4] -  solve(vcov(e.lm))
 
 }
 \seealso{
-\code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{small sample inference}
+\concept{extractor}
+\keyword{smallSampleCorrection}
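A minimal sketch contrasting the corrected and uncorrected expected information, per the description above; data simulation is illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1 + X2)
d <- lava::sim(m, 50)
e <- estimate(m, data = d)
information2(e)                ## expected information with small sample correction
information2(e, ssc = "none")  ## without correction; compare with lava::information(e)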
diff --git a/man/leverage2.Rd b/man/leverage2.Rd
index cf2fc31..27f714f 100644
--- a/man/leverage2.Rd
+++ b/man/leverage2.Rd
@@ -1,50 +1,35 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/leverage.R
+% Please edit documentation in R/sCorrect-leverage2.R
 \name{leverage2}
 \alias{leverage2}
-\alias{leverage2.lm}
-\alias{leverage2.gls}
-\alias{leverage2.lme}
 \alias{leverage2.lvmfit}
-\alias{leverage2.lm2}
-\alias{leverage2.gls2}
-\alias{leverage2.lme2}
 \alias{leverage2.lvmfit2}
-\title{Extract Leverage Values}
+\title{Leverage With Small Sample Correction.}
 \usage{
-leverage2(object, ...)
+leverage2(object, format, ssc, ...)
 
-\method{leverage2}{lm}(object, param = NULL, data = NULL, ...)
+\method{leverage2}{lvmfit}(object, format = "wide", ssc = lava.options()$ssc, ...)
 
-\method{leverage2}{gls}(object, param = NULL, data = NULL, ...)
-
-\method{leverage2}{lme}(object, param = NULL, data = NULL, ...)
-
-\method{leverage2}{lvmfit}(object, param = NULL, data = NULL, ...)
-
-\method{leverage2}{lm2}(object, param = NULL, data = NULL, ...)
-
-\method{leverage2}{gls2}(object, param = NULL, data = NULL, ...)
-
-\method{leverage2}{lme2}(object, param = NULL, data = NULL, ...)
-
-\method{leverage2}{lvmfit2}(object, param = NULL, data = NULL, ...)
+\method{leverage2}{lvmfit2}(object, format = "wide", ...)
 }
 \arguments{
-\item{object}{a \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} object.}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
-\item{...}{arguments to be passed to \code{sCorrect}.}
+\item{format}{[character] Use \code{"wide"} to return the residuals in the wide format (one row relative to each sample).
+Otherwise use \code{"long"} to return the residuals in the long format.}
 
-\item{param}{[optional] the fitted parameters.}
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
 
-\item{data}{[optional] the data set.}
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 }
 \value{
 a matrix containing the leverage relative to each sample (in rows)
 and each endogenous variable (in column).
 }
 \description{
-Extract leverage values from a Gaussian linear model.
+Extract leverage values from a latent variable model, with small sample correction.
 }
 \details{
 The leverage are defined as the partial derivative of the fitted values with respect to the observations.
@@ -53,24 +38,17 @@ leverage_i = \frac{\partial \hat{Y}_i}{\partial Y_i}
 }
 See Wei et al. (1998). \cr \cr
 
-If argument \code{p} or \code{data} is not null, then the small sample size correction is recomputed to correct the residuals.
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the leverage.
 }
 \examples{
-## simulate data
+#### simulate data ####
 set.seed(10)
 m <- lvm(Y1~eta,Y2~eta,Y3~eta)
 latent(m) <- ~eta
 d <- lava::sim(m,20, latent = FALSE)
 
-## standard linear model
-e.lm <- lm(Y1~Y2, data = d)
-
-sCorrect(e.lm) <- TRUE
-range(as.double(leverage2(e.lm)) - influence(e.lm)$hat)
-
-## latent variable model
+#### latent variable models ####
 e.lvm <- estimate(m, data = d)
-sCorrect(e.lvm) <- TRUE
 leverage2(e.lvm)
 
 }
@@ -78,6 +56,7 @@ leverage2(e.lvm)
 Bo-Cheng Wei et al., Generalized Leverage and its applications (1998), Scandinavian Journal of Statistics 25:1:25-37.
 }
 \seealso{
-\code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{small sample inference}
+\concept{estimator}
+\keyword{smallSampleCorrection}
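A hedged sketch of the format argument on a one-factor model; the simulated data and summary step are illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(c(Y1, Y2, Y3) ~ eta)
latent(m) <- ~eta
d <- lava::sim(m, 20, latent = FALSE)
e <- estimate(m, data = d)
lev <- leverage2(e, format = "wide")  ## one row per observation
colMeans(lev)                         ## average leverage per endogenous variable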
diff --git a/man/moments2.Rd b/man/moments2.Rd
new file mode 100644
index 0000000..a2f16ed
--- /dev/null
+++ b/man/moments2.Rd
@@ -0,0 +1,140 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-moments2.R
+\name{moments2}
+\alias{moments2}
+\alias{moments2.lvm}
+\alias{moments2.lvmfit}
+\title{Compute Key Quantities of a Latent Variable Model}
+\usage{
+moments2(
+  object,
+  param,
+  data,
+  weights,
+  Omega,
+  Psi,
+  initialize,
+  usefit,
+  update.dmoment,
+  update.d2moment,
+  score,
+  information,
+  hessian,
+  vcov,
+  dVcov,
+  dVcov.robust,
+  residuals,
+  leverage,
+  derivative
+)
+
+\method{moments2}{lvm}(
+  object,
+  param = NULL,
+  data = NULL,
+  weights = NULL,
+  Omega = NULL,
+  Psi = NULL,
+  initialize = TRUE,
+  usefit = TRUE,
+  update.dmoment = TRUE,
+  update.d2moment = TRUE,
+  score = TRUE,
+  information = TRUE,
+  hessian = TRUE,
+  vcov = TRUE,
+  dVcov = TRUE,
+  dVcov.robust = TRUE,
+  residuals = TRUE,
+  leverage = TRUE,
+  derivative = "analytic"
+)
+
+\method{moments2}{lvmfit}(
+  object,
+  param = NULL,
+  data = NULL,
+  weights = NULL,
+  Omega = NULL,
+  Psi = NULL,
+  initialize = TRUE,
+  usefit = TRUE,
+  update.dmoment = TRUE,
+  update.d2moment = TRUE,
+  score = TRUE,
+  information = TRUE,
+  hessian = TRUE,
+  vcov = TRUE,
+  dVcov = TRUE,
+  dVcov.robust = TRUE,
+  residuals = TRUE,
+  leverage = TRUE,
+  derivative = "analytic"
+)
+}
+\arguments{
+\item{object}{a latent variable model.}
+
+\item{param}{[numeric vector] value of the model parameters if different from the estimated ones.}
+
+\item{data}{[data.frame] dataset if different from the one used to fit the model.}
+
+\item{Psi}{[matrix]  Average first order bias in the residual variance. Only necessary for computing adjusted residuals.}
+
+\item{initialize}{[logical] Pre-compute quantities dependent on the data but not on the parameters values.}
+
+\item{usefit}{[logical] Compute key quantities based on the parameter values.}
+
+\item{update.dmoment}{[logical] should the first derivative of the moments be computed/updated?}
+
+\item{update.d2moment}{[logical] should the second derivative of the moments be computed/updated?}
+
+\item{score}{[logical] should the score be output?}
+
+\item{information}{[logical] should the expected information be output?}
+
+\item{hessian}{[logical] should the hessian be output?}
+
+\item{vcov}{[logical] should the variance-covariance matrix based on the expected information be output?}
+
+\item{dVcov}{[logical] should the derivative of the variance-covariance matrix be output?}
+
+\item{dVcov.robust}{[logical]  should the derivative of the robust variance-covariance matrix be output?}
+
+\item{...}{[internal] only used by the generic method or by the <- methods.}
+}
+\description{
+Compute the conditional mean and conditional variance, their first and second derivatives with respect to the model parameters, as well as various derivatives of the log-likelihood.
+}
+\details{
+For lvmfit objects, there are two levels of pre-computation:
+\itemize{
+\item a basic one that does not involve the model coefficients (\code{conditionalMoment.lvm}).
+\item an advanced one that requires the model coefficients (\code{conditionalMoment.lvmfit}).
+}
+}
+\examples{
+m <- lvm(Y1~eta,Y2~eta,Y3~eta)
+latent(m) <- ~eta
+
+d <- lava::sim(m,1e2)
+e <- estimate(m, d)
+
+## basic pre-computation
+res1 <- moments2(e, data = d, initialize = TRUE, usefit = FALSE,
+                score = TRUE, information = TRUE, hessian = TRUE, vcov = TRUE,
+                dVcov = TRUE, dVcov.robust = TRUE, residuals = TRUE, leverage = FALSE,
+                derivative = "analytic")
+res1$skeleton$param$Sigma
+
+## full pre-computation
+res2 <- moments2(e, param = coef(e), data = d, initialize = TRUE, usefit = TRUE,
+                score = TRUE, information = TRUE, hessian = TRUE, vcov = TRUE,
+                dVcov = TRUE, dVcov.robust = TRUE, residuals = TRUE, leverage = FALSE,
+                derivative = "analytic")
+res2$moment$Omega
+
+}
+\concept{derivative of the score equation}
+\concept{small sample inference}
+\keyword{internal}
diff --git a/man/nStep.Rd b/man/nStep.Rd
index 3688076..f350565 100644
--- a/man/nStep.Rd
+++ b/man/nStep.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/methods-modelsearch2.R
+% Please edit documentation in R/modelsearch2-methods.R
 \name{nStep}
 \alias{nStep}
 \alias{nStep.modelsearch2}
diff --git a/man/nobs2.Rd b/man/nobs2.Rd
new file mode 100644
index 0000000..6dbd81c
--- /dev/null
+++ b/man/nobs2.Rd
@@ -0,0 +1,37 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-nobs2.R
+\name{nobs2}
+\alias{nobs2}
+\alias{nobs2.lvmfit}
+\alias{nobs2.lvmfit2}
+\title{Effective Sample Size.}
+\usage{
+nobs2(object, ssc, ...)
+
+\method{nobs2}{lvmfit}(object, ssc = lava.options()$ssc, ...)
+
+\method{nobs2}{lvmfit2}(object, ...)
+}
+\arguments{
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
+}
+\value{
+Numeric vector of length the number of endogenous variables.
+}
+\description{
+Extract the effective sample size, i.e. sample size minus the loss in degrees of freedom caused by the estimation of the parameters.
+}
+\details{
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the effective sample size.
+}
+\seealso{
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
+}
+\concept{extractor}
+\keyword{smallSampleCorrection}
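This man page has no example, so a minimal sketch of the intended use (one entry per endogenous variable, per the value section); data simulation is illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(c(Y1, Y2, Y3) ~ eta)
latent(m) <- ~eta
d <- lava::sim(m, 30, latent = FALSE)
e <- estimate(m, data = d)
nobs2(e)                ## effective sample size per endogenous variable
nobs2(e, ssc = "none")  ## uncorrected counterpart, for comparison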
diff --git a/man/residuals2.Rd b/man/residuals2.Rd
index a59aa86..25eb716 100644
--- a/man/residuals2.Rd
+++ b/man/residuals2.Rd
@@ -1,44 +1,47 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/residuals2.R
+% Please edit documentation in R/sCorrect-residuals2.R
 \name{residuals2}
 \alias{residuals2}
-\alias{residuals2.lm2}
-\alias{residuals2.gls2}
-\alias{residuals2.lme2}
-\alias{residuals2.lvmfit2}
-\title{Extract Corrected Residuals}
+\alias{residuals2.lvmfit}
+\title{Residuals With Small Sample Correction.}
 \usage{
-residuals2(object, param, data, type)
+residuals2(object, type, format, ssc, ...)
 
-\method{residuals2}{lm2}(object, param = NULL, data = NULL, type = "response")
-
-\method{residuals2}{gls2}(object, param = NULL, data = NULL, type = "response")
-
-\method{residuals2}{lme2}(object, param = NULL, data = NULL, type = "response")
-
-\method{residuals2}{lvmfit2}(object, param = NULL, data = NULL, type = "response")
+\method{residuals2}{lvmfit}(
+  object,
+  type = "response",
+  format = "wide",
+  ssc = lava.options()$ssc,
+  ...
+)
 }
 \arguments{
-\item{object}{a \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} object.}
-
-\item{param}{[named numeric vector] the fitted parameters.}
-
-\item{data}{[data.frame] the data set.}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
 \item{type}{[character] the type of residual to extract:
 \code{"response"} for raw residuals,
 \code{"studentized"} for studentized residuals,
 \code{"normalized"} for normalized residuals.}
+
+\item{format}{[character] Use \code{"wide"} to return the residuals in the wide format (one row relative to each sample).
+Otherwise use \code{"long"} to return the residuals in the long format.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 }
 \value{
 a matrix containing the residuals relative to each sample (in rows)
 and each endogenous variable (in column).
 }
 \description{
-Extract correct residuals from a gaussian linear model.
+Extract residuals from a latent variable model.
+Similar to \code{stats::residuals} but with small sample correction.
 }
 \details{
-If argument \code{p} or \code{data} is not null, then the small sample size correction is recomputed to correct the residuals. \cr
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the residuals.
 
 The raw residuals are defined by  observation minus the fitted value:
 \deqn{
@@ -54,27 +57,27 @@ The normalized residuals multiply the raw residuals by the inverse of the square
 }
 }
 \examples{
-## simulate data
+#### simulate data ####
 set.seed(10)
-m <- lvm(Y1~eta,Y2~eta,Y3~eta)
-latent(m) <- ~eta
-d <- lava::sim(m,20, latent = FALSE)
-
-## standard linear model
-e.lm <- lm(Y1~Y2, data = d)
-sCorrect(e.lm) <- TRUE
+n <- 101
 
-sigma(e.lm)^2
-mean(residuals(e.lm)^2)
-mean(residuals2(e.lm)^2)
+Y1 <- rnorm(n, mean = 0)
+Y2 <- rnorm(n, mean = 0.3)
+Id <- findInterval(runif(n), seq(0.1,1,0.1))
+data.df <- rbind(data.frame(Y=Y1,G="1",Id = Id),
+           data.frame(Y=Y2,G="2",Id = Id)
+           )
 
-## latent variable model
-e.lvm <- estimate(m, data = d)
-sCorrect(e.lvm) <- TRUE
-mean(residuals2(e.lvm)^2)
+#### latent variable models ####
+library(lava)
+e.lvm <- estimate(lvm(Y ~ G), data = data.df)
+residuals(e.lvm)
+residuals2(e.lvm)
+residuals(e.lvm) - residuals2(e.lvm)
 
 }
 \seealso{
-\code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{small sample inference}
+\concept{extractor}
+\keyword{smallSampleCorrection}
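A short sketch of the type and format arguments documented above; the one-factor model is illustrative:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(c(Y1, Y2, Y3) ~ eta)
latent(m) <- ~eta
d <- lava::sim(m, 20, latent = FALSE)
e <- estimate(m, data = d)
head(residuals2(e, type = "studentized"))  ## studentized residuals, wide format
head(residuals2(e, format = "long"))       ## raw residuals, long format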
diff --git a/man/sCorrect.Rd b/man/sCorrect.Rd
index fc93886..ff200bb 100644
--- a/man/sCorrect.Rd
+++ b/man/sCorrect.Rd
@@ -2,189 +2,26 @@
 % Please edit documentation in R/sCorrect.R
 \name{sCorrect}
 \alias{sCorrect}
-\alias{sCorrect.lm}
-\alias{sCorrect.lm2}
-\alias{sCorrect.gls}
-\alias{sCorrect.gls2}
-\alias{sCorrect.lme}
-\alias{sCorrect.lme2}
-\alias{sCorrect.lvmfit}
-\alias{sCorrect.lvmfit2}
+\alias{sCorrect.default}
 \alias{sCorrect<-}
-\alias{sCorrect<-.lm}
-\alias{sCorrect<-.lm2}
-\alias{sCorrect<-.gls}
-\alias{sCorrect<-.gls2}
-\alias{sCorrect<-.lme}
-\alias{sCorrect<-.lme2}
-\alias{sCorrect<-.lvmfit}
-\alias{sCorrect<-.lvmfit2}
-\title{Satterthwaite Correction and Small Sample Correction}
+\alias{sCorrect<-.default}
+\title{Deprecated Method For Small Sample Correction}
 \usage{
-sCorrect(
-  object,
-  adjust.Omega,
-  adjust.n,
-  df,
-  numeric.derivative,
-  param,
-  data,
-  tol,
-  n.iter,
-  trace,
-  ...
-)
+sCorrect(object, ...)
 
-\method{sCorrect}{lm}(
-  object,
-  adjust.Omega = TRUE,
-  adjust.n = TRUE,
-  df = TRUE,
-  numeric.derivative = FALSE,
-  param = NULL,
-  data = NULL,
-  tol = 1e-05,
-  n.iter = 20,
-  trace = 0,
-  ...
-)
-
-\method{sCorrect}{lm2}(object, ...)
-
-\method{sCorrect}{gls}(
-  object,
-  adjust.Omega = TRUE,
-  adjust.n = TRUE,
-  df = TRUE,
-  numeric.derivative = FALSE,
-  param = NULL,
-  data = NULL,
-  tol = 1e-05,
-  n.iter = 20,
-  trace = 0,
-  cluster,
-  ...
-)
-
-\method{sCorrect}{gls2}(object, ...)
-
-\method{sCorrect}{lme}(
-  object,
-  adjust.Omega = TRUE,
-  adjust.n = TRUE,
-  df = TRUE,
-  numeric.derivative = FALSE,
-  param = NULL,
-  data = NULL,
-  tol = 1e-05,
-  n.iter = 20,
-  trace = 0,
-  cluster,
-  ...
-)
-
-\method{sCorrect}{lme2}(object, ...)
-
-\method{sCorrect}{lvmfit}(
-  object,
-  adjust.Omega = TRUE,
-  adjust.n = TRUE,
-  df = TRUE,
-  numeric.derivative = FALSE,
-  param = NULL,
-  data = NULL,
-  tol = 1e-05,
-  n.iter = 20,
-  trace = 0,
-  ...
-)
-
-\method{sCorrect}{lvmfit2}(object, ...)
+\method{sCorrect}{default}(object, ...)
 
 sCorrect(x, ...) <- value
 
-\method{sCorrect}{lm}(x, ...) <- value
-
-\method{sCorrect}{lm2}(x, ...) <- value
-
-\method{sCorrect}{gls}(x, ...) <- value
-
-\method{sCorrect}{gls2}(x, ...) <- value
-
-\method{sCorrect}{lme}(x, ...) <- value
-
-\method{sCorrect}{lme2}(x, ...) <- value
-
-\method{sCorrect}{lvmfit}(x, ...) <- value
-
-\method{sCorrect}{lvmfit2}(x, ...) <- value
+\method{sCorrect}{default}(x, ...) <- value
 }
 \arguments{
-\item{object, x}{a \code{gls}, \code{lme}, or \code{lvm} object.}
-
-\item{adjust.Omega}{[logical] should the standard errors of the coefficients be corrected for small sample bias?}
-
-\item{adjust.n}{[logical] should the correction for the degree of freedom be performed?}
+\item{object, x}{a \code{lvmfit} object.}
 
-\item{df}{[logical] should the degree of freedoms of the Wald statistic be computed using the Satterthwaite correction?
-Otherwise the degree of freedoms are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.}
+\item{...}{not used.}
 
-\item{numeric.derivative}{[logical] should a numerical derivative be used to compute the first derivative of the information matrix?
-Otherwise an analytic formula is used.}
-
-\item{param}{[numeric vector, optional] the values of the parameters at which to perform the correction.}
-
-\item{data}{[data.frame, optional] the dataset relative to which the correction should be performed.}
-
-\item{tol}{[numeric >0] the minimum absolute difference between two estimation of the small sample bias.
-Below this value, the algorithm used to estimate the bias stop.}
-
-\item{n.iter}{[integer >0] the maximum number of iterations used to estimate the small sample bias of the residual variance-covariance matrix.}
-
-\item{trace}{[logical] should the execution of the function be traced.}
-
-\item{...}{[internal] only used by the generic method or by the <- methods.}
-
-\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.
-Only required for \code{gls} models with no correlation argument.}
-
-\item{value}{[logical] value for the arguments \code{adjust.Omega} and \code{adjust.n}.}
+\item{value}{not used.}
 }
 \description{
-Correct the bias of the ML estimate of the variance and compute the first derivative of the information matrix.
-}
-\details{
-The argument \code{value} is equivalent to the argument \code{bias.correct} of the function \code{summary2}.
-}
-\examples{
-n <- 5e1
-p <- 3
-X.name <- paste0("X",1:p)
-link.lvm <- paste0("Y~",X.name)
-formula.lvm <- as.formula(paste0("Y~",paste0(X.name,collapse="+")))
-
-m <- lvm(formula.lvm)
-distribution(m,~Id) <- Sequence.lvm(0)
-set.seed(10)
-d <- lava::sim(m,n)
-
-## linear model
-e.lm <- lm(formula.lvm,data=d)
-system.time(
-sCorrect(e.lm) <- TRUE ## i.e. bias.correct = TRUE
-)
-
-## gls model
-library(nlme)
-e.gls <- gls(formula.lvm, data = d, method = "ML")
-sCorrect(e.gls, cluster = 1:NROW(d)) <- TRUE ## i.e. bias.correct = TRUE
-summary2(e.gls)
-
-## latent variable model
-e.lvm <- estimate(lvm(formula.lvm),data=d)
-sCorrect(e.lvm) <- TRUE ## i.e. bias.correct = TRUE
-summary2(e.lvm)
-
+Deprecated method for small sample correction, now replaced by the \code{\link{estimate2}} method.
 }
-\concept{derivative of the score equation}
-\concept{small sample inference}
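For users migrating from 1.5.x, a hedged before/after sketch; estimate2 accepting a fitted lvmfit directly is assumed from the "first calls estimate2" details in the pages above:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1 + X2)
d <- lava::sim(m, 50)
e.lvm <- estimate(m, data = d)
## lavaSearch2 <= 1.5.x:  sCorrect(e.lvm) <- TRUE; summary2(e.lvm)
e2 <- estimate2(e.lvm)  ## lvmfit2 object with the correction pre-computed
summary(e2)             ## corrected standard errors and degrees of freedom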
diff --git a/man/sampleRepeated.Rd b/man/sampleRepeated.Rd
new file mode 100644
index 0000000..9374f41
--- /dev/null
+++ b/man/sampleRepeated.Rd
@@ -0,0 +1,30 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sampleRepeated.R
+\name{sampleRepeated}
+\alias{sampleRepeated}
+\title{Simulate Repeated Measurements over time}
+\usage{
+sampleRepeated(n, n.Xcont = 2, n.Xcat = 2, n.rep = 5, format = "long")
+}
+\arguments{
+\item{n}{[integer] sample size.}
+
+\item{n.Xcont}{[integer] number of continuous covariates acting on the latent variable.}
+
+\item{n.Xcat}{[integer] number of categorical covariates acting on the latent variable.}
+
+\item{n.rep}{[integer] number of measurement of the response variable.}
+
+\item{format}{[character] should the dataset be returned in the \code{"long"} format or in the \code{"wide"} format.}
+}
+\value{
+a \code{data.frame} object.
+}
+\description{
+Simulate repeated measurements over time (one factor model).
+}
+\examples{
+
+sampleRepeated(10, format = "wide")
+sampleRepeated(10, format = "long")
+}
diff --git a/man/score2-internal.Rd b/man/score2-internal.Rd
index b137d5a..000d9bf 100644
--- a/man/score2-internal.Rd
+++ b/man/score2-internal.Rd
@@ -1,28 +1,29 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/score2.R
+% Please edit documentation in R/sCorrect-score2.R
 \name{score2-internal}
 \alias{score2-internal}
 \alias{.score2}
 \title{Compute the Corrected Score.}
 \usage{
 .score2(
-  epsilon,
-  Omega,
-  OmegaM1,
   dmu,
   dOmega,
+  epsilon,
+  OmegaM1,
+  missing.pattern,
+  unique.pattern,
+  name.pattern,
   name.param,
   name.meanparam,
   name.varparam,
-  index.Omega,
   n.cluster,
-  indiv
+  weights
 )
 }
 \arguments{
 \item{n.cluster}{[integer >0] the number of observations.}
 }
 \description{
-Compute the corrected score when there is no missing value.
+Compute the corrected score.
 }
 \keyword{internal}
diff --git a/man/score2.Rd b/man/score2.Rd
index 759c060..3976f10 100644
--- a/man/score2.Rd
+++ b/man/score2.Rd
@@ -1,57 +1,55 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/score2.R
+% Please edit documentation in R/sCorrect-score2.R
 \name{score2}
 \alias{score2}
-\alias{score2.lm}
-\alias{score2.gls}
-\alias{score2.lme}
 \alias{score2.lvmfit}
-\alias{score2.lm2}
-\alias{score2.gls2}
-\alias{score2.lme2}
 \alias{score2.lvmfit2}
-\title{Extract the Individual Score}
+\alias{score.lvmfit2}
+\title{Score With Small Sample Correction}
 \usage{
-score2(object, ...)
+score2(object, indiv, cluster, as.lava, ...)
 
-\method{score2}{lm}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
+\method{score2}{lvmfit}(
+  object,
+  indiv = FALSE,
+  cluster = NULL,
+  as.lava = TRUE,
+  ssc = lava.options()$ssc,
+  ...
+)
 
-\method{score2}{gls}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
+\method{score2}{lvmfit2}(object, indiv = FALSE, cluster = NULL, as.lava = TRUE, ...)
 
-\method{score2}{lme}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{score2}{lvmfit}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{score2}{lm2}(object, param = NULL, data = NULL, ...)
-
-\method{score2}{gls2}(object, param = NULL, data = NULL, ...)
-
-\method{score2}{lme2}(object, param = NULL, data = NULL, ...)
-
-\method{score2}{lvmfit2}(object, param = NULL, data = NULL, ...)
+\method{score}{lvmfit2}(x, indiv = FALSE, cluster = NULL, as.lava = TRUE, ...)
 }
 \arguments{
-\item{object}{a linear model or a latent variable model}
+\item{object, x}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{indiv}{[logical] If \code{TRUE}, the score relative to each observation is returned. Otherwise the total score is returned.}
 
-\item{...}{arguments to be passed to \code{sCorrect}.}
+\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
 
-\item{param}{[optional] the fitted parameters.}
+\item{as.lava}{[logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.}
 
-\item{data}{[optional] the data set.}
+\item{...}{additional argument passed to \code{estimate2} when using a \code{lvmfit} object.}
 
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias? Only relevant if the \code{sCorrect} function has not yet be applied to the object.}
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
 }
 \value{
-A matrix containing the score relative to each sample (in rows)
-and each model coefficient (in columns).
+When argument indiv is \code{TRUE}, a matrix containing the score relative to each sample (in rows)
+and each model coefficient (in columns). Otherwise a numeric vector whose length equals the number of model coefficients.
 }
 \description{
-Extract the individual score from a Gaussian linear model.
+Extract the (individual) score of a latent variable model.
+Similar to \code{lava::score} but with small sample correction.
 }
 \details{
-If argument \code{p} or \code{data} is not null, then the small sample size correction is recomputed to correct the influence function.
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the score.
 }
 \examples{
+#### simulate data ####
 n <- 5e1
 p <- 3
 X.name <- paste0("X",1:p)
@@ -63,18 +61,19 @@ distribution(m,~Id) <- Sequence.lvm(0)
 set.seed(10)
 d <- lava::sim(m,n)
 
-## linear model
-e.lm <- lm(formula.lvm,data=d)
-score.tempo <- score2(e.lm, bias.correct = FALSE)
-colMeans(score.tempo)
+#### linear models ####
+e.lm <- lm(Y~X1+X2+X3, data = d)
 
-## latent variable model
-e.lvm <- estimate(lvm(formula.lvm),data=d)
-score.tempo <- score2(e.lvm, bias.correct = FALSE)
-range(score.tempo-score(e.lvm, indiv = TRUE))
+#### latent variable models ####
+m.lvm <- lvm(formula.lvm)
+e.lvm <- estimate(m.lvm,data=d)
+e2.lvm <- estimate2(m.lvm,data=d)
+score.tempo <- score(e2.lvm, indiv = TRUE)
+colSums(score.tempo)
 
 }
 \seealso{
-\code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{small sample inference}
+\concept{extractor}
+\keyword{smallSampleCorrection}
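A minimal sketch of the indiv and ssc arguments; whether the corrected score sums exactly to zero is not guaranteed, so the comment below hedges:

library(lava)
library(lavaSearch2)
set.seed(10)
m <- lvm(Y ~ X1 + X2)
d <- lava::sim(m, 50)
e <- estimate(m, data = d)
colSums(score2(e, indiv = TRUE))  ## column sums, typically close to zero
score2(e, ssc = "none")           ## total score without the correction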
diff --git a/man/selectRegressor.Rd b/man/selectRegressor.Rd
index 56eb64b..1a44dab 100644
--- a/man/selectRegressor.Rd
+++ b/man/selectRegressor.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-formula.R
+% Please edit documentation in R/Utils.R
 \name{selectRegressor}
 \alias{selectRegressor}
 \alias{selectRegressor.formula}
diff --git a/man/selectResponse.Rd b/man/selectResponse.Rd
index 44cb5be..feb2e22 100644
--- a/man/selectResponse.Rd
+++ b/man/selectResponse.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/Utils-formula.R
+% Please edit documentation in R/Utils.R
 \name{selectResponse}
 \alias{selectResponse}
 \alias{selectResponse.formula}
diff --git a/man/skeleton.Rd b/man/skeleton.Rd
index ae72a98..9f47ceb 100644
--- a/man/skeleton.Rd
+++ b/man/skeleton.Rd
@@ -1,152 +1,44 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/skeleton.R
+% Please edit documentation in R/sCorrect-skeleton.R
 \name{skeleton}
 \alias{skeleton}
-\alias{skeleton.lvm}
-\alias{skeleton.lvmfit}
 \alias{skeletonDtheta}
-\alias{skeletonDtheta.gls}
-\alias{skeletonDtheta.lme}
-\alias{skeletonDtheta.lvm}
-\alias{skeletonDtheta.lvmfit}
 \alias{skeletonDtheta2}
-\alias{skeletonDtheta2.gls}
-\alias{skeletonDtheta2.lme}
-\alias{skeletonDtheta2.lm}
-\alias{skeletonDtheta2.lvm}
-\alias{skeletonDtheta2.lvmfit}
 \title{Pre-computation for the Score}
 \usage{
-skeleton(object, ...)
+skeleton(object, X, endogenous, latent, n.cluster, index.Omega)
 
-\method{skeleton}{lvm}(object, as.lava, name.endogenous, name.latent, ...)
-
-\method{skeleton}{lvmfit}(object, param, data, name.endogenous, name.latent, ...)
-
-skeletonDtheta(object, ...)
-
-\method{skeletonDtheta}{gls}(
+skeletonDtheta(
   object,
-  class.cor,
-  class.var,
   X,
-  sigma2.base0,
-  Msigma2.base0,
-  M.corcoef,
-  ref.group,
-  index.lower.tri,
-  indexArr.lower.tri,
-  name.endogenous,
-  n.endogenous,
-  cluster,
+  endogenous,
+  latent,
+  missing.pattern,
+  unique.pattern,
+  name.pattern,
   n.cluster,
-  var.coef,
-  name.varcoef,
-  name.otherVar,
-  n.varcoef,
-  cor.coef,
-  name.corcoef,
-  n.corcoef,
-  index.Omega,
-  update.mean,
-  update.variance,
-  ...
-)
-
-\method{skeletonDtheta}{lme}(object, name.endogenous, n.endogenous, name.rancoef, ...)
-
-\method{skeletonDtheta}{lvm}(
-  object,
-  data,
-  df.param.all,
-  param2originalLink,
-  name.endogenous,
-  name.latent,
-  ...
+  index.Omega
 )
 
-\method{skeletonDtheta}{lvmfit}(object, name.endogenous, name.latent, ...)
-
-skeletonDtheta2(object, ...)
-
-\method{skeletonDtheta2}{gls}(
-  object,
-  dOmega = NULL,
-  class.cor = NULL,
-  class.var = NULL,
-  M.corcoef = NULL,
-  n.endogenous = NULL,
-  index.lower.tri = NULL,
-  indexArr.lower.tri = NULL,
-  var.coef = NULL,
-  name.otherVar = NULL,
-  name.varcoef = NULL,
-  n.varcoef = NULL,
-  cor.coef = NULL,
-  name.corcoef = NULL,
-  ...
-)
-
-\method{skeletonDtheta2}{lme}(
-  object,
-  dOmega = NULL,
-  class.cor = NULL,
-  class.var = NULL,
-  M.corcoef = NULL,
-  n.endogenous = NULL,
-  index.lower.tri = NULL,
-  indexArr.lower.tri = NULL,
-  var.coef = NULL,
-  name.otherVar = NULL,
-  name.varcoef = NULL,
-  n.varcoef = NULL,
-  cor.coef = NULL,
-  name.corcoef = NULL,
-  ...
-)
-
-\method{skeletonDtheta2}{lm}(object, ...)
-
-\method{skeletonDtheta2}{lvm}(
-  object,
-  data,
-  df.param.all,
-  param2originalLink,
-  name.latent,
-  ...
-)
-
-\method{skeletonDtheta2}{lvmfit}(object, ...)
+skeletonDtheta2(object)
 }
 \arguments{
 \item{object}{a \code{lvm} object.}
 
-\item{...}{[internal] only used by the generic method.}
-
-\item{as.lava}{[logical] should the name of the links be used to name the coefficient?
-Otherwise uses the labels (when defined) of each coefficient.}
+\item{X}{[matrix] design matrix containing the covariates for each endogenous and latent variable.}
 
-\item{name.endogenous}{[character vector] name of the endogenous variables}
+\item{latent}{[character vector] the name of the latent variables.}
 
-\item{name.latent}{[character vector] name of the latent variables}
+\item{endogenous}{[character vector] the name of the endogenous variables.}
 
-\item{data}{[data.frame, optional] data set.}
-
-\item{df.param.all}{[data.frame] output of \code{\link{coefType}} containing the type of each coefficient.}
-
-\item{param2originalLink}{[named character vector] matching between the name of the coefficient in lava and their label.}
-
-\item{B, alpha.XGamma, Lambda, Psi}{[matrix] pre-computed matrix.}
-
-\item{OD}{[list] the pre-computed quantities for the second derivatives.}
-
-\item{p}{[numeric vector, optional] vector of coefficients at which to evaluate the score.}
+\item{...}{[internal] only used by the generic method.}
 }
 \description{
 Pre-compute quantities that are necessary to compute the score of a lvm model.
 }
 \details{
-When the use specify names for the coefficients (e.g. Y1[mu:sigma]) or uses constrains (Y1~beta*X1), \code{as.lava=FALSE} will use the names specified by the user (e.g. mu, sigma, beta) while \code{as.lava=TRUE} will use the name of the first link defining the coefficient.
+When the user specifies names for the coefficients (e.g. Y1[mu:sigma]) or uses constraints (Y1~beta*X1), \code{as.lava=FALSE} will use the names specified by the user (e.g. mu, sigma, beta)
+while \code{as.lava=TRUE} will use the name of the first link defining the coefficient.
 }
 \examples{
 \dontrun{
diff --git a/man/summary.glht2.Rd b/man/summary.glht2.Rd
new file mode 100644
index 0000000..1e004ef
--- /dev/null
+++ b/man/summary.glht2.Rd
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-summary.glht2.R
+\name{summary.glht2}
+\alias{summary.glht2}
+\title{Outcome of Linear Hypothesis Testing}
+\usage{
+\method{summary}{glht2}(
+  object,
+  confint = TRUE,
+  conf.level = 0.95,
+  transform = NULL,
+  seed = NULL,
+  rowname.rhs = TRUE,
+  ...
+)
+}
+\arguments{
+\item{object}{a \code{glht2} object.}
+
+\item{confint}{[logical] should confidence intervals be output?}
+
+\item{conf.level}{[numeric 0-1] level of the confidence intervals.}
+
+\item{transform}{[function] function to backtransform the estimates, standard errors, null hypothesis, and the associated confidence intervals
+(e.g. \code{exp} if the outcomes have been log-transformed).}
+
+\item{seed}{[integer] value that will be set before adjustment for multiple comparisons to ensure reproducible results.
+Can also be \code{NULL}: in such a case no seed is set.}
+
+\item{rowname.rhs}{[logical] when naming the hypotheses, add the right-hand side (i.e. "X1-X2=0" instead of "X1-X2").}
+
+\item{...}{argument passed to \code{multcomp:::summary.glht}, e.g. argument \code{test} to choose the type of adjustment for multiple comparisons.}
+}
+\description{
+Estimates, p-values, and confidence intervals for linear hypothesis testing, possibly adjusted for multiple comparisons.
+}
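
Note on usage: a minimal sketch of the new method, assuming the arguments documented above; the model, data, and hypothesis labels below are illustrative and not taken from the package.

    library(lavaSearch2)   ## also attaches lava
    library(multcomp)
    m <- lvm(Y ~ X1 + X2)
    set.seed(10)
    d <- lava::sim(m, 50)
    e.lvm <- estimate(m, data = d)
    ## test both regression coefficients at once, adjusting for multiplicity;
    ## the hypothesis labels should match names(coef(e.lvm))
    e.glht2 <- glht2(e.lvm, linfct = c("Y~X1 = 0", "Y~X2 = 0"))
    summary(e.glht2, confint = TRUE, conf.level = 0.95, seed = 10)
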
diff --git a/man/summary.modelsearch2.Rd b/man/summary.modelsearch2.Rd
index 90887f3..b0d4b2a 100644
--- a/man/summary.modelsearch2.Rd
+++ b/man/summary.modelsearch2.Rd
@@ -1,5 +1,5 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/summary.modelsearch2.R
+% Please edit documentation in R/modelsearch2-summary.R
 \name{summary.modelsearch2}
 \alias{summary.modelsearch2}
 \title{summary Method for modelsearch2 Objects}
diff --git a/man/summary2.Rd b/man/summary2.Rd
index 55cf0df..64a0e32 100644
--- a/man/summary2.Rd
+++ b/man/summary2.Rd
@@ -1,72 +1,62 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/summary2.R
+% Please edit documentation in R/sCorrect-summary2.R
 \name{summary2}
 \alias{summary2}
-\alias{summary2.lm}
-\alias{summary2.gls}
-\alias{summary2.lme}
 \alias{summary2.lvmfit}
-\alias{summary2.lm2}
-\alias{summary2.gls2}
-\alias{summary2.lme2}
 \alias{summary2.lvmfit2}
-\title{Summary with Small Sample Correction}
+\alias{summary.lvmfit2}
+\title{Latent Variable Model Summary After Small Sample Correction}
 \usage{
-summary2(object, ...)
+summary2(object, robust, cluster, digit, ...)
 
-\method{summary2}{lm}(object, df = TRUE, bias.correct = TRUE, ...)
-
-\method{summary2}{gls}(object, df = TRUE, bias.correct = TRUE, cluster = NULL, ...)
-
-\method{summary2}{lme}(object, df = TRUE, bias.correct = TRUE, ...)
-
-\method{summary2}{lvmfit}(object, df = TRUE, bias.correct = TRUE, ...)
-
-\method{summary2}{lm2}(
+\method{summary2}{lvmfit}(
   object,
-  digit = max(3, getOption("digit")),
   robust = FALSE,
-  df = TRUE,
+  cluster = NULL,
+  digit = max(5, getOption("digit")),
+  ssc = lava.options()$ssc,
+  df = lava.options()$df,
   ...
 )
 
-\method{summary2}{gls2}(
+\method{summary2}{lvmfit2}(
   object,
-  digit = max(3, getOption("digit")),
   robust = FALSE,
-  df = TRUE,
+  cluster = NULL,
+  digit = max(5, getOption("digit")),
   ...
 )
 
-\method{summary2}{lme2}(
+\method{summary}{lvmfit2}(
   object,
-  digit = max(3, getOption("digit")),
   robust = FALSE,
-  df = TRUE,
+  cluster = NULL,
+  digit = max(5, getOption("digit")),
   ...
 )
-
-\method{summary2}{lvmfit2}(object, cluster = NULL, robust = FALSE, df = TRUE, ...)
 }
 \arguments{
-\item{object}{a \code{gls}, \code{lme} or \code{lvm} object.}
-
-\item{...}{arguments passed to the \code{summary} method of the object.}
-
-\item{df}{[logical] should the degree of freedoms of the Wald statistic be computed using the Satterthwaite correction?
-Otherwise the degree of freedoms are set to \code{Inf}, i.e. a normal distribution is used instead of a Student's t distribution when computing the p-values.}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
 
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias?
-See \code{\link{sCorrect}} for more details.}
+\item{robust}{[logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.}
 
 \item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
 
 \item{digit}{[integer > 0] the number of decimal places to use when displaying the summary.}
 
-\item{robust}{[logical] should the robust standard errors be used instead of the model based standard errors?}
+\item{...}{additional arguments passed to lower level methods.}
+
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
+
+\item{df}{[character] method used to estimate the degrees of freedom of the Wald statistic: Satterthwaite (\code{"satterthwaite"}).
+Otherwise (\code{"none"}/\code{FALSE}/\code{NA}) the degrees of freedom are set to \code{Inf}.
+Only relevant when using a \code{lvmfit} object.}
 }
 \description{
-Summary with small sample correction.
+Summarize a fitted latent variable model.
+Similar to \code{stats::summary} with small sample correction.
 }
 \details{
 \code{summary2} is the same as \code{summary}
@@ -74,37 +64,25 @@ except that it first computes the small sample correction (but does not store it
 So if \code{summary2} is to be called several times,
 it is more efficient to pre-compute the quantities for the small sample correction
 using \code{sCorrect} and then call \code{summary2}.
+
+\code{summary2} returns an object with an element \code{table2} containing the estimates, standard errors, degrees of freedom,
+upper and lower limits of the confidence intervals, test statistics, and p-values.
 }
 \examples{
+#### simulate data ####
 m <- lvm(Y~X1+X2)
 set.seed(10)
 d <- lava::sim(m, 2e1)
 
-## Gold standard
-summary(lm(Y~X1+X2, d))$coef
-
-## gls models
-library(nlme)
-e.gls <- gls(Y~X1+X2, data = d, method = "ML")
-summary(e.gls)$tTable
-sCorrect(e.gls, cluster = 1:NROW(d)) <- FALSE ## no small sample correction
-summary2(e.gls)$tTable
-
-sCorrect(e.gls, cluster = 1:NROW(d)) <- TRUE ## small sample correction
-summary2(e.gls)$tTable
-
-## lvm models
+#### latent variable models ####
 e.lvm <- estimate(m, data = d)
 summary(e.lvm)$coef
 
-sCorrect(e.lvm) <- FALSE ## no small sample correction
-summary2(e.lvm)$coef
-
-sCorrect(e.lvm) <- TRUE ## small sample correction
-summary2(e.lvm)$coef
+summary2(e.lvm)
+summary2(e.lvm, ssc = "none")
 
 }
 \seealso{
-\code{\link{sCorrect}} for more detail about the small sample correction.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
 \concept{small sample inference}
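
Note on usage: the \code{table2} element mentioned in the details can be extracted directly. A short sketch continuing the help-page example; since the default \code{df} depends on \code{lava.options()$df}, the last two calls make the choice explicit.

    res <- summary2(e.lvm)
    res$table2    ## estimate, se, df, CI limits, statistic, p-value
    summary2(e.lvm, df = "satterthwaite")   ## Satterthwaite degrees of freedom
    summary2(e.lvm, df = "none")            ## df = Inf, i.e. normal approximation
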
diff --git a/man/transformSummaryTable.Rd b/man/transformSummaryTable.Rd
new file mode 100644
index 0000000..f58e2e4
--- /dev/null
+++ b/man/transformSummaryTable.Rd
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/transformSummaryTable.R
+\name{transformSummaryTable}
+\alias{transformSummaryTable}
+\title{Apply Transformation to Summary Table}
+\usage{
+transformSummaryTable(object, transform = NULL)
+}
+\arguments{
+\item{object}{A data.frame with columns estimate, se, lower, upper.}
+
+\item{transform}{the name of a transformation or a function.}
+}
+\value{
+a data.frame
+}
+\description{
+Update summary table according to a transformation, e.g. a log-transformation.
+P-values are left unchanged but estimates, standard errors, and confidence intervals are updated.
+}
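
Note: the help page states that estimates, standard errors, and confidence intervals are updated while p-values are left unchanged, but does not spell out the computation. A plausible standalone sketch, assuming a delta-method update of the standard error (illustrative re-implementation, not the package code):

    tab <- data.frame(estimate = c(0.5, -0.2), se = c(0.10, 0.15),
                      lower = c(0.30, -0.49), upper = c(0.70, 0.09))
    backtransform <- function(object, transform = exp, dtransform = exp){
        data.frame(estimate = transform(object$estimate),
                   se = abs(dtransform(object$estimate)) * object$se, ## delta method
                   lower = transform(object$lower),  ## CI endpoints mapped directly
                   upper = transform(object$upper))
    }
    backtransform(tab)   ## back to the original scale after a log-transformation
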
diff --git a/man/validFCTs.Rd b/man/validFCTs.Rd
deleted file mode 100644
index f3477a9..0000000
--- a/man/validFCTs.Rd
+++ /dev/null
@@ -1,156 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/package-butils-valid.R
-\name{validFCTs}
-\alias{validFCTs}
-\alias{validCharacter}
-\alias{validClass}
-\alias{validDimension}
-\alias{validInteger}
-\alias{validLogical}
-\alias{validNames}
-\alias{validNumeric}
-\alias{validPath}
-\title{Check Arguments of a function.}
-\usage{
-validCharacter(
-  value1,
-  name1 = as.character(substitute(value1)),
-  valid.length,
-  valid.values = "character",
-  refuse.NULL = TRUE,
-  refuse.duplicates = FALSE,
-  method = NULL,
-  addPP = TRUE
-)
-
-validClass(
-  value1,
-  name1 = as.character(substitute(value1)),
-  validClass,
-  super.classes = TRUE,
-  method = NULL,
-  addPP = TRUE
-)
-
-validDimension(
-  value1,
-  value2 = NULL,
-  name1 = as.character(substitute(value1)),
-  name2 = as.character(substitute(value2)),
-  validDimension = NULL,
-  type = c("NROW", "NCOL"),
-  method = NULL,
-  addPP = TRUE
-)
-
-validInteger(
-  value1,
-  name1 = as.character(substitute(value1)),
-  valid.length,
-  valid.values = NULL,
-  min = NULL,
-  max = NULL,
-  refuse.NA = TRUE,
-  refuse.NULL = TRUE,
-  refuse.duplicates = FALSE,
-  method = NULL,
-  addPP = TRUE
-)
-
-validLogical(
-  value1,
-  name1 = as.character(substitute(value1)),
-  valid.length,
-  refuse.NULL = TRUE,
-  refuse.NA = TRUE,
-  method = NULL,
-  addPP = TRUE
-)
-
-validNames(
-  value1,
-  name1 = as.character(substitute(value1)),
-  refuse.NULL = TRUE,
-  valid.length = NULL,
-  valid.values = NULL,
-  required.values = NULL,
-  refuse.values = NULL,
-  method = NULL,
-  addPP = TRUE
-)
-
-validNumeric(
-  value1,
-  name1 = as.character(substitute(value1)),
-  valid.length,
-  valid.values = NULL,
-  min = NULL,
-  max = NULL,
-  refuse.NA = TRUE,
-  refuse.NULL = TRUE,
-  refuse.duplicates = FALSE,
-  method = NULL,
-  addPP = TRUE
-)
-
-validPath(
-  value1,
-  name1 = as.character(substitute(value1)),
-  type,
-  method = NULL,
-  addPP = TRUE,
-  extension = NULL,
-  check.fsep = FALSE
-)
-}
-\arguments{
-\item{value1}{the value of the (first) argument to be checked}
-
-\item{name1}{the name of the (first) argument.}
-
-\item{valid.length}{the acceptable length(s) for the argument. If \code{NULL} no test is performed.}
-
-\item{valid.values}{the acceptable value(s) for the argument. If \code{NULL} no test is performed. Can also be "character" or "character_or_logical".}
-
-\item{refuse.NULL}{should an error be output if value is \code{NULL}.}
-
-\item{refuse.duplicates}{should an error be output if value contains duplicated values.}
-
-\item{method}{the name of the function using the argument.}
-
-\item{addPP}{add ": " after the name of the function in the error message.}
-
-\item{validClass}{the acceptable classes(s) for the argument.}
-
-\item{super.classes}{uses the \code{is} function instead of \code{class} to test the class of the object.}
-
-\item{value2}{the second value of a second argument whose dimensions should be consistent with the first one}
-
-\item{name2}{the name of the second argument.}
-
-\item{validDimension}{the acceptable dimension for the argument. If \code{NULL} then name2 is used as a reference.}
-
-\item{type}{For \code{validDimension}: the type of operator used to check the dimensions. For \code{validPath} either "dir" or "file" to check whether to path points to an existing directory or file.}
-
-\item{min}{the minimum acceptable value}
-
-\item{max}{the maximum acceptable value}
-
-\item{refuse.NA}{should an error be output if value contains \code{NA}.}
-
-\item{required.values}{values that must appear in the argument}
-
-\item{refuse.values}{values that must not appear in the argument}
-
-\item{extension}{filter the files by the type of extension.}
-
-\item{check.fsep}{display a warning when the separator is not correctly specified in}
-}
-\value{
-An invisible \code{TRUE} or an error message.
-}
-\description{
-Check the validity of the arguments in functions.
-}
-\concept{check}
-\keyword{internal}
diff --git a/man/vcov2-internal.Rd b/man/vcov2-internal.Rd
new file mode 100644
index 0000000..1da5b24
--- /dev/null
+++ b/man/vcov2-internal.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/sCorrect-moments2.R
+\name{vcov2-internal}
+\alias{vcov2-internal}
+\alias{.info2vcov}
+\title{Invert the Information Matrix}
+\usage{
+.info2vcov(information, attr.info = FALSE)
+}
+\arguments{
+\item{attr.info}{[logical] should the information matrix be returned as an attribute?}
+
+\item{...}{arguments passed to .information2}
+}
+\description{
+Compute the inverse of the information matrix.
+}
+\keyword{internal}
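
Note: functionally this internal is a thin wrapper around matrix inversion. A minimal standalone sketch consistent with the documented interface (the implementation details are assumptions):

    info2vcov <- function(information, attr.info = FALSE){
        vcov <- solve(information)   ## invert the information matrix
        if(attr.info){ attr(vcov, "information") <- information }
        return(vcov)
    }
    ## example with a diagonal information matrix
    info <- matrix(c(10, 0, 0, 5), nrow = 2)
    info2vcov(info, attr.info = TRUE)
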
diff --git a/man/vcov2.Rd b/man/vcov2.Rd
index 4d3d314..182137b 100644
--- a/man/vcov2.Rd
+++ b/man/vcov2.Rd
@@ -1,56 +1,54 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/vcov2.R
+% Please edit documentation in R/sCorrect-vcov2.R
 \name{vcov2}
 \alias{vcov2}
-\alias{vcov2.lm}
-\alias{vcov2.gls}
-\alias{vcov2.lme}
 \alias{vcov2.lvmfit}
-\alias{vcov2.lm2}
-\alias{vcov2.gls2}
-\alias{vcov2.lme2}
 \alias{vcov2.lvmfit2}
-\title{Extract the Variance Covariance Matrix of the Model Parameters}
+\alias{vcov.lvmfit2}
+\title{Variance-Covariance With Small Sample Correction}
 \usage{
-vcov2(object, ...)
+vcov2(object, robust, cluster, as.lava, ...)
 
-\method{vcov2}{lm}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
+\method{vcov2}{lvmfit}(
+  object,
+  robust = FALSE,
+  cluster = NULL,
+  as.lava = TRUE,
+  ssc = lava.options()$ssc,
+  ...
+)
 
-\method{vcov2}{gls}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
+\method{vcov2}{lvmfit2}(object, robust = FALSE, cluster = NULL, as.lava = TRUE, ...)
 
-\method{vcov2}{lme}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{vcov2}{lvmfit}(object, param = NULL, data = NULL, bias.correct = TRUE, ...)
-
-\method{vcov2}{lm2}(object, param = NULL, data = NULL, ...)
-
-\method{vcov2}{gls2}(object, param = NULL, data = NULL, ...)
-
-\method{vcov2}{lme2}(object, param = NULL, data = NULL, ...)
-
-\method{vcov2}{lvmfit2}(object, param = NULL, data = NULL, ...)
+\method{vcov}{lvmfit2}(object, robust = FALSE, cluster = NULL, as.lava = TRUE, ...)
 }
 \arguments{
-\item{object}{a linear model or a latent variable model}
+\item{object}{a \code{lvmfit} or \code{lvmfit2} object (i.e. output of \code{lava::estimate} or \code{lavaSearch2::estimate2}).}
+
+\item{robust}{[logical] should robust standard errors be used instead of the model based standard errors? Should be \code{TRUE} if argument cluster is not \code{NULL}.}
 
-\item{...}{arguments to be passed to \code{sCorrect}.}
+\item{cluster}{[integer vector] the grouping variable relative to which the observations are iid.}
 
-\item{param}{[optional] the fitted parameters.}
+\item{as.lava}{[logical] if \code{TRUE}, uses the same names as when using \code{stats::coef}.}
 
-\item{data}{[optional] the data set.}
+\item{...}{additional arguments passed to \code{estimate2} when using a \code{lvmfit} object.}
 
-\item{bias.correct}{[logical] should the standard errors of the coefficients be corrected for small sample bias? Only relevant if the \code{sCorrect} function has not yet be applied to the object.}
+\item{ssc}{[character] method used to correct the small sample bias of the variance coefficients: no correction (\code{"none"}/\code{FALSE}/\code{NA}),
+correct the first order bias in the residual variance (\code{"residual"}), or correct the first order bias in the estimated coefficients (\code{"cox"}).
+Only relevant when using a \code{lvmfit} object.}
 }
 \value{
-A matrix.
+A matrix with as many rows and columns as the number of coefficients.
 }
 \description{
-Extract the variance covariance matrix of the model parameters from a Gaussian linear model.
+Extract the variance-covariance matrix from a latent variable model.
+Similar to \code{stats::vcov} but with small sample correction.
 }
 \details{
-If argument \code{p} or \code{data} is not null, then the small sample size correction is recomputed to correct the influence function.
+When argument object is a \code{lvmfit} object, the method first calls \code{estimate2} and then extracts the variance-covariance matrix.
 }
 \examples{
+#### simulate data ####
 n <- 5e1
 p <- 3
 X.name <- paste0("X",1:p)
@@ -62,18 +60,20 @@ distribution(m,~Id) <- Sequence.lvm(0)
 set.seed(10)
 d <- lava::sim(m,n)
 
-## linear model
+#### linear models ####
 e.lm <- lm(formula.lvm,data=d)
-vcov.tempo <- vcov2(e.lm, bias.correct = TRUE)
-vcov.tempo[rownames(vcov(e.lm)),colnames(vcov(e.lm))]/vcov(e.lm)
 
-## latent variable model
+#### latent variable models ####
 e.lvm <- estimate(lvm(formula.lvm),data=d)
-vcov.tempo <- vcov2(e.lvm, bias.correct = FALSE)
-vcov.tempo/vcov(e.lvm)
+vcov0 <- vcov(e.lvm)
+vcovSSC <- vcov2(e.lvm)
+
+vcovSSC/vcov0
+vcovSSC[1:4,1:4]/vcov(e.lm)
 
 }
 \seealso{
-\code{\link{sCorrect}} to obtain \code{lm2}, \code{gls2}, \code{lme2}, or \code{lvmfit2} objects.
+\code{\link{estimate2}} to obtain \code{lvmfit2} objects.
 }
-\concept{small sample inference}
+\concept{extractor}
+\keyword{smallSampleCorrection}
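
Note on usage: continuing the help-page example, a hedged sketch of the \code{robust} and \code{cluster} arguments documented above (the id column is hypothetical; the simulated data has none):

    vcov2(e.lvm)                  ## model-based variance, small-sample corrected
    vcov2(e.lvm, robust = TRUE)   ## robust (sandwich-type) variance
    ## with repeated measurements, pass the grouping variable as well:
    ## vcov2(e.lvm, robust = TRUE, cluster = d$id)
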
diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp
index de89d25..ab5c1ae 100644
--- a/src/RcppExports.cpp
+++ b/src/RcppExports.cpp
@@ -6,6 +6,11 @@
 
 using namespace Rcpp;
 
+#ifdef RCPP_USE_GLOBAL_ROSTREAM
+Rcpp::Rostream<true>&  Rcpp::Rcout = Rcpp::Rcpp_cout_get();
+Rcpp::Rostream<false>& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get();
+#endif
+
 // OLS_cpp
 arma::vec OLS_cpp(const arma::mat& X, const arma::vec& y);
 RcppExport SEXP _lavaSearch2_OLS_cpp(SEXP XSEXP, SEXP ySEXP) {
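
Note: the added \code{RCPP_USE_GLOBAL_ROSTREAM} block is boilerplate emitted by recent versions of Rcpp when the export glue is regenerated; it is generated, not hand-written. Regeneration is done from the package root with:

    Rcpp::compileAttributes()   ## rewrites src/RcppExports.cpp and R/RcppExports.R
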
diff --git a/tests/test-all.R b/tests/test-all.R
index 8f933f6..6023e76 100644
--- a/tests/test-all.R
+++ b/tests/test-all.R
@@ -1,3 +1,6 @@
 #library(lavaSearch2)
 suppressPackageStartupMessages(library("testthat"))
+suppressPackageStartupMessages(library("nlme"))
+suppressPackageStartupMessages(library("multcomp"))
+suppressPackageStartupMessages(library("Matrix"))
 test_check("lavaSearch2")
diff --git a/tests/testthat/test-coefType.R b/tests/testthat/test-coefType.R
index eb61d57..6031bdc 100644
--- a/tests/testthat/test-coefType.R
+++ b/tests/testthat/test-coefType.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: okt 12 2017 (14:52) 
 ## Version: 
-## last-updated: aug  6 2018 (15:34) 
+## last-updated: Jan 12 2022 (12:32) 
 ##           By: Brice Ozenne
-##     Update #: 79
+##     Update #: 82
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -16,7 +16,7 @@
 ### Code:
 
 ## * header
-rm(list = ls())
+## rm(list = ls())
 if(TRUE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
@@ -44,7 +44,7 @@ test_that("coefType - lm", {
     expect_equal(test,GS)
 
     test <- subset(coefType(m, as.lava = FALSE), !is.na(lava))
-    expect_equal(test$detail,df.truth$letter)
+    expect_equal(as.character(test$detail),df.truth$letter)
     expect_equal(test$name,df.truth$name)
 
     test <- coefType(e, as.lava = TRUE)
@@ -52,7 +52,7 @@ test_that("coefType - lm", {
     expect_equal(test,GS)
 
     test <- subset(coefType(e, as.lava = FALSE), !is.na(lava))
-    expect_equal(test$detail,df.truth$letter)
+    expect_equal(as.character(test$detail),df.truth$letter)
     expect_equal(test$name,df.truth$name)
 
 })
@@ -94,7 +94,6 @@ df.truth <- data.frame(name = c("Y","Y~X1","Y~X2b","Y~X2c","Y~X3","Y~~Y"),
                        )
 
 test_that("coefType - lm", {
-    
     test <- coefType(mSim, as.lava = TRUE)
     expect_equal(names(test),as.character(coef(mSim)))
 
@@ -103,7 +102,7 @@ test_that("coefType - lm", {
     expect_equal(test,GS)
     
     test <- subset(coefType(e, as.lava = FALSE), !is.na(lava))
-    expect_equal(test$detail,df.truth$letter)
+    expect_equal(as.character(test$detail),df.truth$letter)
     expect_equal(test$name,df.truth$name)
 })
 
@@ -132,7 +131,7 @@ test_that("coefType - lm", {
     expect_equal(test,GS)
     
     test <- subset(coefType(e, as.lava = FALSE), !is.na(lava))
-    expect_equal(test$detail,df.truth$letter)
+    expect_equal(as.character(test$detail),df.truth$letter)
     expect_equal(test$name,df.truth$name)
 })
 
@@ -142,7 +141,7 @@ m.sim <- lvm(Y1 ~ 1)
 categorical(m.sim, labels = c("control","concussion")) <- ~group
 
 test_that("coefType - lm with extra variable", {
-    coefType(m.sim)
+    expect_equal(unname(coefType(m.sim)), c("intercept", "variance", "extra"))    
 })
 
 ## m.sim <- lvm(Y1 ~ 1, group ~ 1) ## ERROR
@@ -172,6 +171,9 @@ df.truth <- rbind(c("Y2","intercept","nu"),
                   ##
                   c("Y1~X1","regression","K"),
                   ##
+                  c("eta2~X2b","regression","Gamma"),
+                  c("eta2~X2c","regression","Gamma"),
+                  ##
                   c("Y2~eta1","regression","Lambda"),
                   c("Y3~eta1","regression","Lambda"),
                   c("Z2~eta2","regression","Lambda"),
@@ -179,9 +181,6 @@ df.truth <- rbind(c("Y2","intercept","nu"),
                   ##
                   c("eta2~eta1","regression","B"),
                   ##
-                  c("eta2~X2b","regression","Gamma"),
-                  c("eta2~X2c","regression","Gamma"),
-                  ##
                   c("Y1~~Y1","variance","Sigma_var"),
                   c("Y2~~Y2","variance","Sigma_var"),
                   c("Y3~~Y3","variance","Sigma_var"),
@@ -189,15 +188,14 @@ df.truth <- rbind(c("Y2","intercept","nu"),
                   c("Z2~~Z2","variance","Sigma_var"),
                   c("Z3~~Z3","variance","Sigma_var"),
                   ##
-                  c("eta1~~eta1","variance","Psi_var"),
-                  c("eta2~~eta2","variance","Psi_var"),
+                  c("Y1~~Z1","covariance","Sigma_cov"),
                   ##
-                  c("Y1~~Z1","covariance","Sigma_cov")
+                  c("eta1~~eta1","variance","Psi_var"),
+                  c("eta2~~eta2","variance","Psi_var")
                   )
 
 df.truth <- as.data.frame(df.truth, stringsAsFactors = FALSE)
 names(df.truth) <- c("name","type","detail")
-df.truth <- df.truth[order(df.truth$type,df.truth$detail,df.truth$name),]
 
 test_that("coefType - lvm", {
 
@@ -209,7 +207,7 @@ test_that("coefType - lvm", {
     expect_equal(test,GS)
 
     test <- subset(coefType(e, as.lava = FALSE), !is.na(lava))
-    expect_equal(test$detail,df.truth$detail)
+    expect_equal(as.character(test$detail),df.truth$detail)
     expect_equal(test$name,df.truth$name)
 })
 ## ** constrains (0 mean 1 loading)
@@ -246,13 +244,12 @@ df.truth <- rbind(data.frame(name = "Y1", type = "intercept", detail = "nu", fix
                   data.frame(name = "Z2~~Z2", type = "variance", detail = "Sigma_var", fixed = FALSE, stringsAsFactors = FALSE),
                   data.frame(name = "Z3~~Z3", type = "variance", detail = "Sigma_var", fixed = FALSE, stringsAsFactors = FALSE),
                   ##
-                  data.frame(name = "eta1~~eta1", type = "variance", detail = "Psi_var", fixed = FALSE, stringsAsFactors = FALSE),
-                  data.frame(name = "eta2~~eta2", type = "variance", detail = "Psi_var", fixed = FALSE, stringsAsFactors = FALSE),
+                  data.frame(name = "Y1~~Y2", type = "covariance", detail = "Sigma_cov", fixed = FALSE, stringsAsFactors = FALSE),
                   ##
-                  data.frame(name = "Y1~~Y2", type = "covariance", detail = "Sigma_cov", fixed = FALSE, stringsAsFactors = FALSE)
+                  data.frame(name = "eta1~~eta1", type = "variance", detail = "Psi_var", fixed = FALSE, stringsAsFactors = FALSE),
+                  data.frame(name = "eta2~~eta2", type = "variance", detail = "Psi_var", fixed = FALSE, stringsAsFactors = FALSE)
                   )
 df.truth <- as.data.frame(df.truth)
-df.truth <- df.truth[order(df.truth$type,df.truth$detail,df.truth$name),]
 
 test_that("coefType - constrains 0/1", {
 
@@ -264,7 +261,7 @@ test_that("coefType - constrains 0/1", {
     expect_equal(test,GS)
     
     test <- coefType(e, as.lava = FALSE)
-    expect_equal(test$detail,df.truth$detail)
+    expect_equal(as.character(test$detail),df.truth$detail)
     expect_equal(test$name,df.truth$name)    
 })
 
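
Note: the recurring \code{as.character()} wrappers in this file most likely reflect the \code{detail} column now being factor-valued while the truth table stores plain characters (e.g. after the \code{stringsAsFactors} default change in R 4.0.0); coercing makes \code{expect_equal} compare values rather than classes. A minimal illustration:

    f <- factor(c("K", "Lambda"))
    identical(f, c("K", "Lambda"))                 ## FALSE: factor vs character
    identical(as.character(f), c("K", "Lambda"))   ## TRUE after coercion
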
diff --git a/tests/testthat/test-initVar.R b/tests/testthat/test-initVar.R
index aaa8680..490fe88 100644
--- a/tests/testthat/test-initVar.R
+++ b/tests/testthat/test-initVar.R
@@ -1,5 +1,5 @@
 ## * header
-rm(list = ls())
+## rm(list = ls())
 if(TRUE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
diff --git a/tests/testthat/test-matrixPower.R b/tests/testthat/test-matrixPower.R
index b496f19..d7c1c89 100644
--- a/tests/testthat/test-matrixPower.R
+++ b/tests/testthat/test-matrixPower.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: okt 24 2017 (09:21) 
 ## Version: 
-## last-updated: mar 13 2018 (13:24) 
+## last-updated: Jan 12 2022 (14:47) 
 ##           By: Brice Ozenne
-##     Update #: 16
+##     Update #: 19
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -16,8 +16,8 @@
 ### Code:
 
 ## * header
-rm(list = ls())
-if(TRUE){ ## already called in test-all.R
+## rm(list = ls())
+if(FALSE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)    
 }
@@ -41,5 +41,5 @@ test_that("inverse", {
     expect_equal(Sigma.m1 %*% Sigma,diag(1,NROW(Sigma),NCOL(Sigma)))
 })
 
-#----------------------------------------------------------------------
+##----------------------------------------------------------------------
 ### test-matrixPower.R ends here
diff --git a/tests/testthat/test-previousBug.R b/tests/testthat/test-previousBug.R
new file mode 100644
index 0000000..7515bad
--- /dev/null
+++ b/tests/testthat/test-previousBug.R
@@ -0,0 +1,138 @@
+### test-previousBug.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: nov 19 2019 (10:17) 
+## Version: 
+## Last-Updated: jan 18 2022 (09:53) 
+##           By: Brice Ozenne
+##     Update #: 35
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * header
+## rm(list = ls(all.names = TRUE))
+if(TRUE){ ## already called in test-all.R
+    library(testthat)
+    library(nlme)
+    library(lavaSearch2)
+}
+
+lava.options(symbols = c("~","~~"))
+
+context("Previous bugs")
+
+
+## * Brice, 01/23/20 1:41 , sCorrect
+## keep.row <- sample.int(139,30)
+## dd <- df.longiHCENRER[keep.row,c("neocortex.log","response.w4","age","sex","sert","sb.per.kg","group")]
+## dd$sex <- as.numeric(dd$sex)
+## dd$group <- as.numeric(dd$group)
+## dd$sert <- as.numeric(dd$sert)
+## dd$neocortex.log <- dd$neocortex.log+rnorm(NROW(dd), sd = 0.1)
+
+ddW <- data.frame("neocortex.log" = c(-0.5114974, -0.8249681, -0.3910322, -0.2941140, -0.4850710, -0.7354951, -0.3005508, -0.3956807, -0.5279078, -0.4705333, -0.4072993, -0.5494491, -0.9464054, -0.2632149, -0.2781923, -1.0308939, -0.5637023, -0.2689296, -0.4375162, -0.5351115, -0.8410153, -0.3022184, -0.5711485, -0.5155027, -0.2262801, -0.4262566, -0.4353830, -0.7079919, -0.1529665, -0.3954827), 
+                 "group" = c(rep(1,10),rep(2,20)),
+                 "id" = 1:30)
+ddW$group <- factor(ddW$group, levels = 1:2)
+
+e.GS <- gls(neocortex.log ~ group-1, method = "REML", weight = varIdent(form =~1|group), data = ddW)
+## same as
+e.lm1 <- lm(neocortex.log ~ 1, ddW[ddW$group==1, ]) 
+e.lm2 <- lm(neocortex.log ~ 1, ddW[ddW$group==2, ]) 
+
+ddL <- reshape2::dcast(ddW, value.var = "neocortex.log", id~group)
+names(ddL) <- c("id","G1","G2")
+m <- lvm(G1~1,G2~1)
+e.lvm <- estimate(m, data = ddL, missing = TRUE)
+
+test_that("sCorrect in stratified GLS equivalent to separate LM", {
+    eSSC.res <- estimate2(e.lvm)
+    ## GS1 <- estimate2(lvm(G1~1), data = ddL[!is.na(ddL$G1),])
+    ## GS2 <- estimate2(lvm(G2~1), data = ddL[!is.na(ddL$G2),])
+
+    GS <- c(mu1 = mean(ddL$G1, na.rm = TRUE),
+            mu2 = mean(ddL$G2, na.rm = TRUE),
+            sigma1 = var(ddL$G1, na.rm = TRUE),
+            sigma2 = var(ddL$G2, na.rm = TRUE))
+    expect_equivalent(coef2(eSSC.res), GS, tol = 1e-6)
+    ## sigma(e.GS)^2
+    expect_equivalent(vcov2(eSSC.res)[1:2,1:2], vcov(e.GS), tol = 1e-6)
+})
+
+
+
+## * Brice, 01/27/20 6:12, ssc residuals under constraints
+## path <- "C:/Users/hpl802/Downloads"
+## butils.base:::sourcePackage("lavaSearch2", path = path, c.code = TRUE, trace = TRUE) ## version 1.5.5
+
+dd <- data.frame("Y1" = c(-0.35, -0.87, -2.24, -0.7, 0.04, -1.46, -1.29, 0.6, -1.44, -1.64, -0.33, 1.12, -2, 0.66, 0.09, 1.18, -1.72, -1.02, 1.76, -0.48, -0.63, -1.95, -0.98, -2.8, -0.61), 
+                 "eta" = c(-0.37, -0.69, -0.87, -0.1, -0.25, -1.85, -0.08, 0.97, 0.18, -1.38, -1.44, 0.36, -1.76, -0.32, -0.65, 1.09, -0.76, -0.83, 0.83, -0.97, -0.03, 0.23, -0.3, -0.68, 0.66), 
+                 "Y2" = c(-0.77, -1.02, 0.5, 2.04, 0.25, -1.07, -0.98, 1.5, -0.46, -1.09, -2.67, -0.09, -2.59, 0.02, 0.41, 2.3, -0.03, -1.31, 1.4, -2.21, 0.35, -1.2, -1.35, -0.9, -0.83))
+
+m <- lvm(Y1[0:sigma1] ~ 1*eta,
+         Y2[0:sigma2] ~ 1*eta,
+         eta[mu:1]  ~ 1
+         )
+latent(m) <- ~eta
+
+mm <- lvm(Y1[mu:sigma1] ~ 1*eta,
+         Y2[mu:sigma2] ~ 1*eta,
+         eta[0:1]  ~ 1
+         )
+latent(mm) <- ~eta
+
+e <- estimate(m, dd)
+ee <- estimate(mm, dd)
+
+test_that("bug in version 1.5.4 (incorrect handling of the constrain when computing Omega)", {
+
+    expect_equal(logLik(e), logLik(ee), tol = 1e-6)
+    expect_equal(as.double(vcov(e)), as.double(vcov2(e, ssc = FALSE)), tol = 1e-6)
+    expect_equal(as.double(vcov(ee)), as.double(vcov2(ee, ssc = FALSE)), tol = 1e-6)
+    
+    test.res1 <- estimate2(e, df = "Satterthwaite", ssc = "residuals", derivative = "analytic")
+    test.res2 <- estimate2(ee, df = "Satterthwaite", ssc = "residuals", derivative = "analytic")
+
+    expect_equal(test.res1$sCorrect$param, c("eta" = -0.583625694846817, "Y1~~Y1" = 0.548634987452149, "Y2~~Y2" = 1.01248330967465),
+                 tol = 1e-6)
+    ## expect_equal(test.cox1$sCorrect$param, c("eta" = -0.58362569, "Y1~~Y1" = 0.50602817, "Y2~~Y2" = 0.95769503),
+    ##              tol = 1e-6)
+    
+})
+
+
+## * Brice 04/15/20 9:26 multcomp
+mSim <- lvm(Y1~X1,Y2~X1)
+n <- 25
+
+set.seed(10)
+d <- lava::sim(mSim, n)
+dNA <- rbind(d,c(Y1=NA,X1=1,Y2=2))
+dNA$id <- 1:26
+ls.lmALL <- list("Y1" = estimate(lvm(Y1~X1), data = d),
+                 "Y2" = estimate(lvm(Y2~X1), data = d))
+ls.lmRED <- list("Y1" = estimate(lvm(Y1~X1), data = d))
+ls.lmNA <- list("Y1" = estimate(lvm(Y1~X1), data = dNA),
+                 "Y2" = estimate(lvm(Y2~X1), data = dNA))
+class(ls.lmALL) <- "mmm"
+class(ls.lmRED) <- "mmm"
+class(ls.lmNA) <- "mmm"
+
+test_that("Stability by subset", {
+    glht.ALL <- glht2(ls.lmALL, linfct = "X1=0")
+    glht.RED <- glht2(ls.lmRED, linfct = "X1=0")
+    glht.NA <- glht2(ls.lmNA, linfct = "X1=0", cluster = "id")
+
+    index.model1 <- which(grepl("Y1: ",colnames(glht.ALL$vcov)))
+    expect_equal(glht.ALL$vcov[index.model1,index.model1], glht.RED$vcov, tol = 1e-6)
+    expect_equal(glht.RED$vcov[index.model1,index.model1], glht.NA$vcov[index.model1,index.model1], tol = 1e-6)
+})
+
+######################################################################
+### test-previousBug.R ends here
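
Note: the first regression test above uses \code{var()} as the gold standard, i.e. the unbiased variance estimator with an n-1 denominator, which is exactly what the small sample correction should recover from the ML estimator (n denominator). A standalone arithmetic check of that relation:

    x <- c(-0.51, -0.82, -0.39, -0.29, -0.49)   ## any small sample
    n <- length(x)
    ml  <- mean((x - mean(x))^2)    ## ML variance estimate, divides by n
    ubd <- var(x)                   ## unbiased estimate, divides by n - 1
    all.equal(ubd, ml * n / (n - 1))   ## TRUE
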
diff --git a/tests/testthat/test1-Utils-nlme.R b/tests/testthat/test1-Utils-nlme.R
deleted file mode 100644
index fefaeb0..0000000
--- a/tests/testthat/test1-Utils-nlme.R
+++ /dev/null
@@ -1,254 +0,0 @@
-### test-Utils-nlme.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: nov 16 2017 (10:36) 
-## Version: 
-## Last-Updated: dec 10 2018 (23:44) 
-##           By: Brice Ozenne
-##     Update #: 69
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * header
-rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-}
-
-library(nlme)
-lava.options(symbols = c("~","~~"))
-
-context("Utils-nlme")
-
-## * simulation
-n <- 5e1
-mSim <- lvm(c(Y1~1*eta,Y2~1*eta,Y3~1*eta,Y4~1*eta,eta~G+Gender))
-latent(mSim) <- ~eta
-categorical(mSim, labels = c("M","F")) <- ~Gender
-transform(mSim,Id~Y1) <- function(x){1:NROW(x)}
-set.seed(10)
-dW <- lava::sim(mSim,n,latent = FALSE)
-dW <- dW[order(dW$Id),,drop=FALSE]
-dL <- reshape2::melt(dW,id.vars = c("G","Id","Gender"), variable.name = "time")
-dL <- dL[order(dL$Id),,drop=FALSE]
-dL$time.num <- as.numeric(dL$time)
-
-## * t.test
-test_that("invariant to the order in the dataset", {
-    e1.gls <- gls(Y1 ~ Gender, data = dW[order(dW$Id),],
-                  weights = varIdent(form = ~1|Gender),
-                  method = "ML")
-
-    out1 <- getVarCov2(e1.gls, cluster = dW$Id)
-    index.cluster <- as.numeric(names(out1$index.Omega))
-    expect_true(all(diff(index.cluster)>0))
-
-    e2.gls <- gls(Y1 ~ Gender, data = dW[order(dW$Gender),],
-                  weights = varIdent(form = ~1|Gender),
-                  method = "ML")
-    out2 <- getVarCov2(e2.gls, cluster = dW$Id)
-    index.cluster <- as.numeric(names(out2$index.Omega))
-    expect_true(all(diff(index.cluster)>0))
-})
-
-## * Heteroschedasticity
-e.gls <- nlme::gls(value ~ time + G + Gender,
-                   weights = varIdent(form =~ 1|time),
-                   data = dL, method = "ML")
-
-test_that("Heteroschedasticity", {
-    vec.sigma <- c(1,coef(e.gls$modelStruct$varStruct, unconstrained = FALSE))
-    expect_equal(diag(vec.sigma^2 * sigma(e.gls)^2),
-                 unname(getVarCov2(e.gls, cluster = "Id")$Omega))
-})
-
-## * Compound symmetry
-e.lme <- nlme::lme(value ~ time + G + Gender,
-                   random = ~ 1|Id,
-                   data = dL,
-                   method = "ML")
-e.lme.bis <- nlme::lme(value ~ time + G + Gender,
-                       random = ~ 1|Id,
-                       correlation = corCompSymm(),
-                       data = dL,
-                       method = "ML")
-e.gls <- nlme::gls(value ~ time + G + Gender,
-                   correlation = corCompSymm(form=~ 1|Id),
-                   data = dL, method = "ML")
-
-test_that("Compound symmetry", {
-    expect_equal(unclass(getVarCov(e.gls)),
-                 unname(getVarCov2(e.gls)$Omega))
-
-    expect_equal(unname(getVarCov(e.lme, type = "marginal", individuals = 1)[[1]]),
-                 unname(getVarCov2(e.lme)$Omega))
-
-    expect_equal(unname(getVarCov(e.lme.bis, type = "marginal", individuals = 1)[[1]]),
-                 unname(getVarCov2(e.lme.bis)$Omega))
-})
-
-## * Unstructured 
-e.lme <- nlme::lme(value ~ time + G + Gender,
-                   random = ~ 1|Id,
-                   correlation = corSymm(form =~ time.num|Id),
-                   data = dL,
-                   method = "ML")
-e.gls <- nlme::gls(value ~ time + G + Gender,
-                   correlation = corSymm(form=~ time.num|Id),
-                   data = dL, method = "ML")
-
-
-test_that("Unstructured ", {
-    expect_equal(unclass(getVarCov(e.gls)),
-                 unname(getVarCov2(e.gls)$Omega))
-
-    expect_equal(unname(getVarCov(e.lme, type = "marginal", individuals = 1)[[1]]),
-                 unname(getVarCov2(e.lme)$Omega))
-})
-
-## * Unstructured with weights
-e.lme <- nlme::lme(value ~ time + G + Gender,
-                   random = ~ 1|Id,
-                   correlation = corSymm(form =~ time.num|Id),
-                   weight = varIdent(form = ~ 1|time),
-                   data = dL,
-                   method = "ML")
-e.gls <- nlme::gls(value ~ time + G + Gender,
-                   correlation = corSymm(form =~ time.num|Id),
-                   weight = varIdent(form = ~ 1|time),
-                   data = dL, method = "ML")
-
-test_that("Unstructured with weights", {
-    expect_equal(unclass(getVarCov(e.gls)),
-                 unname(getVarCov2(e.gls)$Omega))
-
-    expect_equal(unname(getVarCov(e.lme, type = "marginal", individuals = 1)[[1]]),
-                 unname(getVarCov2(e.lme)$Omega))
-})
-
-## * Unstructured with missing data
-## http://publicifsv.sund.ku.dk/~jufo/courses/rm2018/vasscores.txt
-## fix bug in the ordering of getVarCov2 due to different ordering of treatment in arguments weight and correlation
-
-## ** data management
-## butils::object2script(dfW.score)
-dfW <- data.frame("id" = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30"), 
-                  "group" = c("AC", "AB", "AB", "BC", "BC", "AC", "AB", "AC", "BC", "AC", "BC", "AB", "AB", "BC", "AB", "AC", "BC", "AC", "AC", "AC", "BC", "AB", "AB", "BC", "AB", "AB", "BC", "AC", "BC", "AC"), 
-                  "vasaucA" = c( 51.0,  42.0,  54.0, NA, NA,  16.5,  58.5, 129.0, NA,  52.5, NA,  23.5,  98.0, NA, 177.0,  67.0, NA,  55.0,  79.5,   3.5, NA,  33.0,   9.5, NA,  47.5,  66.5, NA,  85.5, NA, 143.5), 
-                  "vasaucB" = c(NA,  35.0,  62.0,  64.0,  80.5, NA,  33.5, NA,  59.0, NA,  32.5,  13.0, 120.0, 102.0, 166.5, NA, 138.0, NA, NA, NA, 161.5,  53.5,  13.5, 116.5,  68.0, 104.5, 103.0, NA,  36.0, NA), 
-                  "vasaucC" = c( 48.5, NA, NA,  65.0,  94.5,  19.5, NA, 102.0,  56.5,  78.5,  18.0, NA, NA,  14.0, NA,  51.0, 168.5,  10.0,  28.0,   3.5, 127.0, NA, NA,  36.5, NA, NA,  33.5,  45.0,   7.5, 132.0))
-
-level.Id <- sort(as.numeric(as.character(dfW$id)))
-
-dfW$id <- factor(dfW$id, levels = level.Id)
-dfW$group <- as.factor(dfW$group)
-dfL <- reshape2::melt(dfW, id.vars = c("id","group"),
-                      measure.vars = c("vasaucA","vasaucB","vasaucC"),
-                      value.name = "vasauc",
-                      variable.name = "treatment")
-dfL <- dfL[order(dfL$id, dfL$treatment),]
-dfL$treatment <- gsub("vasauc","",dfL$treatment)
-dfL$treatment <- as.factor(dfL$treatment)
-dfL$treatment.num <- as.numeric(dfL$treatment)
-
-
-dfL2 <- dfL
-dfL2$id <- as.character(dfL2$id)
-dfL2[dfL2$id == "2","id"] <- "0"
-dfL2[dfL2$id == "1","id"] <- "2"
-dfL2[dfL2$id == "0","id"] <- "1"
-dfL2$id <- factor(dfL2$id, levels = level.Id)
-dfL2 <- dfL2[order(dfL2$id,dfL2$treatment),]
-
-
-## ** fit model
-e.gls <- gls(vasauc ~ treatment,
-             correlation = corSymm(form =~ treatment.num | id),
-             weights = varIdent(form =~ 1|treatment),
-             na.action = na.omit,
-             data = dfL)
-logLik(e.gls)
-
-e.gls2 <- gls(vasauc ~ treatment,
-             correlation = corSymm(form =~ treatment.num | id),
-             weights = varIdent(form =~ 1|treatment),
-             na.action = na.omit,
-             data = dfL2)
-logLik(e.gls2)
-
-## ** extract covariance matrix
-Sigma <- unname(getVarCov2(e.gls)$Omega)
-Sigma2 <- unname(getVarCov2(e.gls2)$Omega)
-
-expect_equal(Sigma, Sigma2, tol = 1e-5)
-## allcoef <- lavaSearch2:::.coef2.gls(e.gls)
-## sigmaBase <- allcoef["sigma2"] * c(A=1,allcoef["B"],allcoef["C"])
-
-## AB
-expect_equal(Sigma[c(1,2),c(1,2)],
-             unclass(nlme::getVarCov(e.gls2, individual = 1)),
-             tol = 1e-5)
-expect_equal(Sigma[c(1,2),c(1,2)],
-             unclass(nlme::getVarCov(e.gls, individual = 2)),
-             tol = 1e-5)
-## sqrt(sigmaBase["A"] * sigmaBase["B"]) * allcoef["corCoefAB"]
-
-## AC
-expect_equal(Sigma[c(1,3),c(1,3)],
-             unclass(nlme::getVarCov(e.gls2, individual = 2)),
-             tol = 1e-5)
-expect_equal(Sigma[c(1,3),c(1,3)],
-             unclass(nlme::getVarCov(e.gls, individual = 1)),
-             tol = 1e-5)
-## sqrt(sigmaBase["A"] * sigmaBase["C"]) * allcoef["corCoefAC"]
-
-## BC
-expect_equal(Sigma[c(2,3),c(2,3)],
-             unclass(nlme::getVarCov(e.gls2, individual = 4)),
-             tol = 1e-5)
-expect_equal(Sigma[c(2,3),c(2,3)],
-             unclass(nlme::getVarCov(e.gls, individual = 4)),
-             tol = 1e-5)
-## sqrt(sigmaBase["B"] * sigmaBase["C"]) * allcoef["corCoefBC"]
-
-
-## * 2 random effect model (error)
-e.lme <- nlme::lme(value ~ time + G + Gender,
-                   random=~1|Id/Gender,
-                   data = dL,
-                   method = "ML")
-
-expect_error(getVarCov2(e.lme))
-
-## * PET dataset
-
-df.PET <- data.frame("ID" = c( 925, 2020, 2059, 2051, 2072, 2156, 2159, 2072, 2020, 2051, 2231,
-                              2738, 2231, 2777,  939,  539, 2738, 2777,  925, 2156, 2159, 2059), 
-                     "session" = c("V", "V", "V", "V", "V", "V", "V", "C", "C", "C", "C",
-                                   "C", "V", "C", "C", "V", "V", "V", "C", "C", "C", "C"), 
-                     "PET" = c(-2.53, -6.74, -8.17, -2.44, -3.54, -1.27, -0.55, -0.73, -1.42,  3.35,
-                               -2.11,  2.60, -4.52,  0.99, -1.02, -1.78, -5.86,  1.20, NA, NA, NA, NA)
-                     )
-df.PET$session.index <- as.numeric(as.factor(df.PET$session))
-
-
-e.lme <- lme(PET ~ session,
-             random = ~ 1 | ID,
-             weights = varIdent(form=~session.index|session),
-             na.action = "na.omit",
-             data = df.PET)
-test_that("getVarCov2 - NA", {
-    expect_equal(matrix(c( 7.893839, 1.583932, 1.583932, 4.436933), 2, 2),
-                 unname(getVarCov2(e.lme)$Omega), tol = 1e-6, scale = 1)
-})
-
-
-##----------------------------------------------------------------------
-### test-Utils-nlme.R ends here
diff --git a/tests/testthat/test1-adjustMoment.R b/tests/testthat/test1-adjustMoment.R
deleted file mode 100644
index af40c2b..0000000
--- a/tests/testthat/test1-adjustMoment.R
+++ /dev/null
@@ -1,144 +0,0 @@
-### test1-adjustMoment.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar 27 2018 (09:50) 
-## Version: 
-## Last-Updated: jul 16 2018 (16:49) 
-##           By: Brice Ozenne
-##     Update #: 22
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-
-## * header
-## rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-}
-lava.options(symbols = c("~","~~"))
-
-.adjustMoment <- lavaSearch2:::.adjustMoment
-context("adjustMoment")
-
-## * simulation
-n <- 5e1
-mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(mSim) <- eta1~X1+Gender
-latent(mSim) <- ~eta1+eta2
-categorical(mSim, labels = c("Male","Female")) <- ~Gender
-transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
-set.seed(10)
-d <- lava::sim(mSim, n = n, latent = FALSE)
-dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
-           measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
-dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
-
-## * multiple linear regression
-## ** no constrains
-e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
-
-test_that("linear regression - no constrains",{
-    Omega <- getVarCov2(e.lvm)
-    e.lvm$conditionalMoment <- conditionalMoment(e.lvm,
-                                                 data = d,
-                                                 param = coef(e.lvm),
-                                                 first.order = TRUE,
-                                                 second.order = FALSE,
-                                                 usefit = TRUE)    
-    solution <- .adjustMoment(e.lvm, Omega = Omega)
-
-    expect_equal(solution$param, coef(e.lvm)[names(solution$param)], tol = 1e-8)
-})
-
-## ** constrains and covariance link
-m <- lvm(Y1[mu1:sigma]~X1,
-         Y2[mu2:sigma]~X2,Y3~X1+X3,
-         Y2~~Y3)
-e.lvm <- estimate(m, data = d)
-
-test_that("linear regression - constrains and covariance",{
-    Omega <- getVarCov2(e.lvm)
-    e.lvm$conditionalMoment <- conditionalMoment(e.lvm,
-                                                 data = d,
-                                                 param = coef(e.lvm),
-                                                 first.order = TRUE,
-                                                 second.order = FALSE,
-                                                 usefit = TRUE)    
-    solution <- .adjustMoment(e.lvm, Omega = Omega)
-
-    expect_equal(solution$param, coef(e.lvm)[names(solution$param)], tol = 1e-8)
-})
-
-## * factor model
-m <- lvm(Y1~eta,
-         Y2~eta+X2,
-         Y3~eta,
-         Z1~eta, Z1~~Y1,Z1~~Y2,
-         eta~X1+X3)
-e.lvm <- estimate(m, d)
-
-test_that("factor model",{
-    Omega <- getVarCov2(e.lvm)
-    e.lvm$conditionalMoment <- conditionalMoment(e.lvm,
-                                                 data = d,
-                                                 param = coef(e.lvm),
-                                                 first.order = TRUE,
-                                                 second.order = FALSE,
-                                                 usefit = TRUE)    
-    solution <- .adjustMoment(e.lvm, Omega = Omega)
-
-    expect_equal(solution$param, coef(e.lvm)[names(solution$param)], tol = 1e-8)
-})
-
-## * two factor model
-## ** correlation
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3,
-           eta1~eta2))
-
-e.lvm <- estimate(m, d)
-
-test_that("two factor model - correlation",{
-    Omega <- getVarCov2(e.lvm)
-    e.lvm$conditionalMoment <- conditionalMoment(e.lvm,
-                                                 data = d,
-                                                 param = coef(e.lvm),
-                                                 first.order = TRUE,
-                                                 second.order = FALSE,
-                                                 usefit = TRUE)    
-    solution <- .adjustMoment(e.lvm, Omega = Omega)
-
-    expect_equal(solution$param, coef(e.lvm)[names(solution$param)], tol = 1e-8)
-})
-
-## ** covariance
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3,
-           eta1~~eta2))
-
-e.lvm <- estimate(m, d)
-
-test_that("two factor model - covariance",{
-    Omega <- getVarCov2(e.lvm)
-    e.lvm$conditionalMoment <- conditionalMoment(e.lvm,
-                                                 data = d,
-                                                 param = coef(e.lvm),
-                                                 first.order = TRUE,
-                                                 second.order = FALSE,
-                                                 usefit = TRUE)    
-    solution <- .adjustMoment(e.lvm, Omega = Omega)
-
-    expect_equal(solution$param, coef(e.lvm)[names(solution$param)], tol = 1e-8)
-})
-
-##
-##----------------------------------------------------------------------
-### test1-adjustMoment.R ends here
diff --git a/tests/testthat/test1-cluster-lava.R b/tests/testthat/test1-cluster-lava.R
deleted file mode 100644
index 124caf8..0000000
--- a/tests/testthat/test1-cluster-lava.R
+++ /dev/null
@@ -1,60 +0,0 @@
-### test1-cluster.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: apr 26 2018 (10:37) 
-## Version: 
-## Last-Updated: feb 11 2019 (11:03) 
-##           By: Brice Ozenne
-##     Update #: 6
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## Check that the robust standard error of lavaSearch2 matches those of lava
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * header
-rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-}
-
-lava.options(symbols = c("~","~~"))
-context("cluster argument (replicate lava results)")
-
-## * simulation
-n <- 2e1
-mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(mSim) <- eta1~X1+Gender
-latent(mSim) <- ~eta1+eta2
-categorical(mSim, labels = c("Male","Female")) <- ~Gender
-transform(mSim, Id~Y1) <- function(x){round(runif(n = n, min = 0, max = 30))}
-set.seed(10)
-d <- lava::sim(mSim, n = n, latent = FALSE)
-table(d$Id)
-
-## * linear regression [lvm]
-## ** model fit and sCorrect
-test_that("lm - robust vcov",{
-    e.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d)
-    sCorrect(e.lvm) <- FALSE
-    eR.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d, cluster ="Id")
-    
-    n.Id <- length(unique(d$Id))
-    GS <- vcov(eR.lvm)/(n.Id/(n.Id-1))
-    test <- crossprod(iid2(e.lvm, cluster = "Id"))
-    expect_true(all(abs(GS/test-1)<1e-2))
-    ## GS/test
-
-    summary(eR.lvm)$coef
-    compare2(e.lvm, par = c("Y1~X1"),
-             robust = TRUE, cluster = "Id")
-##    compare2(e.lvm, par = c("Y1~X1"))
-})
-
-######################################################################
-### test1-cluster.R ends here
diff --git a/tests/testthat/test1-sCorrect-dVcov.R b/tests/testthat/test1-sCorrect-dVcov.R
deleted file mode 100644
index fbca33f..0000000
--- a/tests/testthat/test1-sCorrect-dVcov.R
+++ /dev/null
@@ -1,504 +0,0 @@
-### test1-sCorrect-dVcov.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar  8 2018 (14:56) 
-## Version: 
-## Last-Updated: feb 11 2019 (13:43) 
-##           By: Brice Ozenne
-##     Update #: 114
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * header
-## rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-}
-
-lava.options(symbols = c("~","~~"))
-library(nlme)
-context("sCorrect (dVcov-SatterthwaiteCorrection)")
-
-## * simulation
-n <- 5e1
-mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
-              Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(mSim) <- eta1~X1+Gender
-latent(mSim) <- ~eta1+eta2
-categorical(mSim, labels = c("Male","Female")) <- ~Gender
-transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
-set.seed(10)
-d <- lava::sim(mSim, n = n, latent = FALSE)
-dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
-                     measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
-dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
-
-## * linear regression [lm,gls,lvm]
-## ** model fit
-e.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d)
-e.lm <- lm(Y1~X1+X2+Gender, data = d)
-e.gls <- gls(Y1~X1+X2+Gender, data = d, method = "ML")
-
-## ** check dVcov
-test_that("linear regression: Satterthwaite", {
-    X <- model.matrix(e.lm)
-    sigma2 <- coef(e.lvm)["Y1~~Y1"]
-    dI <- bdiag(crossprod(X)/sigma2^2,n/(sigma2^3))
-    vcov <- solve(bdiag(crossprod(X)/sigma2,n/(2*sigma2^2)))
-    GS <- vcov %*% dI %*% vcov
-
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(as.double(test.lvm), as.double(GS))
-
-    test.lm <- sCorrect(e.lm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lm <- sCorrect(e.lm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lm, GS.lm)
-    expect_equal(as.double(test.lm), as.double(GS))
-
-    test.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE, cluster = "Id")$dVcov.param
-    GS.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE, cluster = "Id")$dVcov.param
-    expect_equal(test.gls, GS.gls)
-    expect_equal(as.double(test.gls), as.double(GS))
-})
-
-## ** check dVcov (Satterthwaite + small sample correct) [should be moved to another file]
-test_that("linear regression: Satterthwaite + small sample correction (dVcov))", {
-    X <- model.matrix(e.lm)
-    sigma2 <- sigma(e.lm)^2
-    dI <- bdiag(crossprod(X)/sigma2^2,(n-NCOL(X))/(sigma2^3))
-    vcov <- solve(bdiag(crossprod(X)/sigma2,(n-NCOL(X))/(2*sigma2^2)))
-    GS <- vcov %*% dI %*% vcov
-
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = TRUE, adjust.n = TRUE, numeric.derivative = FALSE)$dVcov.param
-    expect_equal(as.double(test.lvm), as.double(GS), tol = 1e-5)
-
-    test.lm <- sCorrect(e.lm, adjust.Omega = TRUE, adjust.n = TRUE, numeric.derivative = FALSE)$dVcov.param
-    expect_equal(as.double(test.lm), as.double(GS), tol = 1e-5)
-    test.gls <- sCorrect(e.gls, adjust.Omega = TRUE, adjust.n = TRUE, numeric.derivative = FALSE, cluster = "Id")$dVcov.param
-    expect_equal(as.double(test.gls), as.double(GS), tol = 1e-5)
-})
-
-## * linear regression with constrains [lvm]
-## ** model fit
-e.lvm <- estimate(lvm(Y1[0:2]~X1+1*X2), data = d)
-
-e.lvm2 <- estimate(lvm(Y1~beta*X1+beta*X2), d)
-
-## ** check dVcov
-test_that("linear regression with constrains: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-
-    test.lvm2 <- sCorrect(e.lvm2, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm2 <- sCorrect(e.lvm2, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm2, GS.lvm2)    
-})
-
-## * multiple linear regression [lvm,gls]
-## ** model fit
-ls.lm <- list(lm(Y1~X1,d),lm(Y2~X2,d),lm(Y3~X1+X3,d))
-e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
-
-e.lvm2 <- estimate(lvm(Y1[mu:sigma1]~ beta1*X1 + beta2*X2,
-                       Y2[mu:sigma2]~ beta1*X1 + beta2*X2,
-                       Y3[mu:sigma3]~ beta1*X1 + beta2*X2),
-                       data = d)
-e.gls <- gls(value ~ X1 + X2,
-             data = dL[dL$variable %in% c("Y1","Y2","Y3"),],
-             weight = varIdent(form = ~1|variable),
-             method = "ML")
-
-test_that("gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm2)), as.double(logLik(e.gls)))
-})
-
-## ** check dVcov (Satterthwaite)
-test_that("multiple linear regression: Satterthwaite (dVcov)", {
-    X <- lapply(ls.lm, model.matrix)
-    sigma2 <- list(coef(e.lvm)["Y1~~Y1"],
-                   coef(e.lvm)["Y2~~Y2"],
-                   coef(e.lvm)["Y3~~Y3"])
-    dI <- mapply(X,sigma2, FUN = function(x,y){
-        bdiag(crossprod(x)/y^2,n/(y^3))
-    })
-    vcov <- mapply(X,sigma2, FUN = function(x,y){
-        solve(bdiag(crossprod(x)/y,n/(2*y^2)))
-    })
-    GS <- mapply(vcov, dI, FUN = function(x,y){
-        x %*% y %*% x
-    })
-    name.coef.lvm <- names(coef(e.lvm))
-    
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(as.double(test.lvm[grep("Y1",name.coef.lvm),grep("Y1",name.coef.lvm),"Y1~~Y1"]),
-                 as.double(GS[[1]]))
-    expect_equal(as.double(test.lvm[grep("Y2",name.coef.lvm),grep("Y2",name.coef.lvm),"Y2~~Y2"]),
-                 as.double(GS[[2]]))
-    expect_equal(as.double(test.lvm[grep("Y3",name.coef.lvm),grep("Y3",name.coef.lvm),"Y3~~Y3"]),
-                 as.double(GS[[3]]))
-
-    test.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE,
-                         numeric.derivative = FALSE, cluster = "Id")$dVcov.param
-    GS.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE,
-                       numeric.derivative = TRUE, cluster = "Id")$dVcov.param
-    expect_equal(test.gls, GS.gls)    
-})
-
-## ** multiple linear regression (Satterthwaite + small sample correct) [should be moved to another file]
-test_that("multiple linear regression: Satterthwaite + small sample correction (dVcov)", {
-    X <- lapply(ls.lm, model.matrix)
-    sigma2 <- lapply(ls.lm, function(x){sigma(x)^2})
-    dI <- mapply(X,sigma2, FUN = function(x,y){
-        bdiag(crossprod(x)/y^2,(n-NCOL(x))/(y^3))
-    })
-    vcov <- mapply(X,sigma2, FUN = function(x,y){
-        solve(bdiag(crossprod(x)/y,(n-NCOL(x))/(2*y^2)))
-    })
-    GS <- mapply(vcov, dI, FUN = function(x,y){
-        x %*% y %*% x
-    })
-    name.coef.lvm <- names(coef(e.lvm))
-
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = TRUE, adjust.n = TRUE, numeric.derivative = FALSE)$dVcov.param
-    expect_equal(as.double(test.lvm[grep("Y1",name.coef.lvm),grep("Y1",name.coef.lvm),"Y1~~Y1"]),
-                 as.double(GS[[1]]))
-    expect_equal(as.double(test.lvm[grep("Y2",name.coef.lvm),grep("Y2",name.coef.lvm),"Y2~~Y2"]),
-                 as.double(GS[[2]]))
-    expect_equal(as.double(test.lvm[grep("Y3",name.coef.lvm),grep("Y3",name.coef.lvm),"Y3~~Y3"]),
-                 as.double(GS[[3]]))
-
-    test.gls <- sCorrect(e.gls, adjust.Omega = TRUE, adjust.n = TRUE, numeric.derivative = FALSE, cluster = "Id")$dVcov.param
-    
-})
-
-## * multiple linear regression with constrains [lvm]
-## ** model fit
-e.lvm <- estimate(lvm(Y1~X1+1*X2,Y2~2*X3+2*X1,Y3~X2), data = d)
-
-## ** check dVcov
-test_that("multiple linear regression with constrains: Satterthwaite", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-})
-
-
-## * multiple linear regression with covariance links [lvm]
-## ** model fit
-e.lvm <- estimate(lvm(Y1~X1+X2,Y2~X3+X1,Y3~X2,Y1~~Y2),d)
-
-## ** check dVcov
-test_that("multiple linear regression with covariance: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-})
-
-
-## * mixed model: Compound symmetry [lvm,gls,lme]
-## ** model fit
-m <- lvm(Y1[mu1:sigma]~1*eta,
-         Y2[mu2:sigma]~1*eta,
-         Y3[mu3:sigma]~1*eta,
-         eta~X1+Gender)
-e.lvm <- estimate(m, d)
-
-e.lme <- lme(value ~ variable + X1 + Gender,
-             random =~ 1|Id,
-             data = dLred,
-             method = "ML")
-
-e.gls <- gls(value ~ variable + X1 + Gender,
-             correlation = corCompSymm(form=~ 1|Id),
-             data = dLred,
-             method = "ML")
-
-test_that("compound symmetry: lme/gls equivalent to lvm",{
-    expect_equal(as.double(logLik(e.lme)),as.double(logLik(e.lvm)))
-    expect_equal(as.double(logLik(e.gls)),as.double(logLik(e.lvm)))
-})
-
-
-## ** check dVcov
-test_that("compound symmetry: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.043665672, 0.001141085, 0.086044394),
-                 tol = 1e-8) ## compare with previous version
-   
-    test.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lme, GS.lme)
-   
-    test.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.gls, GS.gls)
-})
-
-## * mixed model: CS with different variances [lvm,lme]
-## ** model fit 
-m <- lvm(c(Y1[mu1:sigma1]~1*eta,
-           Y2[mu2:sigma2]~1*eta,
-           Y3[mu3:sigma3]~1*eta,
-           eta~X1+Gender))
-latent(m) <- ~eta
-e.lvm <- estimate(m, d)
-
-e.lme <- nlme::lme(value ~ variable + X1 + Gender,
-                   random =~1| Id,
-                   weights = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-e.gls <- nlme::gls(value ~ variable + X1 + Gender,
-                   correlation = corCompSymm(form = ~1| Id),
-                   weights = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-test_that("lme equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    ## gls does not give the same likelihood
-    ## expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-## ** check dVcov
-test_that("compound symmetry with different variances: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm) ## internal consistency
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.043665672, 0.001163405, 0.158962412),
-                 tol = 1e-8) ## compare with previous version
-    
-    test.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lme, GS.lme)
-
-    test.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.gls, GS.gls)
-})
-
-
-## * mixed model: Unstructured [lvm,gls,lme]
-## ** model fit 
-m <- lvm(Y1[mu1:sigma]~1*eta,
-         Y2[mu2:sigma]~1*eta,
-         Y3[mu3:sigma]~1*eta,
-         eta~X1+Gender)
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-e.lme <- lme(value ~ variable + X1 + Gender,
-             random =~ 1|Id,
-             correlation = corSymm(),
-             ## weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-e.gls <- gls(value ~ variable + X1 + Gender,
-             correlation = corSymm(form=~ 1|Id),
-             ## weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-test_that("lme/gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-## ** check dVcov
-test_that("Unstructured: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.069511713, 0.001092602, 0.129487510),
-                 tol = 1e-8) ## compare with previous version
-    ## ERROR: inversion?
-    ## test.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    ## GS.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    ## expect_equal(test.lme, GS.lme)
-
-    test.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.gls, GS.gls)
-})
-
-
-## * mixed model: Unstructured with weights [lvm,gls,lme]
-## ** model fit
-m <- lvm(Y1~1*eta,
-         Y2~1*eta,
-         Y3~1*eta,
-         eta~X1+Gender)
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-e.lme <- lme(value ~ variable + X1 + Gender,
-             random =~ 1|Id,
-             correlation = corSymm(),
-             weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-e.gls <- gls(value ~ variable + X1 + Gender,
-             correlation = corSymm(form=~ 1|Id),
-             weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-test_that("lme/gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-## ** check dVcov
-test_that("Unstructured with different variances: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.08912488, 0.00188713, 0.18430283),
-                 tol = 1e-8) ## compare with previous version
-    ## summary2(e.lvm)
-    
-    ## ERROR: the model is overparametrized
-    ## test.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    ## GS.lme <- sCorrect(e.lme, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    ## expect_equal(test.lme, GS.lme)
-
-    test.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.gls <- sCorrect(e.gls, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.gls, GS.gls)
-})
-
-
-## * LVM: factor model
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1))
-regression(m) <- eta1~X1+X2
-
-e.lvm <- estimate(m,d)
-
-## ** check dVcov
-test_that("factor model: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.297105651, 0.001184003, 0.360920233),
-                 tol = 1e-8) ## compare with previous version
-})
-
-
-
-## * LVM: factor model with constraints
-## ** model fit
-e.lvm <- estimate(lvm(Y1~1*eta+1*X2,Y2~1*eta,Y3~1*eta),
-                  data = d)
-
-e.lvm2 <- estimate(lvm(Y1~1*eta+X2,
-                       Y2~lambda*eta+X2,
-                       Y3~lambda*eta,
-                       eta ~ beta*X2+beta*X1),
-                   data = d)
-
-## ** check dVcov
-test_that("factor model: Satterthwaite", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.035030251, 0.004314892, 0.289075943),
-                 tol = 1e-8) ## compare with previous version    
-})
-
-## * LVM: 2 factor model
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(m) <- eta1~X1+X2
-latent(m) <- ~eta1+eta2
-
-e.lvm <- estimate(m,d)
-
-## ** check dVcov
-test_that("2 factor model: Satterthwaite", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-1.407457972, 0.000378982, 1.299890022),
-                 tol = 1e-8) ## compare with previous version   
-})
-
-## * LVM: 2 factor model with constraints
-## ** model fit
-m <- lvm(Y1~1*eta1+X2,Y2~lambda*eta1+X2,Y3~lambda*eta1,eta1 ~ beta*X2+beta*X1,
-         Z1~0+eta2,Z2~lambda*eta2,Z3~eta2)
-e.lvm <- estimate(m, d)
-e2.lvm <- e.lvm
-
-## ** check dVcov
-test_that("2 factor model with constrains: Satterthwaite", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.9719462803, 0.0005765745, 1.0173093948),
-                 tol = 1e-8) ## compare with previous version   
-})
-
-## * LVM: 2 factor model (covariance)
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-covariance(m) <- eta1 ~ eta2
-latent(m) <- ~eta1+eta2
-
-e.lvm <- estimate(m,d)
-
-## ** check dVcov
-test_that("2 factor model with covariance: Satterthwaite", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-1.0091216732, 0.0004489561, 1.0301898081),
-                 tol = 1e-8) ## compare with previous version   
-})
-
-## * LVM: 2 factor model (correlation LV)
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(m) <- eta2 ~ X1
-regression(m) <- eta1 ~ eta2+X2+X3
-
-e.lvm <- estimate(m,d)
-
-## ** check dVcov
-test_that("2 factor model with correlation: Satterthwaite (dVcov)", {
-    test.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = FALSE)$dVcov.param
-    GS.lvm <- sCorrect(e.lvm, adjust.Omega = FALSE, adjust.n = FALSE, numeric.derivative = TRUE)$dVcov.param
-    expect_equal(test.lvm, GS.lvm)
-    expect_equal(c(min(test.lvm),mean(test.lvm),max(test.lvm)),
-                 c(-0.7338483344, 0.0002132907, 0.8107582027),
-                 tol = 1e-8) ## compare with previous version
-})
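The block of checks deleted above validates the analytical derivative of the parameter variance-covariance matrix, the key ingredient of the Satterthwaite approximation, against a closed form. For a single linear regression the identity reduces to a small sandwich computation; the sketch below restates it on made-up toy data (X, y, and sigma2 are illustrative names, not objects from the test suite):

library(Matrix)                               ## for bdiag()
set.seed(1)
n <- 50
X <- cbind(1, rnorm(n))                       ## design matrix with intercept
y <- X %*% c(1, 2) + rnorm(n)
sigma2 <- mean(lm.fit(X, y)$residuals^2)      ## ML estimate of the residual variance

## Fisher information of (beta, sigma2), block-diagonal for a Gaussian model
I  <- bdiag(crossprod(X)/sigma2, n/(2*sigma2^2))
dI <- bdiag(-crossprod(X)/sigma2^2, -n/sigma2^3)  ## derivative of I in sigma2

## chain rule: d(I^-1)/dsigma2 = -I^-1 (dI/dsigma2) I^-1,
## i.e. the vcov %*% dI %*% vcov sandwich formed in the deleted tests
## (there the minus sign is folded into dI)
V  <- solve(I)
dV <- -V %*% dI %*% V

The deleted tests then compared this analytical derivative to sCorrect(..., numeric.derivative = TRUE), which differentiates the information matrix numerically.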
diff --git a/tests/testthat/test1-sCorrect-effects2.R b/tests/testthat/test1-sCorrect-effects2.R
new file mode 100644
index 0000000..ca35b97
--- /dev/null
+++ b/tests/testthat/test1-sCorrect-effects2.R
@@ -0,0 +1,81 @@
+### test1-sCorrect-effects2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: jan 19 2022 (11:40) 
+## Version: 
+## Last-Updated: jan 19 2022 (11:57) 
+##           By: Brice Ozenne
+##     Update #: 3
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * header
+## rm(list = ls())
+if(FALSE){ ## already called in test-all.R
+    library(testthat)
+    library(lavaSearch2)
+}
+
+lava.options(symbols = c("~","~~"))
+context("sCorrect (effects2)")
+
+## * simulation
+n <- 5e1
+mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
+              Z1~eta2,Z2~eta2,Z3~eta2+X3))
+regression(mSim) <- eta1~X1+Gender
+latent(mSim) <- ~eta1+eta2
+categorical(mSim, labels = c("Male","Female")) <- ~Gender
+transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
+set.seed(10)
+d <- lava::sim(mSim, n = n, latent = FALSE)
+
+## * effects2
+m <- lvm(Y1~eta1,Y2~eta1+X2,Y3~eta1,
+         Z1~eta2+X2,Z2~eta2,Z3~eta2,
+         eta2~eta1+X2)
+
+test_that("LM - effects2 correction", {
+
+    e <- estimate(lvm(Y1~X2+Z1,Z1~X2), d)
+    e2 <- estimate2(e)
+
+    test <- effects2(e2, linfct = c("Y1~X2|total","Y1~X2|direct","Y1~X2|indirect"))
+    expect_equal(summary(test, test = adjusted("none"))$table2$df,
+                 c(51.82375, 47.00000, 48.04664), tol = 1e-3)
+})
+
+test_that("LVM - effects2 no correction", {
+
+    e <- estimate(m, d)
+    e0 <- estimate2(e, ssc = FALSE, df = FALSE) ## no correction
+
+    GS <- effects(e, from = "X2", to = "Z1")
+    test1 <- effects(e0, linfct = "Z1~X2")
+    test3 <- effects(e0, linfct = c("Z1~X2|total","Z1~X2|direct","Z1~X2|indirect"))
+
+    expect_equal(as.double(GS$coef[c("total","direct","indirect")]),
+                 summary(test3, test = adjusted("none"))$table2[,"estimate"],
+                 tol = 1e-6)
+    expect_equal(as.double(summary(test1, test = adjusted("none"))$table2[1,]),
+                 as.double(summary(test3, test = adjusted("none"))$table2[1,]),
+                 tol = 1e-6)
+    expect_equal(as.double(sqrt(diag(GS$vcov[1:3,1:3]))),
+                 summary(test3, test = adjusted("none"))$table2[,"se"],
+                 tol = 1e-6)
+
+
+    e2 <- estimate2(e) ## correction
+    test12 <- effects(e2, linfct = "Z1~X2")
+    test32 <- effects(e2, linfct = c("Z1~X2|total","Z1~X2|direct","Z1~X2|indirect"))
+})
+
+
+##----------------------------------------------------------------------
+### test1-sCorrect-effects2.R ends here
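The new file above exercises effects2(), which decomposes the effect of an exposure along the paths of the model into total, direct, and indirect components with Satterthwaite degrees of freedom. A minimal usage sketch following the same API as the test (the model and data below are illustrative; adjusted() comes from multcomp, which lavaSearch2 imports):

library(lavaSearch2)
library(multcomp)                             ## for adjusted()
set.seed(10)
m <- lvm(Y1~X2+Z1, Z1~X2)                     ## X2 acts on Y1 directly and through Z1
d <- lava::sim(m, n = 50)

e  <- estimate(m, data = d)
e2 <- estimate2(e)                            ## small-sample corrected fit

## total = direct + indirect effect of X2 on Y1
eff <- effects2(e2, linfct = c("Y1~X2|total","Y1~X2|direct","Y1~X2|indirect"))
summary(eff, test = adjusted("none"))$table2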
diff --git a/tests/testthat/test1-sCorrect-lava.R b/tests/testthat/test1-sCorrect-lava.R
deleted file mode 100644
index 4ee3525..0000000
--- a/tests/testthat/test1-sCorrect-lava.R
+++ /dev/null
@@ -1,835 +0,0 @@
-### test1-sCorrect-lava.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar  6 2018 (10:40) 
-## Version: 
-## Last-Updated: apr  4 2018 (14:20) 
-##           By: Brice Ozenne
-##     Update #: 137
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## source("c:/Users/hpl802/Documents/GitHub/lavaSearch2/tests/testthat/test1-iid2-lava.R")
-
-## * header
-rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-}
-
-lava.options(symbols = c("~","~~"))
-.coef2 <- lavaSearch2:::.coef2
-library(nlme)
-context("sCorrect (replicate lava results)")
-
-## * simulation
-n <- 5e1
-mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
-              Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(mSim) <- eta1~X1+Gender
-latent(mSim) <- ~eta1+eta2
-categorical(mSim, labels = c("Male","Female")) <- ~Gender
-transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
-set.seed(10)
-d <- lava::sim(mSim, n = n, latent = FALSE)
-dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
-           measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
-dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
-
-## * linear regression [lm,gls,lvm]
-## ** model fit and sCorrect
-e.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d)
-e.lm <- lm(Y1~X1+X2+Gender, data = d)
-e.gls <- gls(Y1~X1+X2+Gender, data = d, method = "ML")
-
-e2.lvm <- e.lvm
-e2.gls <- e.gls
-e2.lm <- e.lm
-
-sCorrect(e2.lvm) <- FALSE
-sCorrect(e2.gls, cluster = 1:n) <- FALSE
-sCorrect(e2.lm) <- FALSE
-
-## ** check score, iid, residuals, vcov, compare2 at ML
-test_that("linear regression (at ML) internal consistency",{
-    expect_equivalent(e2.lvm$sCorrect$Omega,e2.lm$sCorrect$Omega)
-    expect_equivalent(e2.gls$sCorrect$Omega,e2.lm$sCorrect$Omega)
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param,e2.lm$sCorrect$vcov.param)
-    expect_equivalent(e2.gls$sCorrect$vcov.param,e2.lm$sCorrect$vcov.param)
-
-    expect_equivalent(e2.lvm$sCorrect$leverage,e2.lm$sCorrect$leverage)
-    expect_equivalent(e2.gls$sCorrect$leverage,e2.lm$sCorrect$leverage)
-
-    expect_equivalent(e2.lvm$sCorrect$score,e2.lm$sCorrect$score)
-    expect_equivalent(e2.gls$sCorrect$score,e2.lm$sCorrect$score)
-
-    expect_equivalent(e2.lvm$sCorrect$epsilon,e2.lm$sCorrect$epsilon)
-    expect_equivalent(e2.gls$sCorrect$epsilon,e2.lm$sCorrect$epsilon)
-
-    expect_equivalent(e2.lvm$sCorrect$dVcov.param,e2.lm$sCorrect$dVcov.param)
-    expect_equivalent(e2.gls$sCorrect$dVcov.param,e2.lm$sCorrect$dVcov.param)    
-
-    expect_equivalent(iid2(e2.gls), iid2(e2.lvm))    
-    expect_equivalent(iid2(e2.lm), iid2(e2.lvm))    
-})
-
-test_that("linear regression (at ML) compare to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(e2.lvm$sCorrect$n.corrected==e.lvm$data$n)
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-
-    GS <- iid(e.lm)
-    expect_equivalent(iid2(e2.lvm)[,1:length(colnames(GS))], GS)
-    expect_equal(e2.lvm$sCorrect$residuals, residuals2(e2.lvm))
-
-    ## NOTE: iid in lava uses a numerical derivative to compute the information matrix,
-    ## which is why there is no perfect match between iid2.lvm and iid.lvm
-    
-    ## expect_equal(as.double(iid2(e2.lvm)), as.double(iid(e.lvm)))
-    
-    ## Error: as.double(iid2(e2.lvm)) not equal to as.double(iid(e.lvm)).
-    ## 245/250 mismatches (average diff: 1.46e-06)
-    ## [1] -0.01755 - -0.01756 ==  6.95e-07
-    ## [2] -0.02036 - -0.02037 ==  6.63e-07
-    ## [3]  0.04413 -  0.04413 ==  4.34e-07
-    ## [4]  0.00615 -  0.00615 == -9.30e-07
-    ## [5]  0.00236 -  0.00236 ==  2.20e-07
-    ## [6]  0.01338 -  0.01338 ==  7.35e-07
-    ## [7] -0.00482 - -0.00482 == -4.08e-08
-    ## [8]  0.04877 -  0.04877 ==  3.21e-07
-    ## [9] -0.00811 - -0.00811 == -1.35e-06 
-
-    expect_true(all(leverage2(e2.lvm) == 0))
-
-    C1 <- compare2(e2.lvm, par = c("Y1~X1","Y1~X2"))
-    C2 <- lava::compare(e.lvm, par = c("Y1~X1","Y1~X2"))
-    expect_equal(unname(C1$statistic),
-                 unname(C2$statistic/NROW(C1$estimate))
-                 )
-
-})
-
-## ** check score not at ML
-S1 <- score2(e2.lvm, param = coef(e2.lvm)+1)
-S2 <- score2(e.lvm, param = coef(e.lvm)+1, bias.correct = FALSE)
-
-S3 <- score2(e2.lm, param = .coef2(e2.gls)+1) ## not .coef2(e2.lm) because of a different estimate of the variance
-S4 <- score2(e.lm, param = .coef2(e.gls)+1, bias.correct = FALSE)
-
-S5 <- score2(e2.gls, param = .coef2(e2.gls)+1)
-S6 <- score2(e.gls, param = .coef2(e.gls)+1, cluster = 1:n, bias.correct = FALSE)
-
-GS <- score(e.lvm, p = coef(e.lvm)+1, indiv = TRUE)
-
-test_that("linear regression (at ML + 1) compare to lava",{
-    expect_equal(S1, GS)
-    expect_equal(S2, GS)
-    
-    expect_equal(as.double(S3), as.double(GS))
-    expect_equal(as.double(S4), as.double(GS))
-    expect_equal(as.double(S5), as.double(GS))
-    expect_equal(as.double(S6), as.double(GS))
-})
-
-p <- length(coef(e.lvm))
-S1 <- score2(e2.lvm, param = coef(e2.lvm)+1:p)
-S2 <- score2(e.lvm, param = coef(e.lvm)+1:p, bias.correct = FALSE)
-
-S3 <- score2(e2.lm, param = .coef2(e2.gls)+1:p) ## not .coef2(e2.lm) because of a different estimate of the variance
-S4 <- score2(e.lm, param = .coef2(e.gls)+1:p, bias.correct = FALSE)
-
-S5 <- score2(e2.gls, param = .coef2(e2.gls)+1:p)
-S6 <- score2(e.gls, param = .coef2(e.gls)+1:p, cluster = 1:n, bias.correct = FALSE)
-GS <- score(e.lvm, p = coef(e.lvm)+1:p, indiv = TRUE)
-
-test_that("linear regression (at ML + 1:p) compare to lava",{
-    
-    expect_equal(S1, GS)
-    expect_equal(S2, GS)
-    
-    expect_equal(as.double(S3), as.double(GS))
-    expect_equal(as.double(S4), as.double(GS))
-    expect_equal(as.double(S5), as.double(GS))
-    expect_equal(as.double(S6), as.double(GS))
-    
-})
-
-
-## * linear regression with constraints [lvm]
-## ** model fit and sCorrect
-e.lvm <- estimate(lvm(Y1[0:2]~X1+1*X2), data = d)
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-e.lvm2 <- estimate(lvm(Y1~beta*X1+beta*X2), d)
-
-e2.lvm2 <- e.lvm2
-sCorrect(e2.lvm2) <- FALSE
-
-## ** check score, iid, residuals, vcov, compare2 at ML
-test_that("linear regression: constrains",{
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(e2.lvm$sCorrect$n.corrected==e.lvm$data$n)
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-    
-    expect_equal(score2(e.lvm, bias.correct = FALSE),
-                 score(e.lvm, indiv = TRUE))
-})
-
-## * multiple linear regression [lvm,gls]
-## ** model fit and sCorrect
-ls.lm <- list(lm(Y1~X1,d),lm(Y2~X2,d),lm(Y3~X1+X3,d))
-e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
-
-e.lvm2 <- estimate(lvm(Y1[mu:sigma1]~ beta1*X1 + beta2*X2,
-                       Y2[mu:sigma2]~ beta1*X1 + beta2*X2,
-                       Y3[mu:sigma3]~ beta1*X1 + beta2*X2),
-                       data = d)
-e.gls <- gls(value ~ X1 + X2,
-             data = dL[dL$variable %in% c("Y1","Y2","Y3"),],
-             weights = varIdent(form = ~1|variable),
-             method = "ML")
-
-test_that("gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm2)), as.double(logLik(e.gls)))
-})
-
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-e2.lvm2 <- e.lvm2
-sCorrect(e2.lvm2) <- FALSE
-
-e2.gls <- e.gls
-sCorrect(e2.gls, cluster = "Id") <- FALSE
-
-## ** check score, iid, residuals, vcov, compare2 at ML
-
-test_that("multiple linear regression (at ML) internal consistency",{
-    param <- attr(e2.gls$sCorrect$param,"mean.coef")
-
-    expect_equal(unname(e2.lvm2$sCorrect$Omega),
-                 unname(e2.gls$sCorrect$Omega),
-                 tolerance = 1e-5)
-    expect_equal(unname(e2.lvm2$sCorrect$residuals),
-                 unname(e2.gls$sCorrect$residuals),
-                 tolerance = 1e-5)
-    expect_equal(unname(e2.lvm2$sCorrect$score[,c("Y1","Y1~X1","Y1~X2")]),
-                 unname(e2.gls$sCorrect$score[,c("(Intercept)","X1","X2")]),
-                 tolerance = 1e-5)
-})
-
-## e2.lvm2$sCorrect$residuals[1:5,]
-## e2.gls$sCorrect$residuals[1:5,]
-
-test_that("multiple linear regression (at ML) compare to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equal(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-
-    GS <- lapply(ls.lm,iid)
-    test <- iid2(e2.lvm)
-    expect_equivalent(test[,grep("^Y1$|^Y1~X",colnames(test))], GS[[1]])
-    expect_equivalent(test[,grep("^Y2$|^Y2~X",colnames(test))], GS[[2]])
-    expect_equivalent(test[,grep("^Y3$|^Y3~X",colnames(test))], GS[[3]])
-    expect_equal(e2.lvm$sCorrect$residuals, residuals2(e2.lvm))
-    expect_true(all(leverage2(e2.lvm) == 0))
-
-    C1 <- compare2(e2.lvm, par = c("Y1~X1","Y2~X2","Y3~X1"))
-    C2 <- lava::compare(e.lvm, par = c("Y1~X1","Y2~X2","Y3~X1"))
-    expect_equal(unname(C1$statistic),
-                 unname(C2$statistic/NROW(C1$estimate))
-                 )
-})
-
-## ** check score not at ML
-test_that("multiple linear regression (at ML + 1) compare to lava",{
-    S1 <- score2(e2.lvm, param = coef(e2.lvm)+1)
-    S2 <- score2(e.lvm, param = coef(e.lvm)+1, bias.correct = FALSE)
-
-    GS <- score(e.lvm, p = coef(e.lvm)+1, indiv = TRUE)
-
-    expect_equal(S1, GS)
-    expect_equal(S2, GS)
-})
-
-test_that("multiple linear regression (at ML + 1:p) compare to lava",{
-    p <- length(coef(e.lvm))
-    S1 <- score2(e2.lvm, param = coef(e2.lvm)+1:p)
-    S2 <- score2(e.lvm, param = coef(e.lvm)+1:p, bias.correct = FALSE)
-
-    GS <- score(e.lvm, p = coef(e.lvm)+1:p, indiv = TRUE)
-    
-    expect_equal(S1, GS)
-    expect_equal(S2, GS)
-})
-
-## * multiple linear regression with constraints [lvm]
-## ** model fit and sCorrect
-e.lvm <- estimate(lvm(Y1~X1+1*X2,Y2~2*X3+2*X1,Y3~X2), data = d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE    
-
-## ** check score, residuals, vcov
-test_that("multiple linear regressions: constrains",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equal(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-
-    expect_equal(e2.lvm$sCorrect$residuals, residuals2(e2.lvm))
-    expect_true(all(leverage2(e2.lvm) == 0))
-
-    C1 <- compare2(e2.lvm, par = c("Y1~X1"))
-    C2 <- lava::compare(e.lvm, par = c("Y1~X1"))
-    expect_equal(unname(C1$statistic),
-                 unname(C2$statistic/NROW(C1$estimate))
-                 )
-})
-
-## * multiple linear regression with covariance links [lvm]
-## ** model fit and sCorrect
-e.lvm <- estimate(lvm(Y1~X1+X2,Y2~X3+X1,Y3~X2,Y1~~Y2),d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("multiple linear regression, covariance link (at ML)",{
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equal(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE),
-                 score(e.lvm, indiv=TRUE))
-    
-    expect_equal(e2.lvm$sCorrect$residuals, residuals2(e2.lvm))
-    expect_true(all(leverage2(e2.lvm) == 0))
-})
-
-## ** check score not at ML
-test_that("multiple linear regression, covariance link (not at ML: +1)",{
-    expect_equal(score2(e.lvm, p = coef(e.lvm)+1, bias.correct = FALSE),
-                 score(e.lvm, p = coef(e.lvm)+1, indiv=TRUE))
-})
-
-test_that("multiple linear regression, covariance link (not at ML: +1:p)",{
-    newcoef <- coef(e.lvm)+0.1*(1:length(coef(e.lvm)))
-    expect_equal(score2(e.lvm, p = newcoef, bias.correct = FALSE),
-                 score(e.lvm, p = newcoef, indiv = TRUE))
-})
-
-## * mixed model: Compound symmetry [lvm,gls,lme]
-## ** model fit and sCorrect
-m <- lvm(Y1[mu1:sigma]~1*eta,
-         Y2[mu2:sigma]~1*eta,
-         Y3[mu3:sigma]~1*eta,
-         eta~X1+Gender)
-e.lvm <- estimate(m, d)
-
-e.lme <- lme(value ~ variable + X1 + Gender,
-             random =~ 1|Id,
-             data = dLred,
-             method = "ML")
-
-e.gls <- gls(value ~ variable + X1 + Gender,
-             correlation = corCompSymm(form=~ 1|Id),
-             data = dLred,
-             method = "ML")
-
-test_that("compound symmetry: lme/gls equivalent to lvm",{
-    expect_equal(as.double(logLik(e.lme)),as.double(logLik(e.lvm)))
-    expect_equal(as.double(logLik(e.gls)),as.double(logLik(e.lvm)))
-})
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-e2.lme <- e.lme
-sCorrect(e2.lme) <- FALSE
-
-e2.gls <- e.gls
-sCorrect(e2.gls) <- FALSE
-
-
-## ** check score, residuals, vcov at ML
-test_that("compound symmetry (at ML) internal consistency",{
-    param.nlme <- names(coef(e.gls))
-    param.lava <- c("eta","Y2","Y3","eta~X1","eta~GenderFemale")
-    
-    expect_equal(unname(e2.lvm$sCorrect$Omega),unname(e2.lme$sCorrect$Omega), tol = 1e-5)
-    expect_equal(unname(e2.lme$sCorrect$Omega),unname(e2.gls$sCorrect$Omega), tol = 1e-5)
-
-    expect_equal(unname(e2.lvm$sCorrect$vcov.param[param.lava,param.lava]),
-                 unname(e2.lme$sCorrect$vcov.param[param.nlme,param.nlme]), tol = 1e-5)
-    expect_equal(unname(e2.gls$sCorrect$vcov.param[param.nlme,param.nlme]),
-                 unname(e2.lme$sCorrect$vcov.param[param.nlme,param.nlme]), tol = 1e-5)
-    
-    expect_equal(unname(e2.lvm$sCorrect$score[,param.lava]),
-                 unname(e2.lme$sCorrect$score[,param.nlme]), tol = 1e-5)
-    expect_equal(unname(e2.gls$sCorrect$score[,param.nlme]),
-                 unname(e2.lme$sCorrect$score[,param.nlme]), tol = 1e-5)
-
-    expect_equivalent(e2.lvm$sCorrect$residuals,e2.lme$sCorrect$residuals)
-    expect_equivalent(e2.gls$sCorrect$residuals,e2.lme$sCorrect$residuals)
-
-    expect_equivalent(e2.lvm$sCorrect$dVcov.param[param.lava,param.lava,"Y1~~Y1"],
-                      e2.lme$sCorrect$dVcov.param[param.nlme,param.nlme,"sigma2"])
-
-    expect_equivalent(iid2(e2.gls)[,param.nlme], iid2(e2.lme)[,param.nlme])    
-    expect_equivalent(iid2(e2.lme)[,param.nlme], iid2(e2.lvm)[,param.lava])    
-})
-
-test_that("compound symmetry (at ML) compare to lava",{
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equal(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE),
-                 score(e.lvm, indiv = TRUE))
-    
-    expect_equal(e2.lvm$sCorrect$residuals, residuals2(e2.lvm))
-    expect_true(all(leverage2(e2.lvm) == 0))
-})
-
-
-## * mixed model: CS with different variances [lvm,lme]
-## ** model fit and sCorrect
-m <- lvm(c(Y1[mu1:sigma1]~1*eta,
-           Y2[mu2:sigma2]~1*eta,
-           Y3[mu3:sigma3]~1*eta,
-           eta~X1+Gender))
-latent(m) <- ~eta
-e.lvm <- estimate(m, d)
-
-e.lme <- nlme::lme(value ~ variable + X1 + Gender,
-                   random =~1| Id,
-                   weights = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-e.gls <- nlme::gls(value ~ variable + X1 + Gender,
-                   correlation = corCompSymm(form = ~1| Id),
-                   weights = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-test_that("lme equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    ## gls does not give the same likelihood
-    ## expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-e2.lme <- e.lme
-sCorrect(e2.lme) <- FALSE
-
-e2.gls <- e.gls
-sCorrect(e2.gls) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("compound symmetry with weights (at ML) internal consistency",{
-    param.nlme <- names(coef(e.gls))
-    param.lava <- c("eta","Y2","Y3","eta~X1","eta~GenderFemale")
-    
-    expect_equal(unname(e2.lvm$sCorrect$Omega),unname(e2.lme$sCorrect$Omega), tol = 1e-5)
-
-    expect_equal(unname(e2.lvm$sCorrect$vcov.param[param.lava,param.lava]),
-                 unname(e2.lme$sCorrect$vcov.param[param.nlme,param.nlme]), tol = 1e-5)
-    
-    expect_equal(unname(e2.lvm$sCorrect$score[,param.lava]),
-                 unname(e2.lme$sCorrect$score[,param.nlme]), tol = 1e-5)
-
-    expect_equal(unname(e2.lvm$sCorrect$residuals),
-                 unname(e2.lme$sCorrect$residuals), tol = 1e-5)
-})
-
-test_that("compound symmetry with weights (at ML) compare to lava",{
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equal(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE),
-                 score(e.lvm, indiv = TRUE))
-    
-    expect_equal(e2.lvm$sCorrect$residuals, residuals2(e2.lvm))
-    expect_true(all(leverage2(e2.lvm) == 0))
-})
-
-## * mixed model: Unstructured [lvm,gls,lme]
-## ** model fit and sCorrect
-m <- lvm(Y1[mu1:sigma]~1*eta,
-         Y2[mu2:sigma]~1*eta,
-         Y3[mu3:sigma]~1*eta,
-         eta~X1+Gender)
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-e.lme <- lme(value ~ variable + X1 + Gender,
-             random =~ 1|Id,
-             correlation = corSymm(),
-             ## weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-e.gls <- gls(value ~ variable + X1 + Gender,
-             correlation = corSymm(form=~ 1|Id),
-             ## weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-test_that("lme/gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## e2.lme <- e.lme
-## sCorrect(e2.lme) <- FALSE
-## does not work because the model is overparametrized
-
-e2.gls <- e.gls
-sCorrect(e2.gls) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("Unstructured (at ML) internal consistency",{
-    param.nlme <- names(coef(e.gls))
-    param.lava <- c("eta","Y2","Y3","eta~X1","eta~GenderFemale")
-
-    expect_equal(unname(e2.gls$sCorrect$Omega),unname(e2.lvm$sCorrect$Omega), tol = 1e-4)
-
-    expect_equal(unname(e2.lvm$sCorrect$vcov.param[param.lava,param.lava]),
-                 unname(e2.gls$sCorrect$vcov.param[param.nlme,param.nlme]), tol = 1e-4)
-    
-    expect_equal(unname(e2.lvm$sCorrect$score[,param.lava]),
-                 unname(e2.gls$sCorrect$score[,param.nlme]), tol = 1e-4)
-    
-    expect_equal(unname(e2.lvm$sCorrect$residuals),
-                 unname(e2.gls$sCorrect$residuals), tol = 1e-4)
-})
-
-test_that("Unstructured (at ML) compare to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-    
-})
-
-## * mixed model: Unstructured with weights [lvm,gls,lme]
-## ** model fit and sCorrect
-m <- lvm(Y1~1*eta,
-         Y2~1*eta,
-         Y3~1*eta,
-         eta~X1+Gender)
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-e.lme <- lme(value ~ variable + X1 + Gender,
-             random =~ 1|Id,
-             correlation = corSymm(),
-             weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-e.gls <- gls(value ~ variable + X1 + Gender,
-             correlation = corSymm(form=~ 1|Id),
-             weights = varIdent(form =~ 1|variable),
-             data = dLred,
-             method = "ML")
-
-test_that("lme/gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-e2.lme <- e.lme
-## sCorrect(e2.lme) <- FALSE
-## does not work because the model is overparametrized
-
-e2.gls <- e.gls
-sCorrect(e2.gls) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("Unstructured with weights (at ML) internal consistency",{
-    param.nlme <- names(coef(e.gls))
-    param.lava <- c("eta","Y2","Y3","eta~X1","eta~GenderFemale")
-
-    expect_equal(unname(e2.lvm$sCorrect$Omega),unname(e2.gls$sCorrect$Omega), tol = 1e-3)
-
-    expect_equal(unname(e2.lvm$sCorrect$vcov.param[param.lava,param.lava]),
-                 unname(e2.gls$sCorrect$vcov.param[param.nlme,param.nlme]), tol = 1e-3)
-    
-    expect_equal(unname(e2.lvm$sCorrect$score[,param.lava]),
-                 unname(e2.gls$sCorrect$score[,param.nlme]), tol = 1e-3)
-
-    expect_equal(unname(e2.lvm$sCorrect$residuals),
-                 unname(e2.gls$sCorrect$residuals), tol = 1e-3)
-
-})
-
-test_that("Unstructured with weights (at ML) compare to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-    
-})
-
-## * LVM: factor model
-## ** model fit and sCorrect
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1))
-regression(m) <- eta1~X1+X2
-
-e.lvm <- estimate(m,d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("factor model (at ML) compared to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-
-})
-
-## ** check score not at ML
-param <- coef(e.lvm)
-test <- score2(e.lvm, p = param+1, bias.correct = FALSE)
-GS <- score(e.lvm, p = param+1, indiv = TRUE)
-
-test_that("factor model (not at ML: +1)",{
-    expect_equal(test, GS)
-})
-
-test <- score2(e.lvm, p = param+0.1*(1:length(param)), bias.correct = FALSE)
-GS <- score(e.lvm, p = param+0.1*(1:length(param)), indiv=TRUE)
-test_that("factor model (not at ML: +1:p)",{
-    expect_equal(test, GS)
-})
-
-## * LVM: factor model with constraints
-## ** model fit and sCorrect
-e.lvm <- estimate(lvm(Y1~1*eta+1*X2,Y2~1*eta,Y3~1*eta),
-                  data = d)
-
-
-e.lvm2 <- estimate(lvm(Y1~1*eta+X2,
-                       Y2~lambda*eta+X2,
-                       Y3~lambda*eta,
-                       eta ~ beta*X2+beta*X1),
-                   data = d)
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-e2.lvm2 <- e.lvm2
-sCorrect(e2.lvm2) <- FALSE
-
-## ** check score at ML
-test_that("factor model: fixed coefficients",{
-    expect_equal(score2(e2.lvm, bias.correct = FALSE),
-                 score(e.lvm, indiv = TRUE))
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param,
-                      vcov(e.lvm))
-})
-
-test_that("factor model: constrains",{
-    expect_equal(score2(e2.lvm2, bias.correct = FALSE),
-                 score(e.lvm2, indiv = TRUE))
-
-    expect_equivalent(e2.lvm2$sCorrect$vcov.param,
-                      vcov(e.lvm2))
-})
-
-
-## * LVM: 2 factor model
-## ** model fit and sCorrect
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(m) <- eta1~X1+X2
-latent(m) <- ~eta1+eta2
-
-e.lvm <- estimate(m,d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("2 factor model (at ML) compared to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-
-})
-
-
-## ** check score not at ML
-param <- coef(e.lvm)
-test <- score2(e.lvm, p = param+1, bias.correct = FALSE)
-GS <- score(e.lvm, p = param+1, indiv = TRUE)
-
-test_that("2 factor model (not at ML: +1)",{
-    expect_equal(test, GS)
-})
-
-test <- score2(e.lvm, p = param+0.1*(1:length(param)), bias.correct = FALSE)
-GS <- score(e.lvm, p = param+0.1*(1:length(param)), indiv=TRUE)
-test_that("2 factor model (not at ML: +1:p)",{
-    expect_equal(test, GS)
-})
-
-## * LVM: 2 factor model with constraints
-## ** model fit and sCorrect
-m <- lvm(Y1~1*eta1+X2,Y2~lambda*eta1+X2,Y3~lambda*eta1,eta1 ~ beta*X2+beta*X1,
-         Z1~0+eta2,Z2~lambda*eta2,Z3~eta2)
-e.lvm <- estimate(m, d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## ** check score at ML
-test_that("2 factor model: constrains",{
-    expect_equal(score2(e2.lvm, bias.correct = FALSE),
-                 score(e.lvm, indiv = TRUE))
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param,
-                      vcov(e.lvm))
-})
-
-## * LVM: 2 factor model (covariance)
-## ** model fit and sCorrect
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-covariance(m) <- eta1 ~ eta2
-latent(m) <- ~eta1+eta2
-
-e.lvm <- estimate(m,d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("2 factor model with covariance (at ML) compared to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-
-})
-
-## ** check score not at ML
-param <- coef(e.lvm)
-test <- score2(e.lvm, p = param+1, bias.correct = FALSE)
-GS <- score(e.lvm, p = param+1, indiv = TRUE)
-
-test_that("2 factor model with covariance (not at ML: +1)",{
-    expect_equal(test, GS)
-})
-
-test <- score2(e.lvm, p = param+0.1*(1:length(param)), bias.correct = FALSE)
-GS <- score(e.lvm, p = param+0.1*(1:length(param)), indiv=TRUE)
-test_that("2 factor model with covariance (not at ML: +1:p)",{
-    expect_equal(test, GS)
-})
-
-## * LVM: 2 factor model (correlation LV)
-## ** model fit and sCorrect
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(m) <- eta2 ~ X1
-regression(m) <- eta1 ~ eta2+X2+X3
-
-e.lvm <- estimate(m,d)
-
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- FALSE
-
-## ** check score, residuals, vcov at ML
-test_that("2 factor model with correlation (at ML) compared to lava",{
-
-    expect_equivalent(e2.lvm$sCorrect$vcov.param, vcov(e.lvm))
-    expect_true(all(e2.lvm$sCorrect$leverage==0))
-    expect_true(all(e2.lvm$sCorrect$n.corrected==e.lvm$data$n))
-    expect_equivalent(e2.lvm$sCorrect$residuals, residuals(e.lvm))
-    expect_equal(e2.lvm$sCorrect$param, coef(e.lvm))
-    expect_equal(e2.lvm$sCorrect$score, score(e.lvm, indiv = TRUE))
-    expect_equal(score2(e.lvm, bias.correct = FALSE), score(e.lvm, indiv = TRUE))
-
-})
-
-## ** check score not at ML
-param <- coef(e.lvm)
-test <- score2(e.lvm, p = param+1, bias.correct = FALSE)
-GS <- score(e.lvm, p = param+1, indiv = TRUE)
-
-test_that("2 factor model (not at ML: +1)",{
-    expect_equal(test, GS)
-})
-
-test <- score2(e.lvm, p = param+0.1*(1:length(param)), bias.correct = FALSE)
-GS <- score(e.lvm, p = param+0.1*(1:length(param)), indiv=TRUE)
-test_that("2 factor model (not at ML: +1:p)",{
-    expect_equal(test, GS)
-})
-
-
-##----------------------------------------------------------------------
-### test1-sCorrect-lava.R ends here
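The 835-line file removed above mostly asserted one invariant: the analytical score implemented in lavaSearch2 must reproduce lava's individual score, both at the MLE and at arbitrary parameter values. A condensed sketch of that check, using the pre-2.0 argument names documented by the deleted code (p, bias.correct); in the 2.0 series the corresponding switch is ssc = FALSE, as the new tests show:

library(lavaSearch2)
set.seed(10)
d <- lava::sim(lvm(Y~X1+X2), n = 50)
e <- estimate(lvm(Y~X1+X2), data = d)

## away from the MLE the analytical score should still match lava's
p.shift <- coef(e) + 0.1*seq_along(coef(e))
range(score2(e, p = p.shift, bias.correct = FALSE)
      - score(e, p = p.shift, indiv = TRUE))  ## ~0 up to numerical error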
diff --git a/tests/testthat/test1-sCorrect-missingValues.R b/tests/testthat/test1-sCorrect-missingValues.R
index 16d6b34..1b66637 100644
--- a/tests/testthat/test1-sCorrect-missingValues.R
+++ b/tests/testthat/test1-sCorrect-missingValues.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: mar  7 2018 (13:39) 
 ## Version: 
-## Last-Updated: feb 11 2019 (14:32) 
+## Last-Updated: Jan 17 2022 (23:21) 
 ##           By: Brice Ozenne
-##     Update #: 36
+##     Update #: 57
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -23,7 +23,6 @@ if(FALSE){ ## already called in test-all.R
 }
 
 lava.options(symbols = c("~","~~"))
-.coef2 <- lavaSearch2:::.coef2
 library(nlme)
 context("sCorrect (dealing with missing values)")
 
@@ -59,82 +58,108 @@ sX2 <- var(d$Y2)/n
 df <- (sX1+sX2)^2/(sX1^2/(n-1) + sX2^2/(n-1))
 df-e.ttest$parameter
 
-## *** using gls - no missing data
-e.gls <- gls(value ~ variable, data = dLred2,
-             weights = varIdent(form = ~1|variable),
-             method = "ML")
-
-expect_equal(as.double(coef(e.gls)[2]),
-             as.double(diff(e.ttest$estimate)))
-
-test_that("t test (full data)", {
-    eS.gls <- summary2(e.gls, cluster = "Id")$tTable
-    expect_equal(unname(e.ttest$parameter),
-                 eS.gls["variableY2","df"],
-                 tol = 1e-3)
-    expect_equal(unname(e.ttest$p.value),
-                 eS.gls["variableY2","p-value"],
-                 tol = 1e-5)
-})
-
-## *** using gls - missing data
-eNA.gls <- gls(value ~ variable, data = dLred3,
-               weights = varIdent(form = ~1|variable),
-               method = "ML")
-
-expect_equal(as.double(coef(eNA.gls)[2]),
-             as.double(diff(e.ttest$estimate)))
-## getVarCov2(eNA.gls, cluster = "Id")
-
-
-
-test_that("t test (missing data)", {
-    eNAS.gls <- summary2(eNA.gls, cluster = "Id")$tTable
-    
-    expect_equal(unname(e.ttest$parameter),
-                 eNAS.gls["variableY2","df"],
-                 tol = 1e-3)
-    expect_equal(unname(e.ttest$p.value),
-                 eNAS.gls["variableY2","p-value"],
-                 tol = 1e-5)
-})
-
 ## * LVM: factor model
 m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1))
 regression(m) <- eta1~X1+X2
 
 e.lvm <- estimate(m,d)
-e2.lvm <- e.lvm
-sCorrect(e2.lvm) <- TRUE
+e2.lvm <- estimate2(e.lvm)
 
 ## ** complete case analysis
 missing.Row <- d[1,]
 missing.Row[,"Id"] <- -1
 missing.Row[,c("Y1","Y2","Y3")] <- NA
-eNA.lvm <- estimate(m, rbind(d,missing.Row), missing = FALSE)
+## eNA.lvm <- estimate(m, rbind(d,missing.Row), missing = FALSE)
+eNA.lvm <- estimate(m, rbind(missing.Row,d,missing.Row), missing = FALSE)
 
 test_that("complete case analysis (factor model)", {
-    eNA2.lvm <- eNA.lvm
-    sCorrect(eNA2.lvm) <- TRUE
-
-    expect_equal(eNA2.lvm$sCorrect,
-                 e2.lvm$sCorrect)
+    expect_equal(unname(score2(eNA.lvm, ssc = FALSE, indiv = TRUE)), unname(score(eNA.lvm, indiv = TRUE)))
+    ## FIX NA!!!!!!
+    
+    eNA2.lvm <- estimate2(eNA.lvm)
+    
+    expect_equal(model.tables(eNA2.lvm), model.tables(e2.lvm))
+    ##               estimate        se        df       lower     upper   statistic     p.value
+    ## eta1       -0.37387088 0.2962948 18.338440 -0.99554076 0.2477990 -1.26182045 0.222827159
+    ## Y2         -0.02252887 0.3297457 15.258888 -0.72432797 0.6792702 -0.06832194 0.946416628
+    ## Y3          0.38272845 0.2851559  5.682026 -0.32459821 1.0900551  1.34217267 0.230678148
+    ## eta1~X1     0.99599616 0.3134807 18.469394  0.33859531 1.6533970  3.17721651 0.005092730
+    ## eta1~X2    -0.04275890 0.2607688  9.859874 -0.62490927 0.5393915 -0.16397243 0.873065310
+    ## Y2~eta1     1.05707590 0.2723211  5.481730  0.37515670 1.7389951  3.88172564 0.009722483
+    ## Y3~eta1     1.08664682 0.3566308  1.982418 -0.46092704 2.6342207  3.04697992 0.093951240
+    ## Y3~X1       0.61495545 0.4296209  3.045199 -0.74088088 1.9707918  1.43139085 0.246430988
+    ## Y1~~Y1      0.49861889 0.3398654  5.101312 -0.36983983 1.3670776          NA          NA
+    ## eta1~~eta1  1.10299240 0.5611968  4.190452 -0.42760479 2.6335896          NA          NA
+    ## Y2~~Y2      1.60214650 0.6249695  4.823447 -0.02223785 3.2265309          NA          NA
+    ## Y3~~Y3      0.64437632 0.4273389  4.023035 -0.53943230 1.8281849          NA          NA
 })
 
 ## ** full information
+
+### *** example of issue with lava
+m <- lvm(c(Y1~1*eta1,Y2~1*eta1,Y3~1*eta1+X1,
+           eta1~1))
+
 missing.Row <- d[1,]
 missing.Row[,"Id"] <- -1
 missing.Row[,c("Y1","Y2")] <- NA
-eNA.lvm <- estimate(m, rbind(d,missing.Row), missing = TRUE)
+missing.Row2 <- d[3,]
+missing.Row2[,"Id"] <- -2
+missing.Row2[,c("Y1","Y3")] <- NA
 
-test_that("full information (factor model)", {
-    eNA2.lvm <- eNA.lvm
-    sCorrect(eNA2.lvm) <- TRUE
-    ## sCorrect(eNA2.lvm, numeric.derivative = TRUE) <- TRUE
-    summary2(eNA2.lvm)
+dNA.wide <- rbind(missing.Row,d,missing.Row2)
+dNA.long <- melt(dNA.wide, measure.vars = c("Y1","Y2","Y3"))
+dNA.long$variable <- factor(dNA.long$variable, levels = c("Y1","Y2","Y3"))
+
+test_that("full information (issue lava)", {
+    eNA.lvm <- estimate(m, data = dNA.wide, missing = TRUE)
+    test <- estimate2(eNA.lvm, ssc = FALSE)
+
+    hessian.GS <- numDeriv::hessian(func = function(x){logLik(eNA.lvm, p = x)},
+                                    x = coef(eNA.lvm))
+    hessian.info <- information(eNA.lvm, p = coef(eNA.lvm), type = "hessian")
+    hessian.lavaSearch2 <- hessian2(test)
+
+    expect_equal(unname(hessian.lavaSearch2), unname(hessian.GS), tol = 1e-6)
+    ## expect_equal(unname(hessian.info), unname(hessian.GS), tol = 1e-6) ## fails
 })
 
 
+## *** factor model
+m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1))
+regression(m) <- eta1~X1+X2
+
+eNA.lvm <- estimate(m, dNA.wide, missing = TRUE)
+
+test_that("full information (factor model)", {
+
+    hessian.GS <- numDeriv::hessian(func = function(x){logLik(eNA.lvm, p = x)},
+                                    x = coef(eNA.lvm))
+    expect_equal(hessian.GS, unname(hessian2(eNA.lvm, ssc = FALSE)), tol = 1e-6)
+
+    ## NOT TRUE!!!! issue in lava or on purpose?
+    ## expect_equal(unname(information(eNA.lvm)), unname(solve(vcov(eNA.lvm))), tol = 1e-6)
+    ## NOT TRUE !!! bug in lava? (see previous ***)
+    ## expect_equal(information(eNA.lvm), unname(information2(eNA.lvm, ssc = FALSE)), tol = 1e-6)
+    eNA2.lvm <- estimate2(eNA.lvm)
+    model.tables(eNA2.lvm)
+    ##               estimate        se         df        lower      upper   statistic     p.value
+    ## eta1       -0.39593331 0.2854528 19.5265046  -0.99230434  0.2004377 -1.38703618 0.181061270
+    ## Y3          0.37704733 0.2887687  8.9341075  -0.27692804  1.0310227  1.30570700 0.224270881
+    ## Y2         -0.04308602 0.3263015 16.1994022  -0.73412297  0.6479509 -0.13204356 0.896575991
+    ## eta1~X1     1.03194106 0.2972545 17.5733098   0.40634382  1.6575383  3.47157405 0.002802916
+    ## eta1~X2    -0.01552331 0.2462704  9.4842975  -0.56831932  0.5372727 -0.06303358 0.951048335
+    ## Y3~eta1     1.07977655 0.3656814  1.0724740  -2.88655281  5.0461059  2.95277987 0.194108317
+    ## Y3~X1       0.62193065 0.4431914  2.2225543  -1.11318213  2.3570434  1.40330027 0.283921347
+    ## eta1~~eta1  1.02409942 0.5259689  0.5684192 -44.51400617 46.5622050          NA          NA
+    ## Y3~~Y3      0.62918891 0.4126684  4.8415855  -0.44213402  1.7005118          NA          NA
+    ## Y2~eta1     1.07161261 0.2676486  8.1125205   0.45590084  1.6873244  4.00380494 0.003818464
+    ## Y1~~Y1      0.51334322 0.3337466  2.9800835  -0.55281455  1.5795010          NA          NA
+    ## Y2~~Y2      1.54266511 0.6031601  5.4456513   0.02959063  3.0557396          NA          NA
+    expect_equal(summary(eNA2.lvm)$table2$df,
+                 c(19.52650464, 8.93410748, 16.19940217, 17.57330981, 9.4842975, 1.07247398, 2.2225543, 0.56841916, 4.84158554, 8.1125205, 2.9800835, 5.44565132),
+                 tol = 1e-6)
+})
 
 ##----------------------------------------------------------------------
 ### test1-sCorrect-missingValues.R ends here
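The rewritten checks above validate the analytical Hessian under full-information (FIML) estimation with missing outcomes against a numerical gold standard, working around the lava issue flagged in the test. A condensed sketch of the same comparison using the 2.0 API (hessian2 with ssc = FALSE, as used above); the toy model is illustrative and numDeriv must be installed:

library(lavaSearch2)
m <- lvm(c(Y1~eta, Y2~eta, Y3~eta))
latent(m) <- ~eta
set.seed(10)
d <- lava::sim(m, n = 50, latent = FALSE)
d[1, "Y1"] <- NA                              ## one missing outcome triggers FIML

e  <- estimate(m, data = d, missing = TRUE)
H  <- hessian2(e, ssc = FALSE)                ## analytical, no small-sample correction
GS <- numDeriv::hessian(function(x){logLik(e, p = x)}, x = coef(e))
max(abs(unname(H) - GS))                      ## should be ~0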
diff --git a/tests/testthat/test1-sCorrect-smallSampleCorrection.R b/tests/testthat/test1-sCorrect-smallSampleCorrection.R
deleted file mode 100644
index a99a832..0000000
--- a/tests/testthat/test1-sCorrect-smallSampleCorrection.R
+++ /dev/null
@@ -1,322 +0,0 @@
-### test1-sCorrect-smallSampleCorrection.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar  7 2018 (12:08) 
-## Version: 
-## Last-Updated: jul 31 2020 (10:23) 
-##           By: Brice Ozenne
-##     Update #: 57
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * header
-rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-}
-
-lava.options(symbols = c("~","~~"))
-## library(clubSandwich)
-library(nlme)
-calcFactor <- function(object){
-    return((object$dims$N - object$dims$p)/(object$dims$N - object$dims$p * (object$method == "REML")))
-}
-
-context("sCorrect (Satterthwaite + small sample correction)")
-
-## * simulation
-n <- 5e1
-mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
-              Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(mSim) <- eta1~X1+Gender
-latent(mSim) <- ~eta1+eta2
-categorical(mSim, labels = c("Male","Female")) <- ~Gender
-transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
-set.seed(10)
-d <- lava::sim(mSim, n = n, latent = FALSE)
-dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
-                     measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
-dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
-dLred$variable.factor <- as.factor(dLred$variable)
-
-## * linear regression [lm,gls,lvm]
-## ** model fit and sCorrect
-e.lm <- lm(Y1~X1+X2, data = d)
-
-## ** iid2 matches clubSandwich
-test_that("iid2.lm/iid2.lvm matches clubSandwich", {
-    ## V.GS <- clubSandwich::vcovCR(e.lm, type = "CR2", cluster = d$Id)
-    
-    eHC2.iid2.lm <- iid2(e.lm, bias.correct = TRUE)
-    V.lm <- crossprod(eHC2.iid2.lm)
-
-    ## no longer equal to clubSandwich since version 1.2.1:
-    ## lavaSearch2 now uses the average bias to correct the residuals instead of the individual bias
-    
-    ## expect_equal(as.matrix(V.GS),
-    ##              V.lm[rownames(V.GS),colnames(V.GS)],
-    ##              tol = 1e-7)
-})
-
-
-## * multiple linear regression with constrains [lvm, gls]
-## ** model fit and sCorrect
-e.gls <- gls(value ~ X1 + X2,
-             data = dLred,
-             weights = varIdent(form = ~1|variable),
-             method = "ML")
-e.lvm <- estimate(lvm(Y1[mu:sigma1]~ beta1*X1 + beta2*X2,
-                      Y2[mu:sigma2]~ beta1*X1 + beta2*X2,
-                      Y3[mu:sigma3]~ beta1*X1 + beta2*X2),
-                  data = d)
-
-factor <- calcFactor(e.gls)
-index.coef <- 1:3
-
-test_that("gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.gls)))
-})
- 
-## ** HC0/HC1
-iid2HC0.gls <- iid2(e.gls, bias.correct = FALSE, cluster = "Id")
-iid2HC0.lvm <- iid2(e.lvm, bias.correct = FALSE)
-
-test_that("iid2.gls/iid2.lvm matches clubSandwich (HC0-HC1)", {
-    expect_equal(unname(iid2HC0.gls[,index.coef]),
-                 unname(iid2HC0.lvm[,index.coef]),
-                 tol = 1e-5)
-    
-    VHC0.gls <- crossprod(iid2HC0.gls)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.gls, type = "CR0", cluster = dLred$Id) * factor^2
-    ## expect_equal(as.double(GS),as.double(VHC0.gls), tolerance = 1e-10)
-    
-    ## GS <- clubSandwich::vcovCR(e.gls, type = "CR1", cluster = dLred$Id) * factor^2
-    ## VHC1.gls <- VHC0.gls*n/(n-1)
-    ## expect_equal(as.double(GS),as.double(VHC1.gls), tolerance = 1e-10)
-})
-
-## ** HC2
-iid2HC2.gls <- iid2(e.gls, bias.correct = TRUE, n.iter = 1, cluster = "Id")
-iid2HC2.lvm <- iid2(e.lvm, bias.correct = TRUE, n.iter = 1)
-
-test_that("iid2.lvm matches clubSandwich (HC2)", {
-    expect_equal(unname(iid2HC2.gls[,index.coef]),
-                 unname(iid2HC2.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    VHC2.gls <- crossprod(iid2HC2.gls)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.gls, type = "CR2", cluster = dLred$Id) * factor^2
-
-    ## no longer equal to clubSandwich since version 1.2.1
-    ## > lavaSearch2 uses the average bias to correct the residuals instead of the individual bias
-    ## expect_equal(as.double(GS),as.double(VHC2.gls), tolerance = 1e-5)
-})
-
-## * mixed model: CS [lvm,gls,lme]
-## ** model fit and sCorrect
-m <- lvm(c(Y1[0:sigma]~1*eta,
-           Y2[0:sigma]~1*eta,
-           Y3[0:sigma]~1*eta,
-           eta~X1+X2))
-latent(m) <- ~eta
-e.lvm <- estimate(m, d)
-
-e.lme <- nlme::lme(value ~ X1 + X2,
-                   random =~1| Id,
-                   data = dLred, method = "ML")
-
-e.gls <- nlme::gls(value ~ X1 + X2,
-                   correlation = corCompSymm(form = ~1| Id),
-                   data = dLred, method = "ML")
-index.coef <- 1:length(coef(e.gls))
-
-test_that("lme/gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
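-## (editor's note) the three fits agree because a one-factor lvm with all
-## loadings fixed to 1 and a common residual variance implies
-## Var(Y) = sigma^2 * I + tau^2 * J, i.e. the compound-symmetry covariance
-## of a random-intercept model.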
-
-factor <- calcFactor(e.gls)
- 
-## ** HC0/HC1
-iid2HC0.gls <- iid2(e.gls, bias.correct = FALSE)
-iid2HC0.lme <- iid2(e.lme, bias.correct = FALSE)
-iid2HC0.lvm <- iid2(e.lvm, bias.correct = FALSE)
-
-test_that("iid2.gls/iid2.lme/iid2.lvm matches clubSandwich (HC0-HC1)", {
-    expect_equal(unname(iid2HC0.gls[,index.coef]),
-                 unname(iid2HC0.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    expect_equal(unname(iid2HC0.gls[,index.coef]),
-                 unname(iid2HC0.lme[,index.coef]),
-                 tol = 1e-5)
-    
-    VHC0.gls <- crossprod(iid2HC0.gls)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.gls, type = "CR0", cluster = dLred$Id) * factor^2
-    ## expect_equal(as.double(GS),as.double(VHC0.gls), tolerance = 1e-10)
-
-    ## GS <- clubSandwich::vcovCR(e.gls, type = "CR1", cluster = dLred$Id) * factor^2
-    ## VHC1.gls <- VHC0.gls*n/(n-1)
-    ## expect_equal(as.double(GS),as.double(VHC1.gls), tolerance = 1e-10)
-})
-
-## ** HC2
-iid2HC2.gls <- iid2(e.gls, bias.correct = TRUE, n.iter = 1)
-iid2HC2.lme <- iid2(e.lme, bias.correct = TRUE, n.iter = 1)
-iid2HC2.lvm <- iid2(e.lvm, bias.correct = TRUE, n.iter = 1)
-
-test_that("iid2.gls/iid2.lme/iid2.lvm matches clubSandwich (HC2)", {
-    expect_equal(unname(iid2HC2.gls[,index.coef]),
-                 unname(iid2HC2.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    expect_equal(unname(iid2HC2.gls[,index.coef]),
-                 unname(iid2HC2.lme[,index.coef]),
-                 tol = 1e-5)
-
-    VHC2.gls <- crossprod(iid2HC2.gls)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.gls, type = "CR2", cluster = dLred$Id) * factor^2
-    ## no longer equal to clubSandwich since version 1.2.1
-    ## > lavaSearch2 uses the average bias to correct the residuals instead of the individual bias
-    ## expect_equal(as.double(GS),as.double(VHC2.gls), tolerance = 1e-10)
-})
-
-## * mixed model: CS with different variances [lvm,gls,lme]
-## ** model fit and sCorrect
-m <- lvm(c(Y1[0:sigma1]~1*eta,
-           Y2[0:sigma2]~1*eta,
-           Y3[0:sigma3]~1*eta,
-           eta~X1+X2))
-latent(m) <- ~eta
-e.lvm <- estimate(m, d)
-
-e.lme <- nlme::lme(value ~ X1 + X2,
-                   random =~1| Id,
-                   weights = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-e.gls <- nlme::gls(value ~ X1 + X2,
-                   correlation = corCompSymm(form = ~1| Id),
-                   weights = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-index.coef <- 1:length(coef(e.gls))
-
-test_that("lme equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    ## gls does not give the same likelihood
-    ## expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
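-## (editor's note) with heterogeneous variances the parametrisations differ:
-## the random intercept imposes a common covariance tau^2 between outcomes,
-## while corCompSymm + varIdent imposes a common correlation rho, hence the
-## gls likelihood need not match the lme/lvm one.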
-
-## ** HC0/HC1
-iid2HC0.gls <- iid2(e.gls, bias.correct = FALSE)
-iid2HC0.lme <- iid2(e.lme, bias.correct = FALSE)
-iid2HC0.lvm <- iid2(e.lvm, bias.correct = FALSE)
-
-test_that("iid2.lme/iid2.lvm matches clubSandwich (HC0-HC1)", {
-    expect_equal(unname(iid2HC0.lme[,index.coef]),
-                 unname(iid2HC0.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    VHC0.lme <- crossprod(iid2HC0.lme)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.lme, type = "CR0", cluster = dLred$Id)
-    ## expect_equal(as.double(GS),as.double(VHC0.lme), tolerance = 1e-10)
-
-    ## GS <- clubSandwich::vcovCR(e.lme, type = "CR1", cluster = dLred$Id)
-    ## VHC1.lme <- VHC0.lme*n/(n-1)
-    ## expect_equal(as.double(GS),as.double(VHC1.lme), tolerance = 1e-10)
-})
-
-## ** HC2
-iid2HC2.gls <- iid2(e.gls, bias.correct = TRUE, n.iter = 1)
-iid2HC2.lme <- iid2(e.lme, bias.correct = TRUE, n.iter = 1)
-iid2HC2.lvm <- iid2(e.lvm, bias.correct = TRUE, n.iter = 1)
-
-test_that("iid2.lme/iid2.lvm matches clubSandwich (HC2)", {
-    expect_equal(unname(iid2HC2.lme[,index.coef]),
-                 unname(iid2HC2.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    VHC2.lme <- crossprod(iid2HC2.lme)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.lme, type = "CR2", cluster = dLred$Id)
-    ## no longer equal to clubSandwich since version 1.2.1
-    ## > lavaSearch2 uses the average bias to correct the residuals instead of the individual bias
-    ## expect_equal(as.double(GS),as.double(VHC2.lme), tolerance = 1e-5)
-})
-
-
-## * mixed model: Unstructured
-## ** model fit and sCorrect
-m <- lvm(c(Y1~0+1*eta,
-           Y2~0+1*eta,
-           Y3~0+1*eta,
-           eta~X1+X2))
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-e.gls <- nlme::gls(value ~ X1 + X2,
-                   correlation = corSymm(form =~ 1| Id),
-                   weight = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-e.lme <- nlme::lme(value ~ X1 + X2,
-                   random =~ 1|Id,
-                   correlation = corSymm(),
-                   weight = varIdent(form =~ 1|variable),
-                   data = dLred, method = "ML")
-
-index.coef <- 1:length(coef(e.gls))
-
-test_that("lme/gls equivalent to lvm", {
-    expect_equal(as.double(logLik(e.lvm)), as.double(logLik(e.lme)))
-    expect_equal(as.double(logLik(e.gls)), as.double(logLik(e.lme)))
-})
-
-## ** HC0/HC1
-iid2HC0.gls <- iid2(e.gls, bias.correct = FALSE)
-## iid2HC0.lme <- iid2(e.lme, bias.correct = FALSE)
-## does not work because the model is overparametrized
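-## (a random intercept plus corSymm and varIdent carries 7 covariance
-## parameters for a 3x3 matrix with only 6 free entries)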
-iid2HC0.lvm <- iid2(e.lvm, bias.correct = FALSE)
-
-test_that("iid2.gls/iid2.lme/iid2.lvm matches clubSandwich (HC0-HC1)", {
-    expect_equal(unname(iid2HC0.gls[,index.coef]),
-                 unname(iid2HC0.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    VHC0.lvm <- crossprod(iid2HC0.lvm)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.lme, type = "CR0", cluster = dLred$Id)
-    ## expect_equal(as.double(GS),as.double(VHC0.lvm), tolerance = 1e-5)
-
-    ## GS <- clubSandwich::vcovCR(e.lme, type = "CR1", cluster = dLred$Id)
-    ## VHC1.lvm <- VHC0.lvm*n/(n-1)
-    ## expect_equal(as.double(GS),as.double(VHC1.lvm), tolerance = 1e-5)
-})
-
-## ** HC2
-iid2HC2.gls <- iid2(e.gls, bias.correct = TRUE, n.iter = 1)
-expect_error(iid2HC2.lme <- iid2(e.lme, bias.correct = TRUE, n.iter = 1))
-## does not work because the model is overparametrized
-iid2HC2.lvm <- iid2(e.lvm, bias.correct = TRUE, n.iter = 1)
-
-test_that("iid2.gls/iid2.lme/iid2.lvm matches clubSandwich (HC2)", {
-    expect_equal(unname(iid2HC2.gls[,index.coef]),
-                 unname(iid2HC2.lvm[,index.coef]),
-                 tol = 1e-5)
-
-    VHC2.lvm <- crossprod(iid2HC2.lvm)[index.coef,index.coef]
-    ## GS <- clubSandwich::vcovCR(e.lme, type = "CR2", cluster = dLred$Id)
-    ## no longer equal to clubSandwich since version 1.2.1
-    ## > lavaSearch2 uses the average bias to correct the residuals instead of the individual bias
-    ## expect_equal(as.double(GS),as.double(VHC2.lvm), tolerance = 1e-5)
-})
-
-
-##----------------------------------------------------------------------
-### test1-sCorrect-smallSampleCorrection.R ends here
diff --git a/tests/testthat/test1-sCorrect-summary2.R b/tests/testthat/test1-sCorrect-summary2.R
deleted file mode 100644
index 8455094..0000000
--- a/tests/testthat/test1-sCorrect-summary2.R
+++ /dev/null
@@ -1,911 +0,0 @@
-### test1-sCorrect-summary2.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: apr  4 2018 (13:29) 
-## Version: 
-## Last-Updated: mar  4 2019 (18:52) 
-##           By: Brice Ozenne
-##     Update #: 51
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * header
-rm(list = ls())
-if(FALSE){ ## already called in test-all.R
-    library(testthat)
-    library(lavaSearch2)
-
-    printDF <- function(object, bias.correct){
-        colDF <- summary2(object, bias.correct = bias.correct)$coef[,"df",drop=FALSE]
-        n.coef <- NROW(colDF)
-        vec.end <- c(rep(",",n.coef-1),")")
-        vec.start <- c("c(", rep("",n.coef-1))        
-        df <- data.frame(paste0(vec.start,"\"",rownames(colDF),"\""),
-                         "=",
-                         paste0(colDF[,1],vec.end))
-        names(df) <- NULL
-        print(df, row.names = FALSE)
-    }
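-    ## (editor's note) printDF is an interactive helper: it prints the df
-    ## column of summary2 as a ready-to-paste c("name" = value, ...) vector,
-    ## used to regenerate the hard-coded expected values in the tests below.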
-
-}
-
-lava.options(symbols = c("~","~~"))
-library(nlme)
-context("sCorrect (dVcov-SatterthwaiteCorrection)")
-
-## * simulation
-n <- 5e1
-mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
-              Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(mSim) <- eta1~X1+Gender
-latent(mSim) <- ~eta1+eta2
-categorical(mSim, labels = c("Male","Female")) <- ~Gender
-transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
-set.seed(10)
-d <- lava::sim(mSim, n = n, latent = FALSE)
-dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
-                     measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
-dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
-
-## * linear regression 
-## ** model fit
-e.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d)
-
-## ** test df
-test_that("linear regression: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~X1" = 50,
-            "Y1~X2" = 50,
-            "Y1~GenderFemale" = 50,
-            "Y1~~Y1" = 12.5,
-            "Y1" = 50)
-    expect_equal(as.double(df),
-                 summary2(e.lvm, bias.correct = FALSE)$coef$df)
-})
-
-test_that("linear regression: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~X1" =   46,
-            "Y1~X2" =   46,
-            "Y1~GenderFemale" =   46,
-            "Y1~~Y1" = 11.5,
-            "Y1" =   46)
-    expect_equal(as.double(df),
-                 summary2(e.lvm, bias.correct = TRUE)$coef$df)
-})
-
-## ** robust standard error
-test_that("linear regression: robust SE", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    eS0 <- summary2(e.lvm, robust = TRUE, df = TRUE)$coef
-    eS1 <- summary2(e.lvm, robust = TRUE, df = 2)$coef
-    eS2 <- summary2(e.lvm, robust = TRUE, df = 2, cluster = 1:n)$coef
-    expect_equal(eS1,eS2)
-    
-    df <- c("Y1~X1" =   46,
-            "Y1~X2" =   46,
-            "Y1~GenderFemale" =   46,
-            "Y1~~Y1" = 11.5,
-            "Y1" =   46)
-    expect_equal(as.double(df),
-                 eS0$df, tol = 1e-2)
-    
-    df <- c("Y1~X1" = 43.194962,   
-            "Y1~X2" = 48.765588,
-            "Y1~GenderFemale" = 52.687514,
-            "Y1~~Y1" = 9.694972,
-            "Y1" = 42.373871)
-    expect_equal(as.double(df),
-                 eS1$df, tol = 1e-2)
-})
-
-## * linear regression with constraints
-## ** model fit
-e.lvm <- estimate(lvm(Y1[0:2]~X1+1*X2), data = d)
-
-e.lvm2 <- estimate(lvm(Y1~beta*X1+beta*X2), d)
-
-
-## ** test df
-test_that("linear regression with constrains: Satterthwaite (df)", {
-    expect_equal(summary2(e.lvm)$coef$df,c(Inf,NA,NA)) ## Inf since the variance coefficient is known
-    ## printDF(e.lvm2, bias.correct = FALSE)
-    df <- c("Y1~X1" =   50,
-            "Y1~X2" =   NA,
-            "Y1~~Y1" = 12.5,
-            "Y1" =   50)
-    expect_equal(summary2(e.lvm2, bias.correct = FALSE)$coef$df,
-                 as.double(df))
-})
-
-test_that("linear regression with constrains: Satterthwaite + SSC (df)", {
-    expect_equal(summary2(e.lvm)$coef$df,c(Inf,NA,NA)) ## Inf since the variance coefficient is known
-    ## printDF(e.lvm2, bias.correct = TRUE)
-    df <- c("Y1~X1" =   48,
-            "Y1~X2" =   NA,
-            "Y1~~Y1" = 12,
-            "Y1" =   48)
-    expect_equal(summary2(e.lvm2, bias.correct = TRUE)$coef$df,
-                 as.double(df))
-})
-
-## * multiple linear regression 
-## ** model fit
-ls.lm <- list(lm(Y1~X1,d),lm(Y2~X2,d),lm(Y3~X1+X3,d))
-e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
-
-## ** test df
-test_that("multiple linear regression: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~X1" =   50,
-            "Y2~X2" =   50,
-            "Y3~X1" =   50,
-            "Y3~X3" =   50,
-            "Y1~~Y1" = 12.5,
-            "Y2~~Y2" = 12.5,
-            "Y3~~Y3" = 12.5,
-            "Y1" =   50,
-            "Y2" =   50,
-            "Y3" =   50)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-test_that("multiple linear regression: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~X1" =    48,
-            "Y2~X2" =    48,
-            "Y3~X1" =    47,
-            "Y3~X3" =    47,
-            "Y1~~Y1" =    12,
-            "Y2~~Y2" =    12,
-            "Y3~~Y3" = 11.75,
-            "Y1" =    48,
-            "Y2" =    48,
-            "Y3" =    47)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-## * multiple linear regression with constraints
-## ** model fit
-e.lvm <- estimate(lvm(Y1~X1+1*X2,Y2~2*X3+2*X1,Y3~X2), data = d)
-
-## ** test df
-test_that("multiple linear regression with constrains: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~X1" =   50,
-            "Y1~X2" =   NA,
-            "Y2~X1" =   NA,
-            "Y2~X3" =   NA,
-            "Y3~X2" =   50,
-            "Y1~~Y1" = 12.5,
-            "Y2~~Y2" = 12.5,
-            "Y3~~Y3" = 12.5,
-            "Y1" =   50,
-            "Y2" =   50,
-            "Y3" =   50)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-test_that("multiple linear regression with constrains: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~X1" =    48,
-            "Y1~X2" =    NA,
-            "Y2~X1" =    NA,
-            "Y2~X3" =    NA,
-            "Y3~X2" =    48,
-            "Y1~~Y1" =    12,
-            "Y2~~Y2" = 12.25,
-            "Y3~~Y3" =    12,
-            "Y1" =    48,
-            "Y2" =    49,
-            "Y3" =    48)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-## * multiple linear regression with covariance links 
-## ** model fit
-e.lvm <- estimate(lvm(Y1~X1+X2,Y2~X3+X1,Y3~X2,Y1~~Y2),d)
-
-## ** test df
-test_that("multiple linear regression with covariance: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~X1" = 50.0023249929247,
-            "Y1~X2" = 50.0557533452502,
-            "Y2~X1" = 50.1412333709522,
-            "Y2~X3" = 50.0557533452502,
-            "Y3~X2" =               50,
-            "Y1~~Y1" =             12.5,
-            "Y1~~Y2" = 14.4382586892588,
-            "Y2~~Y2" =             12.5,
-            "Y3~~Y3" =             12.5,
-            "Y1" = 51.0449669789772,
-            "Y2" = 50.0000667169911,
-            "Y3" =               50)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-test_that("multiple linear regression with covariance: Satterthwaite +SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~X1" = 47.0021511585814,
-            "Y1~X2" = 47.0515840309539,
-            "Y2~X1" = 47.1306527469107,
-            "Y2~X3" = 47.0515840309539,
-            "Y3~X2" =               48,
-            "Y1~~Y1" =            11.75,
-            "Y1~~Y2" = 13.6588307493286,
-            "Y2~~Y2" =            11.75,
-            "Y3~~Y3" =               12,
-            "Y1" = 47.9656506208306,
-            "Y2" = 47.0000617288762,
-            "Y3" =               48)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-## * mixed model: Compound symmetry 
-## ** model fit
-m <- lvm(Y1[mu1:sigma]~1*eta,
-         Y2[mu2:sigma]~1*eta,
-         Y3[mu3:sigma]~1*eta,
-         eta~X1+Gender)
-e.lvm <- estimate(m, d)
-
-## ** test df
-test_that("compound symmetry: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" =               50,
-            "eta~GenderFemale" = 49.9999999999999,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" =               25,
-            "eta~~eta" = 14.9861363947381,
-            "Y2~~Y2" =               NA,
-            "Y3~~Y3" =               NA,
-            "Y1" =               NA,
-            "eta" = 91.8352861647611,
-            "Y2" = 99.9999999999999,
-            "Y3" =              100)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-test_that("compound symmetry: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" = 48.3333333333333,
-            "eta~GenderFemale" = 48.3333333333334,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 24.1666666666667,
-            "eta~~eta" = 14.2918140430781,
-            "Y2~~Y2" =               NA,
-            "Y3~~Y3" =               NA,
-            "Y1" =               NA,
-            "eta" = 87.2184581017777,
-            "Y2" = 96.6666666666667,
-            "Y3" = 96.6666666666667)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-## * mixed model: CS with different variances 
-## ** model fit 
-m <- lvm(c(Y1[mu1:sigma1]~1*eta,
-           Y2[mu2:sigma2]~1*eta,
-           Y3[mu3:sigma3]~1*eta,
-           eta~X1+Gender))
-latent(m) <- ~eta
-e.lvm <- estimate(m, d)
-
-## ** test df
-test_that("compound symmetry with different variances: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" =               50,
-            "eta~GenderFemale" =               50,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 13.7622127329219,
-            "eta~~eta" = 14.9076367038288,
-            "Y2~~Y2" = 13.1071601322285,
-            "Y3~~Y3" = 12.9639539581698,
-            "Y1" =               NA,
-            "eta" =  62.670059576658,
-            "Y2" = 61.7435858694498,
-            "Y3" = 62.9968941834921)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-test_that("compound symmetry with different variances: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" = 48.3090185442116,
-            "eta~GenderFemale" = 48.3090185442116,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 13.2163927334731,
-            "eta~~eta" =  14.217415837608,
-            "Y2~~Y2" = 12.6752803879324,
-            "Y3~~Y3" = 12.5558385456554,
-            "Y1" =               NA,
-            "eta" = 60.2494643343862,
-            "Y2" = 59.2748591852783,
-            "Y3" = 60.5251223192755)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-    
-})
-
-## * mixed model: Unstructured 
-## ** model fit 
-m <- lvm(Y1[mu1:sigma]~1*eta,
-         Y2[mu2:sigma]~1*eta,
-         Y3[mu3:sigma]~1*eta,
-         eta~X1+Gender)
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-## ** test df
-test_that("Unstructured: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" =               50,
-            "eta~GenderFemale" =               50,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 17.5643964155044,
-            "Y1~~Y2" = 26.5641253277581,
-            "Y1~~Y3" = 27.0682699161553,
-            "eta~~eta" =  28.574875590729,
-            "Y2~~Y2" =               NA,
-            "Y3~~Y3" =               NA,
-            "Y1" =               NA,
-            "eta" = 91.2264371148653,
-            "Y2" = 53.4049195968228,
-            "Y3" = 53.5959248802897)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-test_that("Unstructured: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" = 48.3224853449504,
-            "eta~GenderFemale" = 48.3224853449505,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 16.7854424067793,
-            "Y1~~Y2" = 25.5782787764655,
-            "Y1~~Y3" = 26.0669936093665,
-            "eta~~eta" = 26.8150959679397,
-            "Y2~~Y2" =               NA,
-            "Y3~~Y3" =               NA,
-            "Y1" =               NA,
-            "eta" = 86.6593055939208,
-            "Y2" = 51.5324014285871,
-            "Y3" = 51.7048594272516)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-## * mixed model: Unstructured with different variances
-## ** model fit
-m <- lvm(Y1~1*eta,
-         Y2~1*eta,
-         Y3~1*eta,
-         eta~X1+Gender)
-covariance(m) <- Y1~Y2
-covariance(m) <- Y1~Y3
-e.lvm <- estimate(m, d)
-
-## ** test df
-test_that("Unstructured with different variances: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta" =               NA,
-            "eta~X1" =               50,
-            "eta~GenderFemale" =               50,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 29.4058931067402,
-            "Y1~~Y2" =  24.123892562579,
-            "Y1~~Y3" = 23.8490532004303,
-            "eta~~eta" = 21.6480839727271,
-            "Y2~~Y2" = 13.9484476182045,
-            "Y3~~Y3" = 13.6295423667226,
-            "Y1" =               NA,
-            "eta" = 61.9919494846624,
-            "Y2" =               50,
-            "Y3" =               50)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-test_that("Unstructured with different variances: Satterthwaite + SSC(df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <-  c("Y1~eta" =               NA,
-             "eta~X1" = 48.3304216261627,
-             "eta~GenderFemale" = 48.3304216261627,
-             "Y2~eta" =               NA,
-             "Y3~eta" =               NA,
-             "Y1~~Y1" = 28.3183276959291,
-             "Y1~~Y2" = 23.3515179666561,
-             "Y1~~Y3" =  23.067088992202,
-             "eta~~eta" = 20.5208370543495,
-             "Y2~~Y2" = 13.5374504027834,
-             "Y3~~Y3" = 13.2154882614502,
-             "Y1" =               NA,
-             "eta" = 59.5876907394727,
-             "Y2" =  48.303327990695,
-             "Y3" = 48.3503310393569)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-## * LVM: factor model
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1))
-regression(m) <- eta1~X1+X2
-
-e.lvm <- estimate(m,d)
-
-## ** test df
-test_that("factor model: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta1" =               NA,
-            "eta1~X1" = 58.4757554282479,
-            "eta1~X2" = 10.8872653678716,
-            "Y2~eta1" =  12.650352895762,
-            "Y3~eta1" = 1.66449902683186,
-            "Y3~X1" = 1.94477624007772,
-            "Y1~~Y1" = 13.4119782396656,
-            "eta1~~eta1" = 9.77837284896349,
-            "Y2~~Y2" = 13.1191680921666,
-            "Y3~~Y3" = 3.08049854039328,
-            "Y1" =               NA,
-            "eta1" = 53.5326429983215,
-            "Y2" = 33.8519195633547,
-            "Y3" = 5.63733433999094)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-test_that("factor model: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta1" =               NA,
-            "eta1~X1" = 55.6095030455868,
-            "eta1~X2" =   10.69185489681,
-            "Y2~eta1" = 12.0538338192516,
-            "Y3~eta1" = 1.68984160423946,
-            "Y3~X1" = 1.98887307453212,
-            "Y1~~Y1" = 12.7884282564093,
-            "eta1~~eta1" = 9.24396149670392,
-            "Y2~~Y2" = 12.5520589048566,
-            "Y3~~Y3" = 3.19288665194889,
-            "Y1" =               NA,
-            "eta1" =  51.033476570524,
-            "Y2" = 32.4702969907481,
-            "Y3" = 5.69386974149653)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-## * LVM: factor model with constraints
-## ** model fit
-e.lvm <- estimate(lvm(Y1~1*eta+1*X2,Y2~1*eta,Y3~1*eta),
-                  data = d)
-
-e.lvm2 <- estimate(lvm(Y1~1*eta+X2,
-                       Y2~lambda*eta+X2,
-                       Y3~lambda*eta,
-                       eta ~ beta*X2+beta*X1),
-                   data = d)
-
-## ** test df
-test_that("factor model with constrains: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <-  c("Y1~eta" =               NA,
-             "Y1~X2" =               NA,
-             "Y2~eta" =               NA,
-             "Y3~eta" =               NA,
-             "Y1~~Y1" = 13.0283303539105,
-             "eta~~eta" = 13.3180611458203,
-             "Y2~~Y2" = 14.6602148987303,
-             "Y3~~Y3" =  13.336755056036,
-             "Y1" =               NA,
-             "eta" = 65.4311665778315,
-             "Y2" = 55.8228293621142,
-             "Y3" = 65.2565795061356)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-
-    ## printDF(e.lvm2, bias.correct = FALSE)
-    df <-  c("Y1~eta" =               NA,
-             "Y1~X2" = 69.0600280017674,
-             "eta~X2" = 72.4161865968702,
-             "eta~X1" =               NA,
-             "Y2~eta" = 9.69151052458835,
-             "Y2~X2" = 79.5362655494372,
-             "Y3~eta" =               NA,
-             "Y1~~Y1" = 15.8090377333579,
-             "eta~~eta" = 9.84379200456075,
-             "Y2~~Y2" = 16.0628919232454,
-             "Y3~~Y3" = 13.0030782764756,
-             "Y1" =               NA,
-             "eta" = 53.7056242594302,
-             "Y2" = 33.2159634933677,
-             "Y3" = 44.9825684761348)
-    expect_equal(summary2(e.lvm2, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-
-})
-
-test_that("factor model with constrains: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta" =               NA,
-            "Y1~X2" =               NA,
-            "Y2~eta" =               NA,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 12.7677637468323,
-            "eta~~eta" = 13.0516999229039,
-            "Y2~~Y2" = 14.3670106007557,
-            "Y3~~Y3" = 13.0700199549153,
-            "Y1" =               NA,
-            "eta" = 64.1225432462749,
-            "Y2" =  54.706372774872,
-            "Y3" =  63.951447916013)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ##
-
-    ## printDF(e.lvm2, bias.correct = TRUE)
-    df <- c("Y1~eta" =               NA,
-            "Y1~X2" =  65.226689786171,
-            "eta~X2" = 68.3697577738816,
-            "eta~X1" =               NA,
-            "Y2~eta" = 9.28152750240834,
-            "Y2~X2" = 75.9794793068646,
-            "Y3~eta" =               NA,
-            "Y1~~Y1" = 14.9386883202788,
-            "eta~~eta" = 9.23004100483922,
-            "Y2~~Y2" = 15.2672594957146,
-            "Y3~~Y3" = 12.6319294995649,
-            "Y1" =               NA,
-            "eta" = 50.7582990669443,
-            "Y2" = 31.4593748928189,
-            "Y3" = 43.0867331053962)
-    expect_equal(summary2(e.lvm2, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ##
-   
-})
-
-## * LVM: 2 factor model
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(m) <- eta1~X1+X2
-latent(m) <- ~eta1+eta2
-
-e.lvm <- estimate(m,d)
-
-## ** test df
-test_that("2 factor model: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta1" =               NA,
-            "eta1~X1" = 58.4757578087337,
-            "eta1~X2" = 10.8872658714826,
-            "Y2~eta1" = 12.6503503160925,
-            "Y3~eta1" = 1.66449908523609,
-            "Y3~X1" = 1.94477634615581,
-            "Z1~eta2" =               NA,
-            "Z2~eta2" = 5.13217660987868,
-            "Z3~eta2" = 1.02808174561543,
-            "Z3~X3" = 50.0000000000001,
-            "Y1~~Y1" = 13.4119785866779,
-            "eta1~~eta1" =  9.7783707750495,
-            "Y2~~Y2" = 13.1191682660331,
-            "Y3~~Y3" = 3.08049828807642,
-            "Z1~~Z1" = 10.2604884164458,
-            "eta2~~eta2" = 7.15722190963717,
-            "Z2~~Z2" = 11.2873117817945,
-            "Z3~~Z3" = 1.29830427629455,
-            "Y1" =               NA,
-            "eta1" = 53.5326451480583,
-            "Y2" = 33.8519153928353,
-            "Y3" = 5.63733390756826,
-            "Z1" =               NA,
-            "eta2" =               50,
-            "Z2" = 12.8319846641959,
-            "Z3" = 1.86432248627634)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-test_that("2 factor model: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta1" =               NA,
-            "eta1~X1" = 55.6095055473752,
-            "eta1~X2" = 10.6918550078085,
-            "Y2~eta1" = 12.0538312418171,
-            "Y3~eta1" = 1.68984165232773,
-            "Y3~X1" =  1.9888731675747,
-            "Z1~eta2" =               NA,
-            "Z2~eta2" = 4.29439517300613,
-            "Z3~eta2" = 1.01287510875383,
-            "Z3~X3" = 47.9896226830952,
-            "Y1~~Y1" = 12.7884284893288,
-            "eta1~~eta1" =  9.2439595074979,
-            "Y2~~Y2" = 12.5520589907774,
-            "Y3~~Y3" = 3.19288640546866,
-            "Z1~~Z1" = 9.62175964330932,
-            "eta2~~eta2" = 5.76218376653209,
-            "Z2~~Z2" =  10.754309181554,
-            "Z3~~Z3" =  1.3442891734018,
-            "Y1" =               NA,
-            "eta1" = 51.0334789747374,
-            "Y2" = 32.4702926711208,
-            "Y3" = 5.69386915546178,
-            "Z1" =               NA,
-            "eta2" = 48.2092603391128,
-            "Z2" = 11.4347010520163,
-            "Z3" = 1.70873484420441)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-## * LVM: 2 factor model with constraints
-## ** model fit
-m <- lvm(Y1~1*eta1+X2,Y2~lambda*eta1+X2,Y3~lambda*eta1,eta1 ~ beta*X2+beta*X1,
-         Z1~0+eta2,Z2~lambda*eta2,Z3~eta2)
-e.lvm <- estimate(m, d)
-e2.lvm <- e.lvm
-
-## ** test df
-test_that("2 factor model with constrains: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <-  c("Y1~eta1" =               NA,
-             "Y1~X2" = 71.8402328650548,
-             "eta1~X2" = 70.4776885380334,
-             "eta1~X1" =               NA,
-             "Y2~eta1" = 11.6960236660969,
-             "Y2~X2" = 81.3187195450428,
-             "Y3~eta1" =               NA,
-             "Z1~eta2" =               NA,
-             "Z2~eta2" =               NA,
-             "Z3~eta2" =  1.3843686550126,
-             "Y1~~Y1" = 16.4264277692649,
-             "eta1~~eta1" = 10.0266166285231,
-             "Y2~~Y2" = 15.9811366004273,
-             "Y3~~Y3" = 12.9879081515096,
-             "Z1~~Z1" =  14.333152850676,
-             "eta2~~eta2" = 10.3356317755694,
-             "Z2~~Z2" =  14.049054309863,
-             "Z3~~Z3" = 3.85929725387927,
-             "Y1" =               NA,
-             "eta1" = 53.6044335618639,
-             "Y2" = 36.2614266452424,
-             "Y3" = 47.3109927669215,
-             "Z1" =               NA,
-             "eta2" = 54.7953340215845,
-             "Z2" =   34.66624553191,
-             "Z3" = 3.27226847220648)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-test_that("2 factor model with constrains: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta1" =               NA,
-            "Y1~X2" = 68.0113221224538,
-            "eta1~X2" = 66.6077972427716,
-            "eta1~X1" =               NA,
-            "Y2~eta1" = 11.2408944185087,
-            "Y2~X2" = 77.7269315920684,
-            "Y3~eta1" =               NA,
-            "Z1~eta2" =               NA,
-            "Z2~eta2" =               NA,
-            "Z3~eta2" = 1.35704893019338,
-            "Y1~~Y1" = 15.5089493754438,
-            "eta1~~eta1" = 9.41781854032817,
-            "Y2~~Y2" = 15.1899683888099,
-            "Y3~~Y3" = 12.6199235511737,
-            "Z1~~Z1" = 14.0484102543646,
-            "eta2~~eta2" = 10.0631956693674,
-            "Z2~~Z2" = 13.7710175900207,
-            "Z3~~Z3" = 3.78212082916409,
-            "Y1" =               NA,
-            "eta1" = 50.7194725113502,
-            "Y2" =  34.397316568393,
-            "Y3" = 45.4582268189079,
-            "Z1" =               NA,
-            "eta2" = 53.6884108360234,
-            "Z2" = 33.7172204457595,
-            "Z3" = 3.20514896820429)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-## * LVM: 2 factor model (covariance)
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-covariance(m) <- eta1 ~ eta2
-latent(m) <- ~eta1+eta2
-
-e.lvm <- estimate(m,d)
-
-## ** test df
-test_that("2 factor model (covariance): Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <-  c("Y1~eta1" =               NA,
-             "Y2~eta1" = 10.3311839234591,
-             "Y3~eta1" = 6.89676162244621,
-             "Y3~X1" = 50.0009118560904,
-             "Z1~eta2" =               NA,
-             "Z2~eta2" = 5.21780223264166,
-             "Z3~eta2" = 1.28142409755873,
-             "Z3~X3" = 50.0009118560906,
-             "Y1~~Y1" = 11.8346316264266,
-             "eta1~~eta1" = 12.7693197454533,
-             "eta1~~eta2" = 5.12579807333608,
-             "Y2~~Y2" = 13.6592519987499,
-             "Y3~~Y3" = 13.0926110099904,
-             "Z1~~Z1" = 11.3663608914827,
-             "eta2~~eta2" = 7.04643330784406,
-             "Z2~~Z2" =  12.226224178805,
-             "Z3~~Z3" = 1.73380172595227,
-             "Y1" =               NA,
-             "eta1" =               50,
-             "Y2" = 33.1988285063178,
-             "Y3" = 23.9858199401525,
-             "Z1" =               NA,
-             "eta2" = 50.0000000000002,
-             "Z2" = 13.1992959295212,
-             "Z3" = 2.36363262783338)
-    expect_equal(summary2(e.lvm, bias.correct = FALSE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-test_that("2 factor model (covariance): Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta1" =               NA,
-            "Y2~eta1" = 9.58349979014013,
-            "Y3~eta1" =  6.6486275006551,
-            "Y3~X1" = 47.9891761341331,
-            "Z1~eta2" =               NA,
-            "Z2~eta2" = 4.43249430163883,
-            "Z3~eta2" = 1.25054115806101,
-            "Z3~X3" = 47.9905341990707,
-            "Y1~~Y1" = 11.0158265784989,
-            "eta1~~eta1" = 12.4789600763067,
-            "eta1~~eta2" = 4.74482430527407,
-            "Y2~~Y2" = 13.3536495981587,
-            "Y3~~Y3" = 12.8019178207734,
-            "Z1~~Z1" = 10.6708758416369,
-            "eta2~~eta2" = 6.07189801632622,
-            "Z2~~Z2" = 11.6954988957563,
-            "Z3~~Z3" =  1.7905368531979,
-            "Y1" =               NA,
-            "eta1" = 48.6681581950846,
-            "Y2" = 31.8998071401989,
-            "Y3" = 22.9660180992877,
-            "Z1" =               NA,
-            "eta2" = 48.2861885270471,
-            "Z2" =  11.886664758447,
-            "Z3" = 2.17616519510875)
-    expect_equal(summary2(e.lvm, bias.correct = TRUE)$coef$df,
-                 as.double(df)) ## 
-   
-})
-
-## * LVM: 2 factor model (correlation LV)
-## ** model fit
-m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
-           Z1~eta2,Z2~eta2,Z3~eta2+X3))
-regression(m) <- eta2 ~ X1
-regression(m) <- eta1 ~ eta2+X2+X3
-
-e.lvm <- estimate(m,d)
-## ** test df
-test_that("2 factor model with correlation: Satterthwaite (df)", {
-    ## printDF(e.lvm, bias.correct = FALSE)
-    df <- c("Y1~eta1" = NA,
-            "eta1~eta2" = 7.589758,
-            "eta1~X3" = 29.344241,
-            "eta1~X2" = 28.544507,
-            "Y2~eta1" = 10.329613,
-            "Y3~eta1" = 6.948063,
-            "Y3~X1" = 50.130493,
-            "Z1~eta2" = NA,
-            "eta2~X1" = 7.199988,
-            "Z2~eta2" = 5.317187,
-            "Z3~eta2" = 1.578974,
-            "Z3~X3" = 51.112714,
-            "Y1~~Y1" = 12.115728,
-            "eta1~~eta1" = 11.798156,
-            "Y2~~Y2" = 13.659673,
-            "Y3~~Y3" = 13.247963,
-            "Z1~~Z1" = 12.134005,
-            "eta2~~eta2" = 7.073715,
-            "Z2~~Z2" = 12.808074,
-            "Z3~~Z3" = 2.357478,
-            "Y1" = NA,
-            "eta1" = 26.791170,
-            "Y2" = 33.325329,
-            "Y3" = 24.126234,
-            "Z1" = NA,
-            "eta2" = 50.252830,
-            "Z2" = 13.570638,
-            "Z3" = 3.000578)
-    expect_equal(as.double(df),
-                 suppressWarnings(summary2(e.lvm, bias.correct = FALSE)$coef$df)
-                 )
-})
-
-test_that("2 factor model with correlation: Satterthwaite + SSC (df)", {
-    ## printDF(e.lvm, bias.correct = TRUE)
-    df <- c("Y1~eta1" =               NA,
-            "eta1~eta2" = 6.92084347005809,
-            "eta1~X3" = 28.7806435824005,
-            "eta1~X2" = 28.0048020209966,
-            "Y2~eta1" = 9.78669612407933,
-            "Y3~eta1" = 6.95748573225155,
-            "Y3~X1" = 47.3760723713175,
-            "Z1~eta2" =               NA,
-            "eta2~X1" = 6.51590190860975,
-            "Z2~eta2" = 4.27555935788741,
-            "Z3~eta2" = 1.54668374474335,
-            "Z3~X3" = 48.0870623502859,
-            "Y1~~Y1" = 11.5749134030209,
-            "eta1~~eta1" = 11.5462469627778,
-            "Y2~~Y2" = 13.2162556803891,
-            "Y3~~Y3" = 13.0315399607834,
-            "Z1~~Z1" = 11.0267345810053,
-            "eta2~~eta2" = 6.02873123765679,
-            "Z2~~Z2" = 12.1044846453221,
-            "Z3~~Z3" = 2.57541058844509,
-            "Y1" =               NA,
-            "eta1" =  25.286034977098,
-            "Y2" = 32.5523755802979,
-            "Y3" = 23.8146546187347,
-            "Z1" =               NA,
-            "eta2" = 47.8752187652239,
-            "Z2" = 11.9821151867928,
-            "Z3" = 2.75496365860703)
-    expect_equal(as.double(df),
-                 summary2(e.lvm, bias.correct = TRUE)$coef$df)
-})
-
-######################################################################
-### test1-sCorrect-summary2.R ends here
diff --git a/tests/testthat/test1-sCorrect-validObjects.R b/tests/testthat/test1-sCorrect-validObjects.R
deleted file mode 100644
index 62963e6..0000000
--- a/tests/testthat/test1-sCorrect-validObjects.R
+++ /dev/null
@@ -1,113 +0,0 @@
-### test1-sCorrect-validObjects.R --- 
-##----------------------------------------------------------------------
-## Author: Brice Ozenne
-## Created: mar  6 2018 (10:42) 
-## Version: 
-## Last-Updated: jul 31 2020 (10:46) 
-##           By: Brice Ozenne
-##     Update #: 64
-##----------------------------------------------------------------------
-## 
-### Commentary: 
-## 
-### Change Log:
-##----------------------------------------------------------------------
-## 
-### Code:
-
-## * header
-rm(list = ls())
-if(FALSE){ ## already called in test-all.R    
-    library(testthat)
-    library(lavaSearch2)
-}
-library(data.table)
-library(nlme)
-lava.options(symbols = c("~","~~"))
-context("sCorrect (warnings and errors for invalid objects/arguments)")
-
-## * Simulation
-n <- 100
-m.sim <- lvm(Y~X1+X2,G~1)
-categorical(m.sim,K=3,label=c("a","b","c")) <- ~G+X2
-set.seed(10)
-d <- lava::sim(m.sim,n,latent=FALSE)
-
-## * sCorrect for lvm objects
-
-## ** error for multigroup lvm
-## check in sCorrect.R 
-suppressWarnings(e <- estimate(list(lvm(Y~X1),lvm(Y~X1),lvm(Y~X1)), data = split(d,d$G)))
-test_that("error for multigroup models", {
-    expect_error(sCorrect(e))
-})
-
-## ** error for tobit lvm
-## check in sCorrect.R
-## if(require(lava.tobit)){
-##     e <- estimate(lvm(G~X1), data = d)
-##     test_that("error for tobit models", {
-##         expect_error(sCorrect(e))
-##     })
-## }
-
-## ** error for lvm with transform variables
-## check in sCorrect.R
-m <- lvm(Y~X1)
-transform(m,Id~X1) <- function(x){1:NROW(x)}
-d.tempo <- lava::sim(m, n)
-e <- estimate(m, data = d.tempo)
-test_that("error when using transform", {
-    expect_error(sCorrect(e))
-})
-
-## * sCorrect for nlme objects
-
-## ** warning for the satterthwaite approx. with REML
-## check in sCorrect.R 
-e <- gls(Y~X1, data = d, correlation = corCompSymm(form =~1|G),
-         method = "REML")
-test_that("no warning when using no correction", {
-    expect_warning(sCorrect(e, df = FALSE, trace = 0) <- FALSE, regexp = NA)
-})
-
-## ** warning for the small sample correction estimated with REML
-## check in sCorrect.R
-e <- gls(Y~X1, data = d, correlation = corCompSymm(form =~1|G))
-test_that("warning when using nlme with REML and small sample correction", {
-    expect_warning(sCorrect(e, df = FALSE, trace = 0) <- TRUE)
-})
-
-## ** error for more than one random effect
-## check in Utils-nlme.R (.getVarCov)
-e <- lme(Y~X1, random =~ 1|G / X2, data = d, method = "ML")
-test_that("error when using nlme with several random effects", {
-    expect_error(sCorrect(e, trace = 0))
-})
-
-## ** error for non-standard correlation structure
-## check in Utils-nlme.R (.getIndexOmega2)
-e <- gls(Y~X1, data = d, correlation = corAR1(form =~1|G), method = "ML")
-test_that("error when using nlme with non standard correlation", {
-    expect_error(sCorrect(e, trace = 0))
-})
-
-## ** error for non-standard variance structure
-## check in Utils-nlme.R (.getIndexOmega2)
-e <- gls(Y~X1, data = d, weight = varExp(form =~X1), method = "ML")
-test_that("error when using nlme with non standard variance", {
-    expect_error(sCorrect(e, cluster = 1:NROW(d), trace = 0))
-})
-
-
-## * sCorrect with data.table
-
-e <- estimate(lvm(Y~X1+X2+G), data = as.data.table(d))
-test_that("ok for data.table objects", {
-    sCorrect(e) <- FALSE
-    sCorrect(e) <- TRUE
-})
-
-##----------------------------------------------------------------------
-### test1-sCorrect-validObjects.R ends here
-
diff --git a/tests/testthat/test1a-sCorrect-validObjects.R b/tests/testthat/test1a-sCorrect-validObjects.R
new file mode 100644
index 0000000..e4ddb82
--- /dev/null
+++ b/tests/testthat/test1a-sCorrect-validObjects.R
@@ -0,0 +1,70 @@
+### test1-sCorrect-validObjects.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: mar  6 2018 (10:42) 
+## Version: 
+## Last-Updated: Jan 11 2022 (09:07) 
+##           By: Brice Ozenne
+##     Update #: 68
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * header
+## rm(list = ls())
+if(FALSE){ ## already called in test-all.R    
+    library(testthat)
+    library(lavaSearch2)
+}
+library(data.table)
+lava.options(symbols = c("~","~~"))
+context("sCorrect (warnings and errors for invalid objects/arguments)")
+
+## * Simulation
+n <- 100
+m.sim <- lvm(Y~X1+X2,G~1)
+categorical(m.sim,K=3,label=c("a","b","c")) <- ~G+X2
+set.seed(10)
+d <- lava::sim(m.sim,n,latent=FALSE)
+
+## * estimate2 for lvm objects
+
+## ** error for multigroup lvm
+## check in sCorrect.R 
+suppressWarnings(e <- estimate(list(lvm(Y~X1),lvm(Y~X1),lvm(Y~X1)), data = split(d,d$G)))
+test_that("error for multigroup models", {
+    expect_error(estimate2(e))
+})
+
+## ** error for tobit lvm
+## check in sCorrect.R
+e <- estimate(lvm(G~X1), data = d)
+test_that("error for tobit models", {
+    expect_error(estimate2(e))
+})
+
+## ** error for lvm with transform variables
+## check in sCorrect.R
+m <- lvm(Y~X1)
+transform(m,Id~X1) <- function(x){1:NROW(x)}
+d.tempo <- lava::sim(m, n)
+e <- estimate(m, data = d.tempo)
+test_that("error when using transform", {
+    expect_error(estimate2(e))
+})
+
+## * estimate2 with data.table
+dt <- as.data.table(d)
+e <- estimate(lvm(Y~X1+X2+G), data = dt)
+test_that("ok for data.table objects", {
+    expect_true(inherits(estimate2(e), "lvmfit2"))
+})
+
+##----------------------------------------------------------------------
+### test1-sCorrect-validObjects.R ends here
+
diff --git a/tests/testthat/test1b-sCorrect-conditionalMoment.R b/tests/testthat/test1b-sCorrect-conditionalMoment.R
new file mode 100644
index 0000000..df23aaa
--- /dev/null
+++ b/tests/testthat/test1b-sCorrect-conditionalMoment.R
@@ -0,0 +1,533 @@
+### test1-conditionalMoment.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: mar 27 2018 (09:50) 
+## Version: 
+## Last-Updated: jan 19 2022 (11:00) 
+##           By: Brice Ozenne
+##     Update #: 102
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## Compare the computation of the score/information matrix/residuals between sCorrect and lava
+## Compare the computation of the hessian/derivative of the information matrix using analytical formulae vs. numerical derivatives
+##
+## NOTE: iid in lava uses a numerical derivative to compute the information matrix,
+## which is why iid2.lvm and iid.lvm do not match perfectly
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
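+
+## (editor's sketch, illustrative only) the NOTE above can be checked by
+## differentiating the score numerically and comparing with the analytic
+## information, which agree only up to numerical error, e.g.:
+## I.num <- -numDeriv::jacobian(function(p){as.vector(lava::score(e.lvm, p = p))},
+##                              coef(e.lvm))
+## range(I.num - lava::information(e.lvm))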
+
+
+## * header
+## rm(list = ls())
+if(FALSE){ ## already called in test-all.R
+    library(testthat)
+    library(lavaSearch2)
+    library(nlme)
+}
+lava.options(symbols = c("~","~~"))
+context("conditionalMoment")
+
+test.secondOrder <- TRUE
+
+## * simulation
+cat("- simulation \n")
+n <- 5e1
+mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
+              Z1~eta2,Z2~eta2,Z3~eta2+X3,
+              X4~1,X5~1))
+regression(mSim) <- eta1~X1+Gender
+latent(mSim) <- ~eta1+eta2
+categorical(mSim, labels = c("Male","Female")) <- ~Gender
+transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
+set.seed(10)
+d <- lava::sim(mSim, n = n, latent = FALSE)
+dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","X4","X5","Gender"),
+                     measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
+
+## * multiple linear regression
+cat("- multiple linear regression \n")
+    
+## ** no constraints
+e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
+e.lm <- lm(Y1~X1, data = d)
+lvm2lm <- c("(Intercept)" = "Y1", "X1" = "Y1~X1", "sigma2" = "Y1~~Y1")
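+## (editor's note) lvm2lm maps each lm coefficient name to the corresponding
+## lvm parameter name, so the two fits of Y1~X1 can be compared term by term.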
+
+test_that("linear regression (ML) - no constrains",{
+    expect_equal(as.double(logLik(e.lvm)), -277.9339, tol = 1e-6)
+    
+    test.lvm <- estimate2(e.lvm, ssc = "none", df = if(test.secondOrder){"satterthwaite"}else{"none"}, derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+    
+    expect_equal(lava::score(e.lvm, indiv = TRUE),
+                 score2(test.lvm, indiv = TRUE),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(coef(e.lvm),
+                 coef2(test.lvm),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+    
+    if(test.secondOrder){
+        
+        X <- lapply(list(Y1~X1,Y2~X2,Y3~X1+X3), model.matrix, d)
+        sigma2 <- list(coef(e.lvm)["Y1~~Y1"],
+                       coef(e.lvm)["Y2~~Y2"],
+                       coef(e.lvm)["Y3~~Y3"])
+
+        dI <- mapply(X,sigma2, FUN = function(iX,iSigma){
+            bdiag(crossprod(iX)/iSigma^2,n/(iSigma^3))
+        })
+        vcov <- mapply(X,sigma2, FUN = function(iX,iSigma){
+            solve(bdiag(crossprod(iX)/iSigma,n/(2*iSigma^2)))
+        })
+        GS <- mapply(vcov, dI, FUN = function(x,y){
+            as.matrix(x %*% y %*% x)
+        })
+        nameCoef.Y <- lapply(list("Y1","Y2","Y3"), function(iY){grep(iY, names(coef(e.lvm)), value = TRUE)})
+
+        testN.lvm <- estimate2(e.lvm, ssc = "none", df = "satterthwaite", derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+        expect_equal(test.lvm$sCorrect$hessian,testN.lvm$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test.lvm$sCorrect$dVcov.param, testN.lvm$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(unname(test.lvm$sCorrect$dVcov.param[nameCoef.Y[[1]],nameCoef.Y[[1]],"Y1~~Y1"]), unname(GS[[1]]), tol = 1e-7)
+        expect_equal(unname(test.lvm$sCorrect$dVcov.param[nameCoef.Y[[2]],nameCoef.Y[[2]],"Y2~~Y2"]), unname(GS[[2]]), tol = 1e-7)
+        expect_equal(unname(test.lvm$sCorrect$dVcov.param[nameCoef.Y[[3]],nameCoef.Y[[3]],"Y3~~Y3"]), unname(GS[[3]]), tol = 1e-7)
+        expect_equal(test.lvm$sCorrect$dRvcov.param,testN.lvm$sCorrect$dRvcov.param, tol = 1e-7)
+    }
+})
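+## (editor's note) rationale for GS above: with V = I^{-1} the chain rule
+## gives dV/d(sigma2) = -V %*% (dI/d(sigma2)) %*% V; the dI blocks built in
+## the test are the negated derivatives -dI/d(sigma2) (e.g. X'X/sigma2^2 for
+## the mean block), hence GS = V %*% dI %*% V.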
+
+test_that("linear regression (ML+1) - no constrains",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+    
+    expect_equal(lava::score(e.lvm, p = newcoef, indiv = TRUE),
+                 score2(test.lvm, indiv = TRUE),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm, p = newcoef),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+## ** constraints and covariance link
+m <- lvm(Y1[mu1:sigma]~X1,
+         Y2[mu2:sigma]~X2,
+         Y3~X1+X3,
+         Y2~~Y3)
+e.lvm <- estimate(m, data = d)
+
+test_that("linear regression - constrains and covariance",{
+    expect_equal(as.double(logLik(e.lvm)), -272.5088, tol = 1e-6)
+
+    test <- estimate2(e.lvm, ssc = "none", df = if(test.secondOrder){"satterthwaite"}else{NA}, derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+    
+    expect_equal(unname(lava::score(e.lvm, indiv = TRUE)),
+                 unname(score2(test, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test)),
+                 tol = 1e-8)
+    expect_equal(unname(coef(e.lvm)),
+                 unname(coef2(test)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test, type = "response")),
+                 tol = 1e-8)
+
+    if(test.secondOrder){
+        testN <- estimate2(e.lvm, ssc = "none", df = "satterthwaite", derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+        keep.param <- dimnames(test$sCorrect$dVcov.param)[[3]]
+        zero.param <- setdiff(dimnames(testN$sCorrect$dVcov.param)[[3]], keep.param)
+        expect_equal(test$sCorrect$hessian,testN$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test$sCorrect$dVcov.param,testN$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(test$sCorrect$dRvcov.param,testN$sCorrect$dRvcov.param, tol = 1e-7)
+
+        ## compare to previous versions
+        GS <- matrix(c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.00118648, 0.01999964, 0, -0.00554516, 3.94e-06, -3.01e-05, 0, 0, 0, 0, 0.01999964, -4.014e-05, 0, -1.68e-06, 0.00086362, -3.14e-05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.00554516, -1.68e-06, 0, -0.02591598, 1.841e-05, -0.00014069, 0, 0, 0, 0, 3.94e-06, 0.00086362, 0, 1.841e-05, -0.01884834, -0.00143231, 0, 0, 0, 0, -3.01e-05, -3.14e-05, 0, -0.00014069, -0.00143231, -0.01661326, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05139595, 0.05281454, 0, 0, 0, 0, 0, 0, 0, 0.05139595, -0.02433603, 0.0498916, 0, 0, 0, 0, 0, 0, 0, 0.05281454, 0.0498916, 0), 
+                     nrow = 10, 
+                     ncol = 10, 
+                     dimnames = list(c("Y1", "Y2", "Y3", "Y1~X1", "Y2~X2", "Y3~X1", "Y3~X3", "Y1~~Y1", "Y3~~Y3", "Y2~~Y3"),c("Y1", "Y2", "Y3", "Y1~X1", "Y2~X2", "Y3~X1", "Y3~X3", "Y1~~Y1", "Y3~~Y3", "Y2~~Y3")) 
+                     ) 
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y1","Y2~~Y3"],GS[,"Y1"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y2","Y2~~Y3"],GS[,"Y2"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y3","Y2~~Y3"],GS[,"Y3"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y1~X1","Y2~~Y3"],GS[,"Y1~X1"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y2~X2","Y2~~Y3"],GS[,"Y2~X2"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y3~X1","Y2~~Y3"],GS[,"Y3~X1"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y3~X3","Y2~~Y3"],GS[,"Y3~X3"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y1~~Y1","Y2~~Y3"],GS[,"Y1~~Y1"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y3~~Y3","Y2~~Y3"],GS[,"Y3~~Y3"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),"Y2~~Y3","Y2~~Y3"],GS[,"Y2~~Y3"],tol = 1e-6)
+        expect_equivalent(test$sCorrect$dVcov.param[rownames(GS),colnames(GS),"Y2~~Y3"],GS,tol = 1e-6)
+    }
+})
+
+test_that("linear regression (ML+1) - constrains and covariance",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+
+    expect_equal(coef(test.lvm), newcoef, tol = 1e-8)
+
+    expect_equal(unname(lava::score(e.lvm, p = newcoef, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(unname(lava::information(e.lvm, p = newcoef)),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+
+## * mixed model
+cat("- mixed model \n")
+
+## ** Compound symmetry
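+## A shared latent intercept with unit loadings and a common residual variance
+## induces a compound-symmetry marginal covariance: with tau = eta~~eta and
+## sigma2 the common residual variance, Var(Y) = tau*matrix(1,3,3) + sigma2*diag(3).
+## The LVM fit should therefore agree with the lme (random intercept) and
+## gls (corCompSymm) fits below.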
+m <- lvm(Y1[mu1:sigma]~1*eta,
+         Y2[mu2:sigma]~1*eta,
+         Y3[mu3:sigma]~1*eta,
+         eta~X1+Gender)
+e.lvm <- estimate(m, d)
+
+e.lme <- lme(value ~ variable + X1 + Gender,
+             random =~ 1|Id,
+             data = dL[dL$variable %in% c("Y1","Y2","Y3"),],
+             method = "ML")
+
+e.gls <- gls(value ~ variable + X1 + Gender,
+             correlation = corCompSymm(form=~ 1|Id),
+             data = dL[dL$variable %in% c("Y1","Y2","Y3"),],
+             method = "ML")
+
+test_that("Compound symmetry", {
+    expect_equal(as.double(logLik(e.lme)),as.double(logLik(e.lvm)), tol = 1e-3)
+    expect_equal(as.double(logLik(e.gls)),as.double(logLik(e.lvm)), tol = 1e-3)
+    expect_equal(as.double(logLik(e.lvm)), -259.8317, tol = 1e-6)
+
+    test.lvm <- estimate2(e.lvm, ssc = "none", df = if(test.secondOrder){"Satterthwaite"}else{"none"},
+                          derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+
+    expect_equal(as.double(getVarCov(e.gls)),
+                 as.double(getVarCov2(test.lvm)),
+                 tol = 1e-3)
+    expect_equal(unname(lava::score(e.lvm, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(coef(e.lvm)),
+                 unname(coef2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+
+    if(test.secondOrder){
+        testN.lvm <- estimate2(e.lvm, ssc = "none", df = "Satterthwaite",
+                               derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+
+        expect_equal(test.lvm$sCorrect$hessian,testN.lvm$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test.lvm$sCorrect$dVcov.param,testN.lvm$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(test.lvm$sCorrect$dRvcov.param,testN.lvm$sCorrect$dRvcov.param, tol = 1e-7)
+
+        ## compare to previous versions
+        GS <- matrix(c(0.02860806, -0.02, -0.02, -0.00089185, -0.01529786, 0, 0, -0.02, 0.04, 0.02, 0, 0, 0, 0, -0.02, 0.02, 0.04, 0, 0, 0, 0, -0.00089185, 0, 0, 0.00645539, 0.00105925, 0, 0, -0.01529786, 0, 0, 0.00105925, 0.02723009, 0, 0, 0, 0, 0, 0, 0, 0.05700596, -0.01900199, 0, 0, 0, 0, 0, -0.01900199, 0.03500504), 
+                     nrow = 7, 
+                     ncol = 7, 
+                     dimnames = list(c("eta", "Y2", "Y3", "eta~X1", "eta~GenderFemale", "Y1~~Y1", "eta~~eta"),c("eta", "Y2", "Y3", "eta~X1", "eta~GenderFemale", "Y1~~Y1", "eta~~eta")) 
+                     )
+        expect_equal(test.lvm$sCorrect$dVcov.param[rownames(GS),colnames(GS),"Y1~~Y1"],GS,tol = 1e-6)
+    }
+})
+
+test_that("mixed model (ML+1) - CS",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+    
+    expect_equal(unname(lava::score(e.lvm, p = newcoef, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(unname(lava::information(e.lvm, p = newcoef)),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+## ** Unstructured 
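+## Freeing the Y1~~Y2 and Y1~~Y3 residual covariances on top of the shared
+## latent intercept gives 6 free parameters for the 3x3 covariance matrix,
+## i.e. an unstructured model, matching the gls/lme fits below with corSymm
+## correlation and varIdent weights.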
+m <- lvm(Y1~1*eta,
+         Y2~1*eta,
+         Y3~1*eta,
+         eta~X1+Gender)
+covariance(m) <- Y1~Y2
+covariance(m) <- Y1~Y3
+e.lvm <- estimate(m, d)
+
+e.lme <- lme(value ~ variable + X1 + Gender,
+             random =~ 1|Id,
+             correlation = corSymm(),
+             weights = varIdent(form =~ 1|variable),
+             data = dL[dL$variable %in% c("Y1","Y2","Y3"),],
+             method = "ML")
+
+e.gls <- gls(value ~ variable + X1 + Gender,
+             correlation = corSymm(form=~ 1|Id),
+             weights = varIdent(form =~ 1|variable),
+             data = dL[dL$variable %in% c("Y1","Y2","Y3"),],
+             method = "ML")
+
+test_that("Unstructured", {
+    expect_equal(as.double(logLik(e.lme)),as.double(logLik(e.lvm)), tol = 1e-3)
+    expect_equal(as.double(logLik(e.gls)),as.double(logLik(e.lvm)), tol = 1e-3)
+    expect_equal(as.double(logLik(e.lvm)), -258.8121, tol = 1e-6)
+
+    test.lvm <- estimate2(e.lvm, ssc = "none", df = if(test.secondOrder){"Satterthwaite"}else{NA},
+                          derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+
+    expect_equal(as.double(getVarCov(e.gls)),
+                 as.double(getVarCov2(test.lvm)),
+                 tol = 1e-3)
+    expect_equal(unname(lava::score(e.lvm, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(coef(e.lvm)),
+                 unname(coef2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+
+    if(test.secondOrder){
+        testN.lvm <- estimate2(e.lvm, ssc = "none", df = "Satterthwaite",
+                               derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+
+        expect_equal(test.lvm$sCorrect$hessian,testN.lvm$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test.lvm$sCorrect$dVcov.param,testN.lvm$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(test.lvm$sCorrect$dRvcov.param,testN.lvm$sCorrect$dRvcov.param, tol = 1e-7)
+
+        ## compare to previous versions
+        GS <- matrix(c(0.02337325, -0.02, -0.02, -0.00034949, -0.00599479, 0, 0, 0, 0, 0, 0, -0.02, 0.02, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, -0.02, 0.02, 0.02, 0, 0, 0, 0, 0, 0, 0, 0, -0.00034949, 0, 0, 0.00252968, 0.00041509, 0, 0, 0, 0, 0, 0, -0.00599479, 0, 0, 0.00041509, 0.0106707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.14197442, 0, 0, 0, 0.02650713, 0.02772787, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02650713, 0, 0, 0, 0.04155753, 0.00895138, 0, 0, 0, 0, 0, 0.02772787, 0, 0, 0, 0.00895138, 0.04452667), 
+                     nrow = 11, 
+                     ncol = 11, 
+                     dimnames = list(c("eta", "Y2", "Y3", "eta~X1", "eta~GenderFemale", "Y1~~Y1", "eta~~eta", "Y2~~Y2", "Y3~~Y3", "Y1~~Y2", "Y1~~Y3"),c("eta", "Y2", "Y3", "eta~X1", "eta~GenderFemale", "Y1~~Y1", "eta~~eta", "Y2~~Y2", "Y3~~Y3", "Y1~~Y2", "Y1~~Y3")) 
+                     ) 
+        expect_equal(test.lvm$sCorrect$dVcov.param[rownames(GS),colnames(GS),"Y1~~Y1"],GS,tol = 1e-6)
+    }
+
+})
+
+test_that("mixed model (ML+1) - UN",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+    
+    expect_equal(unname(lava::score(e.lvm, p = newcoef, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(unname(lava::information(e.lvm, p = newcoef)),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+## * factor model
+cat("- factor model \n")
+
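+## factor model where Z1 loads on eta and additionally shares residual
+## covariances with Y1 and Y2, so the residual covariance matrix is not
+## diagonal given the latent variable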
+m <- lvm(Y1~eta,
+         Y2~eta+X2,
+         Y3~eta,
+         Z1~eta, Z1~~Y1,Z1~~Y2,
+         eta~X1+X3)
+e.lvm <- estimate(m, d)
+
+test_that("factor model",{
+    expect_equal(as.double(logLik(e.lvm)), -334.2583, tol = 1e-6)
+
+    test <- estimate2(e.lvm, ssc = "none", df = if(test.secondOrder){"Satterthwaite"}else{NA},
+                      derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+    
+    expect_equal(lava::score(e.lvm, indiv = TRUE),
+                 score2(test, indiv = TRUE),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test)),
+                 tol = 1e-8)
+    expect_equal(coef(e.lvm),
+                 coef2(test),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test, type = "response")),
+                 tol = 1e-8)
+
+    if(test.secondOrder){
+        testN <- estimate2(e.lvm, ssc = "none", df = "Satterthwaite",
+                           derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+
+        expect_equal(test$sCorrect$hessian,testN$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test$sCorrect$dVcov.param,testN$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(test$sCorrect$dRvcov.param,testN$sCorrect$dRvcov.param, tol = 1e-7)
+
+        ## compare to previous versions
+        GS <- matrix(c(0.04792556, 0.04673097, 0.04673097, -0.00085306), 
+                     nrow = 2, 
+                     ncol = 2, 
+                     dimnames = list(c("Y2", "Y3"),c("Y2", "Y3")) 
+                     ) 
+        expect_equal(test$sCorrect$dVcov.param[paste0("Y",2:3),paste0("Y",2:3),"Y2~eta"],GS,tol = 1e-6)
+    }
+})
+
+test_that("factor model (ML+1)",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+    
+    expect_equal(unname(lava::score(e.lvm, p = newcoef, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(unname(lava::information(e.lvm, p = newcoef)),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+## * two factor model
+cat("- two factor model \n")
+
+## ** correlation
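+## two latent variables linked through a regression path (eta1~eta2); the
+## companion section below links them through a covariance (eta1~~eta2) instead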
+m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
+           Z1~eta2,Z2~eta2,Z3~eta2+X3,
+           eta1~eta2))
+
+e.lvm <- estimate(m, d)
+
+test_that("two factor model - correlation",{
+    expect_equal(as.double(logLik(e.lvm)), -518.3538, tol = 1e-6)
+
+    test <- estimate2(e.lvm, ssc = "none", df = if(test.secondOrder){"Satterthwaite"}else{NA},
+                      derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+    
+    expect_equal(lava::score(e.lvm, indiv = TRUE),
+                 score2(test, indiv = TRUE),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test)),
+                 tol = 1e-8)
+    expect_equal(coef(e.lvm),
+                 coef2(test),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test, type = "response")),
+                 tol = 1e-8)
+
+    if(test.secondOrder){
+        testN <- estimate2(e.lvm, ssc = "none", df = "Satterthwaite",
+                          derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+
+        expect_equal(test$sCorrect$hessian,testN$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test$sCorrect$dVcov.param,testN$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(test$sCorrect$dRvcov.param,testN$sCorrect$dRvcov.param, tol = 1e-7)
+    
+        ## compare to previous versions
+        GS <- matrix(c(-0.01772622, 0.01772622, -0.01540713, 0.07276457, 0.01772622, -0.01772622, 0.01540713, -0.07276457, -0.01540713, 0.01540713, -0.0133915, 0.06324503, 0.07276457, -0.07276457, 0.06324503, -0.29869238), 
+                     nrow = 4, 
+                     ncol = 4, 
+                     dimnames = list(c("Z1~~Z1", "eta2~~eta2", "Z2~~Z2", "Z3~~Z3"),c("Z1~~Z1", "eta2~~eta2", "Z2~~Z2", "Z3~~Z3")) 
+                     )
+        keep.param <-  c("Z1~~Z1", "eta2~~eta2", "Z2~~Z2", "Z3~~Z3")
+        expect_equal(test$sCorrect$dVcov.param[keep.param,keep.param,"eta1~eta2"],GS,tol = 1e-6)
+    }
+})
+
+test_that("two factor model (ML+1) - correlation",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+    
+    expect_equal(unname(lava::score(e.lvm, p = newcoef, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(unname(lava::information(e.lvm, p = newcoef)),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+## ** covariance
+m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,eta1~X1,
+           Z1~eta2,Z2~eta2,Z3~eta2+X3,eta2~X4,
+           eta1~~eta2))
+
+e.lvm <- estimate(m, d)
+
+test_that("two factor model - covariance",{
+    expect_equal(as.double(logLik(e.lvm)), -502.7317, tol = 1e-6)
+
+    test <- estimate2(e.lvm, ssc = "none", df = "Satterthwaite",
+                      derivative = "analytic", hessian = TRUE, dVcov.robust = TRUE)
+    
+    expect_equal(lava::score(e.lvm, indiv = TRUE),
+                 score2(test, indiv = TRUE),
+                 tol = 1e-8)
+    expect_equal(lava::information(e.lvm),
+                 unname(information2(test)),
+                 tol = 1e-8)
+    expect_equal(coef(e.lvm),
+                 coef2(test),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm)),
+                 unname(residuals2(test, type = "response")),
+                 tol = 1e-8)
+
+    if(test.secondOrder){
+        testN <- estimate2(e.lvm, ssc = "none", df = "Satterthwaite",
+                           derivative = "numeric", hessian = TRUE, dVcov.robust = TRUE)
+    
+        expect_equal(test$sCorrect$hessian,testN$sCorrect$hessian, tol = 1e-7)
+        expect_equal(test$sCorrect$dVcov.param,testN$sCorrect$dVcov.param, tol = 1e-7)
+        expect_equal(test$sCorrect$dRvcov.param,testN$sCorrect$dRvcov.param, tol = 1e-7)
+
+        GS <- matrix(c(-0.01561465, 0.01447463, -0.01301386, 0.05822372, 0.01447463, -0.01341929, 0.01206254, -0.05397059, -0.01301386, 0.01206254, -0.01091509, 0.04865601, 0.05822372, -0.05397059, 0.04865601, -0.21734976), 
+                     nrow = 4, 
+                     ncol = 4, 
+                     dimnames = list(c("Z1~~Z1", "eta2~~eta2", "Z2~~Z2", "Z3~~Z3"),c("Z1~~Z1", "eta2~~eta2", "Z2~~Z2", "Z3~~Z3")) 
+                     ) 
+        keep.param <-  c("Z1~~Z1", "eta2~~eta2", "Z2~~Z2", "Z3~~Z3")
+        expect_equal(test$sCorrect$dVcov.param[keep.param,keep.param,"eta1~~eta2"],GS,tol = 1e-6)
+    }
+})
+
+test_that("two factor model (ML+1) - covariance",{
+    newcoef <- coef(e.lvm)+1
+    test.lvm <- estimate2(e.lvm, param = newcoef, ssc = "none", df = "none")
+    
+    expect_equal(unname(lava::score(e.lvm, p = newcoef, indiv = TRUE)),
+                 unname(score2(test.lvm, indiv = TRUE)),
+                 tol = 1e-8)
+    expect_equal(unname(lava::information(e.lvm, p = newcoef)),
+                 unname(information2(test.lvm)),
+                 tol = 1e-8)
+    expect_equal(unname(residuals(e.lvm, p = newcoef)),
+                 unname(residuals2(test.lvm, type = "response")),
+                 tol = 1e-8)
+})
+
+##
+##----------------------------------------------------------------------
+### test1-conditionalMoment.R ends here
diff --git a/tests/testthat/test1c-sCorrect-ssc.R b/tests/testthat/test1c-sCorrect-ssc.R
new file mode 100644
index 0000000..25c83d0
--- /dev/null
+++ b/tests/testthat/test1c-sCorrect-ssc.R
@@ -0,0 +1,503 @@
+### test1-sCorrect-ssc.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: mar  7 2018 (12:08) 
+## Version: 
+## Last-Updated: jan 19 2022 (11:40) 
+##           By: Brice Ozenne
+##     Update #: 122
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * header
+## rm(list = ls())
+if(FALSE){ ## already called in test-all.R
+    library(testthat)
+    library(lavaSearch2)
+}
+
+lava.options(symbols = c("~","~~"))
+library(nlme)
+context("sCorrect (small sample correction)")
+
+## * simulation
+n <- 5e1
+mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
+              Z1~eta2,Z2~eta2,Z3~eta2+X3))
+regression(mSim) <- eta1~X1+Gender
+latent(mSim) <- ~eta1+eta2
+categorical(mSim, labels = c("Male","Female")) <- ~Gender
+transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
+set.seed(10)
+d <- lava::sim(mSim, n = n, latent = FALSE)
+dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
+                     measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
+dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
+dLred$variable.factor <- as.factor(dLred$variable)
+
+## * linear regression
+## ** univariate
+e.lm <- lm(Y1~X1+X2, data = d)
+e.gls <- gls(Y1~X1+X2, data = d)
+e.lvm <- estimate(lvm(Y1~X1+X2), data = d)
+
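+## For a univariate linear model the residual-based small-sample correction is
+## expected to recover the REML/OLS results: sigma(e.lm)^2 = RSS/(n-p) for the
+## residual variance and vcov(e.lm) for the mean coefficients.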
+test_that("linear regression - residual correction equivalent to REML", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+    
+    ## compare parameters
+    GS <- c(coef(e.lm), sigma(e.lm)^2)
+    expect_equal(unname(eSSC1.lvm$sCorrect$param),
+                 unname(GS), tol = 1e-6)
+
+    ## compare vcov
+    GS <- vcov(e.lm)
+    expect_equal(unname(eSSC1.lvm$sCorrect$vcov.param[1:3,1:3]),
+                 unname(GS), tol = 1e-6)
+        
+    ## model based
+    GS <- data.frame("estimate" = c(0.15110105, 1.08233491, -0.42993475, 1.82039942), 
+                     "se" = c(0.19577922, 0.18717083, 0.20041228, 0.37551973), 
+                     "df" = c(47, 47, 47, 11.75), 
+                     "lower" = c(-0.24275594, 0.70579577, -0.83311225, 1.00027731), 
+                     "upper" = c(0.54495805, 1.45887406, -0.02675726, 2.64052153), 
+                     "statistic" = c(0.77179308, 5.78260466, -2.14525157, NA), 
+                     "p.value" = c(0.44410038, 5.7e-07, 0.03713074, NA))
+    
+    expect_equivalent(GS,
+                      summary2(eSSC1.lvm)$table2, tol = 1e-6)
+    
+    ## robust
+    GS <- data.frame("estimate" = c(0.15110105, 1.08233491, -0.42993475, 1.82039942), 
+                     "se" = c(0.18025537, 0.2334738, 0.15426941, 0.3611838), 
+                     "df" = c(47, 47, 47, 11.75), 
+                     "lower" = c(-0.21152598, 0.61264622, -0.74028477, 1.03158648), 
+                     "upper" = c(0.51372808, 1.55202361, -0.11958474, 2.60921236), 
+                     "statistic" = c(0.83826102, 4.63578753, -2.78690869, NA), 
+                     "p.value" = c(0.40612711, 2.848e-05, 0.00765275, NA))
+    expect_equivalent(GS,
+                      summary2(eSSC1.lvm, robust = TRUE)$table2, tol = 1e-6)
+
+})
+
+## test_that("linear regression - Cox correction equivalent to REML", {
+##     eSSC2.lvm <- estimate2(e.lvm, ssc = "Cox")
+
+##     ## compare coef
+##     GS <- c(coef(e.lm), sigma(e.lm)^2)
+##     expect_equal(unname(eSSC2.lm$sCorrect$param),
+##                  unname(GS), tol = 1e-6)
+##     expect_equal(unname(eSSC2.lvm$sCorrect$param),
+##                  unname(GS), tol = 1e-6)
+    
+##     ## compare vcov
+##     GS <- vcov(e.lm)
+##     expect_equal(unname(eSSC2.lm$sCorrect$vcov.param[1:3,1:3]),
+##                  unname(GS), tol = 1e-6)
+##     expect_equal(unname(eSSC2.lvm$sCorrect$vcov.param[1:3,1:3]),
+##                  unname(GS), tol = 1e-6)
+
+##     ## compare JJK
+##     name.param <- c(names(coef(e.lm)),"sigma2")
+##     p <- length(name.param)
+##     JJK <- array(0, dim = rep(p,3), dimnames = list(name.param,name.param,name.param))
+##     X <- model.matrix(e.lm)
+
+##     JJK[name.param[1:3],name.param[1:3],"sigma2"] <- -crossprod(X)/sigma(e.lm)^4
+##     JJK[name.param[1:3],"sigma2",name.param[1:3]] <- -crossprod(X)/sigma(e.lm)^4
+##     JJK["sigma2",name.param[1:3],name.param[1:3]] <- crossprod(X)/sigma(e.lm)^4
+##     expect_equal(JJK, eSSC2.lm$sCorrect$ssc$JJK, tol = 1e-5)
+## })
+
+
+## ** multiple, no constraints
+e.gls <- gls(value ~ -1 + variable + variable:X1,
+             data = dLred,
+             weight = varIdent(form = ~1|variable),
+             method = "REML")
+e.lvm <- estimate(lvm(Y1 ~ X1,
+                      Y2 ~ X1,
+                      Y3 ~ X1),
+                  data = d)
+
+test_that("multiple linear regression - residual correction equivalent to REML", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+
+    ## lvm
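+    ## nlme's varIdent reports residual standard deviations relative to the
+    ## reference level (Y1, fixed at 1), hence variances are (ratio*sigma)^2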
+    GS <- c(intervals(e.gls)$coef[,2],
+            (c(Y1 = 1, intervals(e.gls)$varStruct[,2])*intervals(e.gls)$sigma[2])^2
+            )
+    
+    expect_equal(unname(eSSC1.lvm$sCorrect$param[c("Y1","Y2","Y3","Y1~X1","Y2~X1","Y3~X1","Y1~~Y1","Y2~~Y2","Y3~~Y3")]),
+                 unname(GS), tol = 1e-6)
+
+    GS <- vcov(e.gls)
+    expect_equal(unname(eSSC1.lvm$sCorrect$vcov.param[1:6,1:6]),
+                 unname(GS), tol = 1e-6)
+
+})
+
+## test_that("multiple linear regression - Cox correction equivalent to REML", {
+##     eSSC2.lvm <- estimate2(e.lvm, ssc = "Cox")
+##     ## eSSC2.gls <- sCorrect(e.gls, ssc = "Cox")
+##     ## eSSC2.glsN <- sCorrect(e.gls, ssc = "Cox", derivative = "numeric")
+
+##     ## range(eSSC2.gls$sCorrect$hessian-eSSC2.glsN$sCorrect$hessian)
+##     ## eSSC2.gls$sCorrect$information
+##     ## eSSC2.glsN$sCorrect$information
+
+##     ## lvm
+##     GS <- c(intervals(e.gls)$coef[,2],
+##             c(Y1 = 1, intervals(e.gls)$varStruct[,2]^2)*intervals(e.gls)$sigma[2]^2
+##             )
+    
+##     expect_equal(unname(eSSC2.lvm$sCorrect$param[c("Y1","Y2","Y3","Y1~X1","Y2~X1","Y3~X1","Y1~~Y1","Y2~~Y2","Y3~~Y3")]),
+##                  unname(GS), tol = 1e-6)
+    
+##     GS <- vcov(e.gls)
+##     expect_equal(unname(eSSC2.lvm$sCorrect$vcov.param[1:6,1:6]),
+##                  unname(GS), tol = 1e-6)
+
+##     ## gls
+##     ## GS <- c(intervals(e.gls)$coef[,2],
+##     ##         sigma2 = as.double(intervals(e.gls)$sigma[2]^2),
+##     ##         intervals(e.gls)$varStruct[,2]
+##     ##         )
+    
+## })
+
+## ** multiple, with constraints
+e.gls0 <- gls(value ~ variable-1 + X1,
+             data = dLred,
+             weight = varIdent(form = ~1|variable),
+             method = "ML")
+e.gls <- gls(value ~ variable-1 + X1,
+             data = dLred,
+             weight = varIdent(form = ~1|variable),
+             method = "REML")
+e.lvm <- estimate(lvm(Y1[mu1:sigma1]~ beta1*X1,
+                      Y2[mu2:sigma2]~ beta1*X1,
+                      Y3[mu3:sigma3]~ beta1*X1),
+                  data = d)
+## logLik(e.gls0)
+## logLik(e.gls)
+## logLik(e.lvm)
+
+## vcov(e.gls0)[1:4,1:4]/vcov(e.lvm)[1:4,1:4]
+
+
+test_that("multiple linear regression - residual correction equivalent to REML", {
+
+    expect_equal(information(e.lvm), unname(moments2(e.lvm)$information), tol = 1e-6)
+    
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+
+    GS <- c(intervals(e.gls)$coef[,2],
+            (c(Y1 = 1, intervals(e.gls)$varStruct[,2]) * intervals(e.gls)$sigma[2])^2)
+
+    ## not exactly equal to the REML estimates, but closer than without correction
+    expect_equal(unname(eSSC1.lvm$sCorrect$param),
+                 unname(GS), tol = 1e-3)
+})
+
+## test_that("multiple linear regression - Cox correction equivalent to REML", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+
+##     GS <- c(intervals(e.gls)$coef[,2],
+##             (c(Y1 = 1, intervals(e.gls)$varStruct[,2]) * intervals(e.gls)$sigma[2])^2)
+
+##     ## not precisely the same but better
+##     expect_equal(unname(eSSC2.lvm$sCorrect$param),
+##                  unname(GS), tol = 1e-3)
+
+## })
+
+
+## * mixed model
+## ** CS
+m <- lvm(c(Y1[0:sigma]~1*eta,
+           Y2[0:sigma]~1*eta,
+           Y3[0:sigma]~1*eta,
+           eta~X1+X2))
+latent(m) <- ~eta
+e.lvm <- estimate(m, d)
+
+e.lme <- nlme::lme(value ~ X1 + X2,
+                   random =~1| Id,
+                   data = dLred, method = "REML")
+
+e.gls <- nlme::gls(value ~ X1 + X2,
+                   correlation = corCompSymm(form = ~1| Id),
+                   data = dLred, method = "REML")
+
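+## For the random-intercept (CS) model the corrected variance components are
+## expected to be close to, but not exactly equal to, the REML estimates from
+## lme (hence the looser tolerance below).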
+test_that("mixed model (CS) - residual correction equivalent to REML", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+    ## eSSC1.lvm <- sCorrect(update(e.gls, method = "ML"), ssc = "residuals")
+
+    GS <- c(intervals(e.lme)$fixed[,2],
+            sigma2 = as.double(intervals(e.lme)$sigma[2])^2,
+            tau = intervals(e.lme)$reStruct$Id[,2,]^2)
+
+    ## not exactly equal to the REML estimates, but closer than without correction
+    expect_equal(unname(eSSC1.lvm$sCorrect$param),
+                 unname(GS), tol = 1e-4)
+    ## eSSC.lvm$sCorrect$param - GS
+})
+
+## test_that("mixed model (CS) - Cox correction equivalent to REML", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+##     ## coef(eSSC2.lvm) - coef(e.lvm)
+##     ## GS - coef(e.lvm)
+    
+##     GS <- c(intervals(e.lme)$fixed[,2],
+##             sigma2 = as.double(intervals(e.lme)$sigma[2])^2,
+##             tau = intervals(e.lme)$reStruct$Id[,2,]^2)
+##     GS2 <- c(intervals(e.gls)$coef[,2],
+##             sigma2 = sigma(e.gls)^2,
+##             corCoef1 = intervals(e.gls)$corStruct[1,2])
+
+##     ## not precisely the same but better
+##     expect_equal(unname(eSSC2.lvm$sCorrect$param),
+##                  unname(GS), tol = 1e-4)
+##     expect_equal(unname(eSSC2.lme$sCorrect$param),
+##                  unname(GS), tol = 1e-4)
+##     expect_equal(unname(eSSC2.gls$sCorrect$param),
+##                  unname(GS2), tol = 1e-2)
+##     ## eSSC.lvm$sCorrect$param - GS
+## })
+
+## ** CS with different variances
+m <- lvm(c(Y1[0:sigma1]~1*eta,
+           Y2[0:sigma2]~1*eta,
+           Y3[0:sigma3]~1*eta,
+           eta~X1+X2))
+latent(m) <- ~eta
+e.lvm <- estimate(m, d)
+
+e.lme <- nlme::lme(value ~ X1 + X2,
+                   random =~1| Id,
+                   weights = varIdent(form =~ 1|variable),
+                   data = dLred, method = "REML")
+
+e.gls <- nlme::gls(value ~ X1 + X2,
+                   correlation = corCompSymm(form = ~1| Id),
+                   weights = varIdent(form =~ 1|variable),
+                   data = dLred, method = "REML")
+
+test_that("mixed model (CS,weight) - residual correction differs from REML", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+
+    GS.lme <- c(intervals(e.lme)$fixed[,2],
+                Y1 = as.double(intervals(e.lme)$sigma[2])^2,
+                tau = intervals(e.lme)$reStruct$Id[,2,]^2,
+                intervals(e.lme)$varStruct[,2]^2 * as.double(intervals(e.lme)$sigma[2])^2)
+
+    GS <- c("eta" = 0.37740319, "eta~X1" = 1.32095068, "eta~X2" = -0.02166077, "Y1~~Y1" = 1.07114383, "Y2~~Y2" = 1.50935967, "Y3~~Y3" = 1.78871997, "eta~~eta" = 0.90068904)
+    expect_equal(unname(eSSC1.lvm$sCorrect$param),
+                 unname(GS), tol = 1e-6)
+    ## eSSC.lvm$sCorrect$param - GS
+    ## coef(e.lvm) - GS
+})
+
+## test_that("mixed model (CS,weight) - Cox correction equivalent to REML", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+##     eSSC2.gls <- sCorrect(e.gls, ssc = "Cox")
+##     eSSC2.lme <- sCorrect(e.lme, ssc = "Cox")
+##     ## GS - coef(e.lvm)
+##     ## coef(eSSC2.lvm) - coef(e.lvm)
+    
+##     GS <- c(intervals(e.lme)$fixed[,2],
+##             Y1 = as.double(intervals(e.lme)$sigma[2])^2,
+##             tau = intervals(e.lme)$reStruct$Id[,2,]^2,
+##             intervals(e.lme)$varStruct[,2]^2 * as.double(intervals(e.lme)$sigma[2])^2)
+
+##     GS2 <- c(intervals(e.lme)$fixed[,2],
+##              c(Y1 = 1,intervals(e.lme)$varStruct[,2])^2 * as.double(intervals(e.lme)$sigma[2])^2,
+##              tau = intervals(e.lme)$reStruct$Id[1,2])
+
+##     ## not precisely the same but better
+##     expect_equal(unname(eSSC2.lvm$sCorrect$param),
+##                  unname(GS), tol = 5e-3)
+##     eSSC2.lme$sCorrect$param - GS2
+##     ## eSSC.lvm$sCorrect$param - GS
+## })
+
+
+## ** UN
+m <- lvm(c(Y1~0+1*eta,
+           Y2~0+1*eta,
+           Y3~0+1*eta,
+           eta~X1+X2))
+covariance(m) <- Y1~Y2
+covariance(m) <- Y1~Y3
+e.lvm <- estimate(m, d)
+
+e.gls <- nlme::gls(value ~ X1 + X2,
+                   correlation = corSymm(form =~ 1| Id),
+                   weight = varIdent(form =~ 1|variable),
+                   data = dLred, method = "REML")
+
+e.lme <- nlme::lme(value ~ X1 + X2,
+                   random =~ 1|Id,
+                   correlation = corSymm(),
+                   weight = varIdent(form =~ 1|variable),
+                   data = dLred, method = "REML")
+
+test_that("mixed model (UN) - residual correction differs from REML", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals") 
+    ## eSSC1.lvm$sCorrect$param - coef(e.lvm)
+    
+    gls_sigma2 <- as.double(intervals(e.gls)$sigma[2])^2
+    gls_var <- c(Y1 = gls_sigma2, gls_sigma2 * intervals(e.gls)$varStruct[,2]^2)
+    gls_tau <- as.double(sqrt(gls_var)["Y2"] * sqrt(gls_var)["Y3"] * intervals(e.gls)$corStruct[3,2])
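+    ## reconstruct the LVM parametrisation from the gls fit: since Y2~~Y3 is
+    ## not a free parameter, the Y2-Y3 covariance plays the role of the latent
+    ## variance (eta~~eta) and the residual variances/covariances are the
+    ## marginal ones minus it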
+    
+    ## getVarCov2(eSSC1.lvm)
+    ## eSSC1.lvm$sCorrect$param
+
+    GS.gls <- c(intervals(e.gls)$coef[,2],
+                Y1 = as.double(gls_var["Y1"]) - gls_tau,
+                "eta~~eta" = gls_tau,
+                Y2 = as.double(gls_var["Y2"] - gls_tau),
+                Y3 = as.double(gls_var["Y3"] - gls_tau),
+                "Y1~~Y2" = as.double(sqrt(gls_var)["Y1"] * sqrt(gls_var)["Y2"] * intervals(e.gls)$corStruct[1,2] - gls_tau),
+                "Y1~~Y3" = as.double(sqrt(gls_var)["Y1"] * sqrt(gls_var)["Y3"] * intervals(e.gls)$corStruct[2,2] - gls_tau)
+                )
+
+    GS <- data.frame("estimate" = c(0.40605248, 1.34876061, 0.03171814, 1.36038582, 0.73014581, 1.59213818, 1.8781554, 0.20120537, 0.23243999), 
+                     "se" = c(0.16913807, 0.16170109, 0.17314067, 0.48960791, 0.36665137, 0.46517041, 0.50622562, 0.36375786, 0.36974), 
+                     "df" = c(48.99131541, 48.99131541, 48.99131541, 28.35663651, 18.85511933, 14.13124925, 13.54461812, 23.82210554, 23.7096419), 
+                     "lower" = c(0.06615527, 1.02380864, -0.31622263, 0.35803744, -0.03766363, 0.59531552, 0.78897477, -0.54985076, -0.53116071), 
+                     "upper" = c(0.74594969, 1.67371257, 0.37965891, 2.3627342, 1.49795525, 2.58896085, 2.96733602, 0.9522615, 0.99604068), 
+                     "statistic" = c(2.40071598, 8.34107309, 0.18319289, NA, NA, NA, NA, 0.55312996, 0.62865795), 
+                     "p.value" = c(0.0202054, 0, 0.85540273, NA, NA, NA, NA, 0.5853285, 0.53558303))
+
+    expect_equal(as.double(unlist(model.tables(eSSC1.lvm))),
+                 as.double(unlist(GS)), tol = 1e-6)
+})
+
+## test_that("mixed model (UN) - Cox correction equivalent to REML", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+
+##     ## eSSC2.lvm$sCorrect$param - coef(e.lvm)
+    
+##     gls_sigma2 <- as.double(intervals(e.gls)$sigma[2])^2
+##     gls_var <- c(Y1 = gls_sigma2, gls_sigma2 * intervals(e.gls)$varStruct[,2]^2)
+##     gls_tau <- as.double(sqrt(gls_var)["Y2"] * sqrt(gls_var)["Y3"] * intervals(e.gls)$corStruct[3,2])
+
+##     GS <- c(intervals(e.gls)$coef[,2],
+##             Y1 = as.double(gls_var["Y1"]) - gls_tau,
+##             "eta~~eta" = gls_tau,
+##             Y2 = as.double(gls_var["Y2"] - gls_tau),
+##             Y3 = as.double(gls_var["Y3"] - gls_tau),
+##             "Y1~~Y2" = as.double(sqrt(gls_var)["Y1"] * sqrt(gls_var)["Y2"] * intervals(e.gls)$corStruct[1,2] - gls_tau),
+##             "Y1~~Y3" = as.double(sqrt(gls_var)["Y1"] * sqrt(gls_var)["Y3"] * intervals(e.gls)$corStruct[2,2] - gls_tau)
+##             )
+
+##     ## not precisely the same but better
+##     expect_equal(unname(eSSC2.lvm$sCorrect$param),
+##                  unname(GS), tol = 5e-3)
+##     ## eSSC2.lvm$sCorrect$param - GS
+##     ## coef(e.lvm) - GS
+## })
+
+## * latent variable model
+## ** factor model
+m <- lvm(Y1~eta,
+         Y2~eta+X2,
+         Y3~eta,
+         Z1~eta, Z1~~Y1,Z1~~Y2,
+         eta~X1+X3)
+e.lvm <- estimate(m, d)
+
+## round(coef(estimate(m, sim(m,1e4))),1) ## truth
+
+test_that("factor model - residuals correction", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+    ## coef(eSSC1.lvm) - coef(e.lvm)
+
+    GS <- data.frame("estimate" = c(0.23990945, 0.28804288, 0.17076884, 0.36590889, 1.1654944, 0.11297345, 0.91569218, 0.52324123, 1.77781303, 0.10836302, 1.19867136, 0.56519387, 1.47128944, 0.28970699, 1.97973709, 0.24899082, 0.35358187), 
+                     "se" = c(0.18799645, 0.23072591, 0.2931627, 0.20145141, 0.17621193, 0.10589956, 0.16303206, 0.18016769, 0.22465755, 0.14413976, 0.27267601, 0.18626544, 0.32259805, 0.35063842, 0.40312441, 0.22748203, 0.2547393), 
+                     "df" = c(56.44718181, 39.00470949, 18.52136972, 44.292982, 68.78535781, 21.25611374, 12.04379228, 47.13840874, 7.3533839, 17.22586144, 13.16324833, 7.71173505, 12.55326162, 17.06195323, 12.04013252, 22.1993749, 20.8126868), 
+                     "lower" = c(-0.13662687, -0.17864252, -0.44390274, -0.04001391, 0.81394165, -0.10709528, 0.56061904, 0.16081869, 1.25171407, -0.19544183, 0.61033243, 0.13285416, 0.77182911, -0.44987084, 1.10172906, -0.2225325, -0.17646796), 
+                     "upper" = c(0.61644576, 0.75472828, 0.78544041, 0.77183169, 1.51704715, 0.33304219, 1.27076532, 0.88566378, 2.30391198, 0.41216787, 1.78701029, 0.99753358, 2.17074976, 1.02928481, 2.85774513, 0.72051415, 0.88363171), 
+                     "statistic" = c(1.2761382, 1.24842017, 0.58250533, 1.81636304, 6.61416297, 1.06679811, 5.61663881, 2.9041902, 7.91343532, 0.75179128, NA, NA, NA, NA, NA, 1.09455159, 1.38801462), 
+                     "p.value" = c(0.20713262, 0.21931911, 0.56725087, 0.07608612, 1e-08, 0.29803039, 0.00011158, 0.00558839, 7.503e-05, 0.46232572, NA, NA, NA, NA, NA, 0.28544743, 0.17981328))
+
+    
+    expect_equal(as.double(unlist(model.tables(eSSC1.lvm))),
+                 as.double(unlist(GS)),
+                 tol = 1e-6)
+})
+
+## test_that("factor model - Cox correction", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+##     ## coef(eSSC2.lvm) - coef(e.lvm)
+    
+##     GS <- c("eta" = 0.23991104, "Y2" = 0.29144175, "Y3" = 0.17832511, "Z1" = 0.36554264, "eta~X1" = 1.16545963, "eta~X3" = 0.11297009, "Y2~eta" = 0.90409861, "Y2~X2" = 0.52324125, "Y3~eta" = 1.75203851, "Z1~eta" = 0.10961229, "Y1~~Y1" = 1.20351764, "eta~~eta" = 0.5603901, "Y2~~Y2" = 1.47834635, "Y3~~Y3" = 0.30540849, "Z1~~Z1" = 1.99182361, "Y1~~Z1" = 0.25025483, "Y2~~Z1" = 0.3555143)
+##     expect_equal(coef(eSSC2.lvm),GS, tol = 1e-6)
+## })
+
+## ** two factors model (correlation)
+m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
+           Z1~eta2,Z2~eta2,Z3~eta2+X3,
+           eta1~eta2))
+
+e.lvm <- lava::estimate(m, d)
+
+test_that("two factors model (correlation) - residuals correction", {
+    eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+    ## summary2(e.lvm, ssc = "residuals")
+
+    GS <- data.frame("estimate" = c(0.1478569, 0.19515562, 0.37384111, 0.39767751, -0.19934296, -0.82545231, 0.36540063, 0.85064787, 0.88015853, 1.29882077, 0.92947602, 1.95754764, 1.22777389, 0.84489487, 2.201649, 1.66547564, 0.8429522, 1.23373245, 0.77442115, 1.40526018, 0.13550962), 
+                     "se" = c(0.27670411, 0.21934509, 0.17960657, 0.20040727, 0.24589559, 0.39285267, 0.27933333, 0.17310347, 0.1621181, 0.15302732, 0.26424083, 0.60071584, 0.16064393, 0.3688724, 0.67154771, 0.41296464, 0.30484248, 0.31661237, 0.37777288, 0.33039193, 0.73377475), 
+                     "df" = c(24.83880008, 31.95629525, 22.96732922, 48.29904737, 12.20156161, 2.4213353, 7.09306026, 9.4906504, 6.67523697, 47.98657702, 4.53936457, 1.37008364, 47.96401517, 11.36380095, 11.96547193, 13.3573358, 12.98491495, 10.99271212, 6.70939132, 11.93934438, 1.96554411), 
+                     "lower" = c(-0.42221348, -0.25165968, 0.00226736, -0.00520386, -0.73412348, -2.26295108, -0.29336504, 0.46212497, 0.49299671, 0.9911365, 0.22895845, -2.17802662, 0.90477135, 0.03617209, 0.73800383, 0.77573987, 0.18430228, 0.53681695, -0.12677344, 0.68499218, -3.07530271), 
+                     "upper" = c(0.71792727, 0.64197092, 0.74541486, 0.80055889, 0.33543756, 0.61204646, 1.02416631, 1.23917076, 1.26732036, 1.60650504, 1.62999358, 6.0931219, 1.55077643, 1.65361764, 3.66529418, 2.5552114, 1.50160213, 1.93064796, 1.67561575, 2.12552817, 3.34632195), 
+                     "statistic" = c(0.53435021, 0.88971956, 2.08144447, 1.98434679, -0.81068133, -2.10117525, 1.30811684, 4.91410075, 5.42911932, 8.48750911, 3.51753361, 3.25869159, 7.64282757, NA, NA, NA, NA, NA, NA, NA, NA), 
+                     "p.value" = c(0.59785007, 0.38026572, 0.04872921, 0.05291443, 0.43307701, 0.1478051, 0.23162872, 0.00071146, 0.00114389, 0, 0.01990062, 0.13317327, 0, NA, NA, NA, NA, NA, NA, NA, NA))
+
+    expect_equal(as.double(unlist(model.tables(eSSC1.lvm))),
+                 as.double(unlist(GS)),
+                 tol = 1e-6)
+
+})
+
+## test_that("two factors model (correlation) - Cox correction", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+
+##     GS <- c("eta1" = 0.15334368, "Y2" = 0.19799503, "Y3" = 0.37775788, "eta2" = 0.39767751, "Z2" = -0.18562638, "Z3" = -0.75078602, "eta1~eta2" = 0.35160357, "Y2~eta1" = 0.8409626, "Y3~eta1" = 0.86679841, "Y3~X1" = 1.29882077, "Z2~eta2" = 0.89498429, "Z3~eta2" = 1.76979176, "Z3~X3" = 1.22777389, "Y1~~Y1" = 0.88673054, "eta1~~eta1" = 2.22027337, "Y2~~Y2" = 1.7069357, "Y3~~Y3" = 0.87527537, "Z1~~Z1" = 1.26455176, "eta2~~eta2" = 0.74360185, "Z2~~Z2" = 1.43772669, "Z3~~Z3" = 0.31131063)
+
+##     expect_equal(coef(eSSC2.lvm), GS, tol = 1e-6)
+## })
+
+## ## ** two factors model (covariance)
+m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,eta1~X1,
+           Z1~eta2,Z2~eta2,Z3~eta2+X3,eta2~X2,
+           eta1~~eta2))
+
+## e.lvm <- estimate(m, d) ## not done due to lack of convergence
+## ## coef(e.lvm)
+
+## test_that("two factors model (correlation) - residuals correction", {
+##     eSSC1.lvm <- estimate2(e.lvm, ssc = "residuals")
+
+##     GS <- c("eta1" = 0.1478569, "Y2" = 0.19515562, "Y3" = 0.37384111, "eta2" = 0.39767751, "Z2" = -0.19934296, "Z3" = -0.82545231, "eta1~eta2" = 0.36540063, "Y2~eta1" = 0.85064787, "Y3~eta1" = 0.88015853, "Y3~X1" = 1.29882077, "Z2~eta2" = 0.92947602, "Z3~eta2" = 1.95754764, "Z3~X3" = 1.22777389, "Y1~~Y1" = 0.82498637, "eta1~~eta1" = 2.19604428, "Y2~~Y2" = 1.67543559, "Y3~~Y3" = 0.90484948, "Z1~~Z1" = 1.20086385, "eta2~~eta2" = 0.7944319, "Z2~~Z2" = 1.41920933, "Z3~~Z3" = 0.21553652)
+##     expect_equal(coef(eSSC1.lvm),GS, tol = 1e-6)
+## })
+
+## test_that("two factors model (correlation) - Cox correction", {
+##     eSSC2.lvm <- sCorrect(e.lvm, ssc = "Cox")
+
+##     GS <- c("eta1" = 0.24295941, "Y2" = 0.18388462, "Y3" = 0.32464952, "eta2" = 0.37537296, "Z2" = -0.1851004, "Z3" = -0.77200208, "eta1~X1" = 1.08475547, "Y2~eta1" = 0.88925211, "Y3~eta1" = 1.12263979, "Y3~X1" = 0.82749017, "eta2~X2" = -0.10473803, "Z2~eta2" = 0.89399459, "Z3~eta2" = 1.82282964, "Z3~X3" = 1.21228718, "Y1~~Y1" = 0.98937181, "eta1~~eta1" = 0.9128264, "Y2~~Y2" = 1.59949563, "Y3~~Y3" = 0.75743488, "Z1~~Z1" = 1.29260368, "eta2~~eta2" = 0.72179897, "Z2~~Z2" = 1.46486865, "Z3~~Z3" = 0.22243345, "eta1~~eta2" = 0.14547098)
+
+##     expect_equal(coef(eSSC2.lvm), GS, tol = 1e-6)
+##     ## coef(e.lvm) - coef(eSSC2.lvm)
+## })
+
+##----------------------------------------------------------------------
+### test1-sCorrect-ssc.R ends here
diff --git a/tests/testthat/test1-compare2.R b/tests/testthat/test1d-sCorrect-compare2.R
similarity index 59%
rename from tests/testthat/test1-compare2.R
rename to tests/testthat/test1d-sCorrect-compare2.R
index 2ef6a09..100c038 100644
--- a/tests/testthat/test1-compare2.R
+++ b/tests/testthat/test1d-sCorrect-compare2.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: okt 20 2017 (10:22) 
 ## Version: 
-## last-updated: jul 31 2020 (10:46) 
+## last-updated: Jan 17 2022 (23:26) 
 ##           By: Brice Ozenne
-##     Update #: 236
+##     Update #: 235
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -30,23 +30,20 @@
 ### Code:
 
 ## * header
-rm(list = ls())
+## rm(list = ls())
 if(FALSE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
 }
 
 
-## library(clubSandwich)
+library(clubSandwich)
 library(nlme)
 library(lme4)
 library(lmerTest)
 library(pbkrtest)
 lava.options(symbols = c("~","~~"))
 
-.coef2 <- lavaSearch2:::.coef2
-.coef2.gls <- lavaSearch2:::.coef2.gls
-.coef2.lme <- lavaSearch2:::.coef2.lme
 context("compare2")
 
 ## * simulation
@@ -63,7 +60,7 @@ dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
                      measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
 dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
 
-## * linear regression [lm,gls,lvm]
+## * linear regression
 
 e.lvm <- estimate(lvm(Y1~X1+X2), data = d)
 e.lm <- lm(Y1~X1+X2, data = d)
@@ -71,60 +68,44 @@ e.gls <- nlme::gls(Y1~X1+X2, data = d, method = "ML")
 
 ## vcov(e.lvm)
 
-## ### ** clubSandwich
-## cS.vcov <- clubSandwich::vcovCR(e.lm, type = "CR0", cluster = d$Id)
-## cS.df <- clubSandwich::coef_test(e.lm, vcov = cS.vcov, test = "Satterthwaite", cluster = 1:NROW(d))
-## cS.df
-## ## cS.df$df is very suspect: should be the same for all coefficient and close to n-p
+### ** clubSandwich
+cS.vcov <- vcovCR(e.lm, type = "CR0", cluster = d$Id)
+cS.df <- coef_test(e.lm, vcov = cS.vcov, test = "Satterthwaite", cluster = 1:NROW(d))
+cS.df
+## cS.df$df is very suspect: should be the same for all coefficients and close to n-p
 
 ### ** compare2
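+## note: compare2 now takes linfct (a contrast specification, as in multcomp)
+## and ssc (small-sample correction) in place of the former par and
+## bias.correct arguments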
 test_that("linear regression: df",{
     name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-    
-    name.param <- names(.coef2(e.gls))
-    df.lm <- compare2(e.lm, par = name.param, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-    df.gls <- compare2(e.gls, par = name.param, cluster = 1:n, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-    
-    ## test equivalence
-    expect_equivalent(df.lvm,df.gls)
-    expect_equivalent(df.lvm,df.lm)
-
+    df.lvm <- compare2(e.lvm, linfct = name.param, ssc = "none", as.lava = FALSE)
+        
     ## test value
     n.param <- length(coef(e.lm))
     df.GS <- c(rep(n,n.param), n/4)
-    expect_equal(df.lm$df, df.GS)
+    expect_equal(unname(df.lvm$df), df.GS)
 
     sigma2 <- coef(e.lvm)["Y1~~Y1"]
     iXX <- solve(crossprod(model.matrix(e.lm)))
     std.GS <- c(sqrt(diag(iXX*sigma2)),sqrt(2*sigma2^2/e.lvm$data$n))
-    expect_equal(df.lm$std, unname(std.GS))
+    expect_equal(unname(sqrt(diag(df.lvm$vcov))), unname(std.GS))
 })
 
 test_that("linear regression: df adjusted",{
     name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-    
-    name.param <- names(.coef2(e.gls))
-    df.lm <- compare2(e.lm, par = name.param, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-    df.gls <- compare2(e.gls, par = name.param, cluster = 1:n, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-
-    ## test equivalence
-    expect_equivalent(df.lvm,df.gls)
-    expect_equivalent(df.lvm,df.lm)
+    df.lvm <- compare2(e.lvm, linfct = name.param, as.lava = FALSE)
 
     ## test value
-    n.param <- length(coef(e.lm))
+    n.param <- length(coef(e.lvm))-1
     df.GS <- c(rep(n-n.param,n.param), (n-n.param)/4)
-    expect_equal(df.lm$df, df.GS)
+    expect_equal(unname(df.lvm$df), df.GS)
 
     sigma2 <- sigma(e.lm)^2
     iXX <- solve(crossprod(model.matrix(e.lm)))
     std.GS <- c(sqrt(diag(iXX*sigma2)),sqrt(2*sigma2^2/(n-n.param)))
-    expect_equal(df.lm$std, unname(std.GS), tolerance = 1e-7)
+    expect_equal(unname(sqrt(diag(df.lvm$vcov))), unname(std.GS))
 })
 
-## * multiple linear regression [lvm,gls]
+## * multiple linear regression [lvm]
 ## ** model fit
 ls.lm <- list(lm(Y1~X1,d),lm(Y2~X2,d),lm(Y3~X1+X3,d))
 e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
@@ -143,44 +124,9 @@ e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
 ## })
 
 ## ** compare2
-test_that("multiple linear regression: df",{
-    name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-    
-    name.param <- names(.coef2(e.gls))
-    df.gls <- compare2(e.gls, par = name.param, cluster = "Id", bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-
-    ## 
-    sigma2 <- list(coef(e.lvm)["Y1~~Y1"],
-                   coef(e.lvm)["Y2~~Y2"],
-                   coef(e.lvm)["Y3~~Y3"])
-    X <- list(as.matrix(cbind(1,d[,c("X1")])),
-              as.matrix(cbind(1,d[,c("X2")])),
-              as.matrix(cbind(1,d[,c("X1","X3")])))
-    std.GS <- mapply(X, sigma2, FUN = function(x,y){
-        c(sqrt(diag(solve(crossprod(x))*y)),sqrt(2*y^2/n))
-    })
-
-    name.coef.lvm <- names(coef(e.lvm))
-    expect_equal(df.lvm$std[grep("Y1",name.coef.lvm)], unname(std.GS[[1]]))
-    expect_equal(df.lvm$std[grep("Y2",name.coef.lvm)], unname(std.GS[[2]]))
-    expect_equal(df.lvm$std[grep("Y3",name.coef.lvm)], unname(std.GS[[3]]))
-
-    ## test value
-    df.GS <- lapply(X, function(x){
-        c(rep(n,NCOL(x)), n/4)
-    })
-    expect_equal(df.lvm$df[grep("Y1",name.coef.lvm)], unname(df.GS[[1]]), tol = 1e-7)
-    expect_equal(df.lvm$df[grep("Y2",name.coef.lvm)], unname(df.GS[[2]]), tol = 1e-7)
-    expect_equal(df.lvm$df[grep("Y3",name.coef.lvm)], unname(df.GS[[3]]), tol = 1e-7)
-})
-
 test_that("multiple linear regression: df adjusted",{
     name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-    
-    name.param <- names(.coef2(e.gls))
-    df.gls <- compare2(e.gls, par = name.param, cluster = "Id", bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
+    df.lvm <- compare2(estimate2(e.lvm), linfct = name.param, as.lava = FALSE, sep = c("",""))
 
     ## 
     X <- list(as.matrix(cbind(1,d[,c("X1")])),
@@ -194,18 +140,18 @@ test_that("multiple linear regression: df adjusted",{
     })
 
     name.coef.lvm <- names(coef(e.lvm))
-    expect_equal(df.lvm$std[grep("Y1",name.coef.lvm)], unname(std.GS[[1]]), tol = 1e-7)
-    expect_equal(df.lvm$std[grep("Y2",name.coef.lvm)], unname(std.GS[[2]]), tol = 1e-7)
-    expect_equal(df.lvm$std[grep("Y3",name.coef.lvm)], unname(std.GS[[3]]), tol = 1e-7)
+    expect_equal(unname(sqrt(diag(df.lvm$vcov))[c("Y1","Y1~X1","Y1~~Y1")]), unname(std.GS[[1]]), tol = 1e-7)
+    expect_equal(unname(sqrt(diag(df.lvm$vcov))[c("Y2","Y2~X2","Y2~~Y2")]), unname(std.GS[[2]]), tol = 1e-7)
+    expect_equal(unname(sqrt(diag(df.lvm$vcov))[c("Y3","Y3~X1","Y3~X3","Y3~~Y3")]), unname(std.GS[[3]]), tol = 1e-7)
 
     ## test value
     df.GS <- lapply(X, function(x){
         c(rep(n - NCOL(x),NCOL(x)),
           (n - NCOL(x))/4)
     })
-    expect_equal(df.lvm$df[grep("Y1",name.coef.lvm)], unname(df.GS[[1]]), tol = 1e-7)
-    expect_equal(df.lvm$df[grep("Y2",name.coef.lvm)], unname(df.GS[[2]]), tol = 1e-7)
-    expect_equal(df.lvm$df[grep("Y3",name.coef.lvm)], unname(df.GS[[3]]), tol = 1e-7)
+    expect_equal(unname(df.lvm$df[c("Y1","Y1~X1","Y1~~Y1")]), unname(df.GS[[1]]), tol = 1e-7)
+    expect_equal(unname(df.lvm$df[c("Y2","Y2~X2","Y2~~Y2")]), unname(df.GS[[2]]), tol = 1e-7)
+    expect_equal(unname(df.lvm$df[c("Y3","Y3~X1","Y3~X3","Y3~~Y3")]), unname(df.GS[[3]]), tol = 1e-7)
 
 })
 
@@ -229,7 +175,7 @@ e.gls <- nlme::gls(value ~ variable + X1 + Gender,
 
 ## ** clubSandwich - bug
 expect_equal(logLik(e.lmer),logLik(e.lme))
-## clubSandwich::coef_test(e.lme, vcov = "CR0", test = "Satterthwaite", cluster = dLred$Id)
+coef_test(e.lme, vcov = "CR0", test = "Satterthwaite", cluster = dLred$Id)
 ## strange that the same type of coefficient has very different degrees of freedom
 
 ## ** compare 
@@ -247,28 +193,19 @@ test_that("mixed model: Satterthwaite ",{
     }))
 
     name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
+    df.lvm <- compare2(e.lvm, linfct = name.param, ssc = "none", as.lava = FALSE)
     expect_equal(as.double(GS$df),
-                 as.double(df.lvm[1:5,"df"]), tol = 1e-4) ## needed for CRAN
+                 as.double(df.lvm$df[1:5]), tol = 1e-4) ## needed for CRAN
     expect_equal(as.double(GS$statistic),
-                 as.double(abs(df.lvm[1:5,"statistic"])), tol = 1e-8) ## needed for CRAN
-
-    name.param <- names(.coef2(e.lme))
-    df.lme <- compare2(e.lme, par = name.param, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-    expect_equal(df.lme$statistic, df.lvm$statistic, tol = 1e-5)
-    expect_equal(df.lme$df, df.lvm$df, tol = 1e-5)
-
-    name.param <- names(.coef2(e.gls))
-    df.gls <- compare2(e.gls, par = name.param, bias.correct = FALSE, as.lava = FALSE)[1:length(name.param),]
-    expect_equal(df.gls$statistic[1:5], df.lvm$statistic[1:5], tol = 1e-5)
-    expect_equal(df.gls$df[1:5], df.lvm$df[1:5], tol = 1e-5)
+                 as.double(abs(summary(df.lvm, test = multcomp::adjusted("none"))$table2[1:5,"statistic"])), tol = 1e-8) ## needed for CRAN
 
     ## F test
     GS <- lmerTest::contestMD(e.lmer, L = diag(1,5,5), rhs = 0, ddf = "Satterthwaite")
     name.param <- names(coef(e.lvm))    
-    df.F <- compare2(e.lvm, par = name.param[1:5], bias.correct = FALSE, as.lava = FALSE)["global",]
-    expect_equal(GS[["DenDF"]], df.F$df, tol = 1e-5)
-    expect_equal(GS[["F value"]], df.F$statistic, tol = 1e-8)
+    df.F <- compare2(e.lvm, linfct = name.param[1:5], ssc = "none", as.lava = FALSE, F.test = TRUE)
+    
+    expect_equal(GS[["DenDF"]], unname(df.F$global["df"]), tol = 1e-5)
+    expect_equal(GS[["F value"]], unname(df.F$global["statistic"]), tol = 1e-8)
 })
 
 test_that("mixed model: KR-like correction",{
@@ -285,16 +222,16 @@ test_that("mixed model: KR-like correction",{
     ## get_Lb_ddf(e.lmer, c(0,1,0,0,0))
     ## get_Lb_ddf(e.lmer, c(0,0,0,1,0))
     name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-    name.param <- names(.coef2(e.lme))
-    df.lme <- compare2(e.lme, par = name.param, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-    expect_equal(df.lme$statistic, df.lvm$statistic, tol = 1e-5)
-    expect_equal(df.lme$df, df.lvm$df, tol = 1e-5)
-
-    name.param <- names(.coef2(e.gls))
-    df.gls <- compare2(e.gls, par = name.param, bias.correct = TRUE, as.lava = FALSE)[1:length(name.param),]
-    expect_equal(df.gls$statistic[1:5], df.lvm$statistic[1:5], tol = 1e-5)
-    expect_equal(df.gls$df[1:5], df.lvm$df[1:5], tol = 1e-5)
+    df.lvm <- compare2(e.lvm, linfct = name.param, as.lava = FALSE)
+
+    previous.value <- data.frame("estimate" = c(-0.25588154, 0.15137028, 0.39879913, 1.48076547, 0.92411608, 1.45423356, 0.6594628), 
+                                 "se" = c(0.26176621, 0.24118321, 0.24118321, 0.15126168, 0.30866695, 0.20917549, 0.24297288), 
+                                 "df" = c(87.2184581, 96.66666667, 96.66666667, 48.33333333, 48.33333333, 24.16666667, 14.29181404), 
+                                 "lower" = c(-0.77615186, -0.32733248, -0.07990363, 1.17668766, 0.30361014, 1.02267408, 0.13933428), 
+                                 "upper" = c(0.26438879, 0.63007304, 0.87750189, 1.78484328, 1.54462201, 1.88579304, 1.17959131), 
+                                 "statistic" = c(-0.97751933, 0.62761531, 1.65351113, 9.78942913, 2.99389387, 6.95221787, 2.71414157), 
+                                 "p.value" = c(0.3310157, 0.53173576, 0.10147095, 0, 0.00433193, 3.3e-07, 0.01654271))
+    expect_equivalent(previous.value, summary(df.lvm, test = multcomp::adjusted("none"))$table2, tol = 1e-5)
 })
 
 ### ** compare to SAS
@@ -356,11 +293,8 @@ test_that("lme/gls equivalent to lvm", {
 
 ## ** compare
 test_that("UN mixed model: df",{
-    ## singular information matrix
-    ## df.adj.lme <- compare2(e.lme,
-    ##                          robust = FALSE, bias.correct = FALSE)
     name.param <- names(coef(e.lvm))
-    df.lvm <- compare2(e.lvm, par = name.param, bias.correct = FALSE, as.lava = FALSE)
+    df.lvm <- compare2(e.lvm, linfct = name.param, ssc = "none", as.lava = FALSE)
 
     ##                          estimate       std  statistic       df      p-value
     ## [eta] = 0              -0.2530247 0.2459609 -1.0287194 61.99195 3.076087e-01
@@ -375,12 +309,15 @@ test_that("UN mixed model: df",{
     ## [Y1~~Y2] = 0            0.2231421 0.3296648  0.6768757 24.12389 5.049242e-01
     ## [Y1~~Y3] = 0            0.2638691 0.3376548  0.7814760 23.84905 4.422119e-01
     ## global                         NA        NA 17.0357449 34.39628 7.882273e-11
-    ## overparametrized model
-    ## name.param <- names(.coef2(e.lme))
-    ## df.lme <- compare2(e.lme, par = name.param, bias.correct = FALSE, as.lava = FALSE)
 
-    name.param <- names(.coef2(e.gls))
-    df.gls <- compare2(e.gls, par = name.param, bias.correct = FALSE, as.lava = FALSE)
+    previous.value <- data.frame("estimate" = c(-0.25302471, 0.15137028, 0.39879913, 1.4498392, 0.92137382, 1.35338532, 0.43914858, 1.62009916, 1.78897339, 0.22314209, 0.26386911), 
+                                 "se" = c(0.24596086, 0.22481994, 0.22867534, 0.14657427, 0.29910174, 0.43232057, 0.30922829, 0.43920032, 0.46467742, 0.32966479, 0.33765478), 
+                                 "df" = c(61.99194948, 50, 50, 50, 50, 29.40589311, 21.64808397, 13.94844762, 13.62954237, 24.12389256, 23.8490532), 
+                                 "lower" = c(-0.74469474, -0.30019386, -0.0605088, 1.15543612, 0.3206103, 0.46972029, -0.20275661, 0.67778142, 0.78979205, -0.45706773, -0.43324959), 
+                                 "upper" = c(0.23864531, 0.60293442, 0.85810706, 1.74424227, 1.52213734, 2.23705035, 1.08105378, 2.5624169, 2.78815472, 0.90335192, 0.9609878), 
+                                 "statistic" = c(-1.02871942, 0.6732956, 1.74395338, 9.89149902, 3.08046963, 3.13051333, 1.42014364, 3.68874764, 3.84992537, 0.67687572, 0.78147601), 
+                                 "p.value" = c(0.30760871, 0.50385966, 0.08731285, 0, 0.00335522, 0.00392319, 0.16980825, 0.00244472, 0.00185058, 0.50492415, 0.44221192))
+    expect_equivalent(previous.value, summary(df.lvm, test = multcomp::adjusted("none"))$table2, tol = 1e-5)
 })
 
 
diff --git a/tests/testthat/test1-sCorrect-adjustedResiduals.R b/tests/testthat/test1d-sCorrect-residuals2.R
similarity index 74%
rename from tests/testthat/test1-sCorrect-adjustedResiduals.R
rename to tests/testthat/test1d-sCorrect-residuals2.R
index 4adbccc..dfb67f6 100644
--- a/tests/testthat/test1-sCorrect-adjustedResiduals.R
+++ b/tests/testthat/test1d-sCorrect-residuals2.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: mar  7 2018 (12:21) 
 ## Version: 
-## Last-Updated: apr  4 2018 (14:20) 
+## Last-Updated: jan 17 2022 (14:11) 
 ##           By: Brice Ozenne
-##     Update #: 28
+##     Update #: 32
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -16,7 +16,7 @@
 ### Code:
 
 ## * header
-rm(list = ls())
+## rm(list = ls())
 if(FALSE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
@@ -39,19 +39,14 @@ dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
                      measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
 dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
 
-## * linear regression [lm,gls,lvm]
+## * linear regression [lvm]
 ## ** model fit and sCorrect
 e.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d)
 e.lm <- lm(Y1~X1+X2+Gender, data = d)
 e.gls <- gls(Y1~X1+X2+Gender, data = d, method = "ML")
 
-e2.lvm <- e.lvm
-e2.gls <- e.gls
-e2.lm <- e.lm
+e2.lvm <- estimate2(e.lvm)
 
-sCorrect(e2.lvm) <- TRUE
-sCorrect(e2.gls, cluster = 1:n) <- TRUE
-sCorrect(e2.lm) <- TRUE
 
 ## ** test adjusted residuals
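+## adjusted residuals rescale the raw ML residuals by sqrt(n/(n-p)), the same
+## factor that turns the ML variance estimate RSS/n into the unbiased RSS/(n-p)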
 test_that("residuals2 match residuals.lm (lm adjusted)", {
@@ -74,12 +69,8 @@ test_that("residuals2 match residuals.lm (lm adjusted)", {
 
     ## match individual components
     factor <- sqrt(NROW(epsilon.lm)/(NROW(epsilon.lm) - length(coef(e.lm))))
-    expect_equal(as.double(e2.lm$sCorrect$residuals),
+    expect_equal(as.double(e2.lvm$sCorrect$residuals),
                  as.double(epsilon.lm)*factor)
-    expect_equal(e2.lvm$sCorrect$residuals,
-                 e2.lm$sCorrect$residuals)
-    expect_equal(unname(e2.gls$sCorrect$residuals),
-                 unname(e2.lm$sCorrect$residuals)) 
 })
 
 ## * multivariate linear models
@@ -87,7 +78,7 @@ test_that("residuals2 match residuals.lm (lm adjusted)", {
 ls.lm <- list(lm(Y1~X1,d),lm(Y2~X2,d),lm(Y3~X1+X3,d))
 e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
 
-sCorrect(e.lvm) <- TRUE
+e2.lvm <- estimate2(e.lvm)
 
 test_that("residuals2 match residuals.lm", {
 
@@ -101,12 +92,12 @@ test_that("residuals2 match residuals.lm", {
     ## })
 
     ## match expectation
-    expect_equal(as.double(colMeans(e.lvm$sCorrect$residuals)),
+    expect_equal(as.double(colMeans(e2.lvm$sCorrect$residuals)),
                  c(0,0,0),
                  tol = 1e-8)
     
     ## match variance
-    expect_equal(unname(colMeans(e.lvm$sCorrect$residuals^2)),
+    expect_equal(unname(colMeans(e2.lvm$sCorrect$residuals^2)),
                  sapply(ls.lm,sigma)^2,
                  tol = 1e-8)
 
@@ -118,11 +109,11 @@ test_that("residuals2 match residuals.lm", {
     })
 
     
-    expect_equal(unname(e.lvm$sCorrect$residuals),
+    expect_equal(unname(e2.lvm$sCorrect$residuals),
                  unname(do.call(cbind,ls.GS)))
 })
 
-## * mixed model: CS [lvm,gls,lme]
+## * mixed model: CS [lvm]
 m <- lvm(c(Y1[0:sigma]~1*eta,
            Y2[0:sigma]~1*eta,
            Y3[0:sigma]~1*eta,
@@ -138,26 +129,17 @@ e.gls <- nlme::gls(value ~ X1 + X2,
                    correlation = corCompSymm(form = ~1| Id),
                    data = dLred, method = "ML")
 
-sCorrect(e.lvm) <-  TRUE
-sCorrect(e.lme) <-  TRUE
-sCorrect(e.gls) <-  TRUE
+e2.lvm <- estimate2(e.lvm)
 
 test_that("residuals2 in mixed models", {
 
     ## mean
-    expect_equal(colSums(e.lvm$sCorrect$residuals),
+    expect_equal(colSums(e2.lvm$sCorrect$residuals),
                  colSums(residuals(e.lvm)))
 
     ## variance
-    ## e.lvm$sCorrect$Omega
-    crossprod(e.lvm$sCorrect$residuals)/NROW(e.lvm$sCorrect$residuals)
-    ## getVarCov2(e.lvm)
-    crossprod(residuals(e.lvm))/NROW(e.lvm$sCorrect$residuals)
-        
-    expect_equal(unname(e.lvm$sCorrect$residuals),
-                 unname(e.lme$sCorrect$residuals))
-    expect_equal(unname(e.lvm$sCorrect$residuals),
-                 unname(e.gls$sCorrect$residuals))
+    getVarCov2(e.lvm)/var(e2.lvm$sCorrect$residuals)
+    ## not 1....
     
 })
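
Note: throughout the renamed file, the 1.5.x idiom sCorrect(object) <- TRUE
is replaced by a plain call to estimate2(), whose result carries the
corrected quantities under $sCorrect. A minimal sketch of the migration,
assuming lavaSearch2 2.x and toy simulated data standing in for the test's d:

    library(lava)
    library(lavaSearch2)

    ## Toy data; the test uses a richer simulated dataset d.
    set.seed(10)
    d <- lava::sim(lvm(Y1 ~ X1 + X2), n = 50)
    e.lvm <- estimate(lvm(Y1 ~ X1 + X2), data = d)

    ## 1.5.x style, removed upstream:
    ## sCorrect(e.lvm) <- TRUE

    ## 2.x style, as used in the renamed test file:
    e2.lvm <- estimate2(e.lvm)
    head(e2.lvm$sCorrect$residuals)  # small-sample-corrected residuals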
 
diff --git a/tests/testthat/test1d-sCorrect-summary2.R b/tests/testthat/test1d-sCorrect-summary2.R
new file mode 100644
index 0000000..4df75b0
--- /dev/null
+++ b/tests/testthat/test1d-sCorrect-summary2.R
@@ -0,0 +1,268 @@
+### test1-sCorrect-summary2.R --- 
+##----------------------------------------------------------------------
+## Author: Brice Ozenne
+## Created: apr  4 2018 (13:29) 
+## Version: 
+## Last-Updated: jan 17 2022 (16:51) 
+##           By: Brice Ozenne
+##     Update #: 67
+##----------------------------------------------------------------------
+## 
+### Commentary: 
+## 
+### Change Log:
+##----------------------------------------------------------------------
+## 
+### Code:
+
+## * header
+## rm(list = ls())
+if(FALSE){ ## already called in test-all.R
+    library(testthat)
+    library(lavaSearch2)
+
+    printDF <- function(object, bias.correct){
+        colDF <- summary2(object, bias.correct = bias.correct)$coef[,"df",drop=FALSE]
+        n.coef <- NROW(colDF)
+        vec.end <- c(rep(",",n.coef-1),")")
+        vec.start <- c("c(", rep("",n.coef-1))        
+        df <- data.frame(paste0(vec.start,"\"",rownames(colDF),"\""),
+                         "=",
+                         paste0(colDF[,1],vec.end))
+        names(df) <- NULL
+        print(df, row.names = FALSE)
+    }
+
+}
+
+lava.options(symbols = c("~","~~"))
+library(nlme)
+context("sCorrect (dVcov-SatterthwaiteCorrection)")
+
+## * simulation
+n <- 5e1
+mSim <- lvm(c(Y1~eta1,Y2~eta1+X2,Y3~eta1+X1,
+              Z1~eta2,Z2~eta2,Z3~eta2+X3))
+regression(mSim) <- eta1~X1+Gender
+latent(mSim) <- ~eta1+eta2
+categorical(mSim, labels = c("Male","Female")) <- ~Gender
+transform(mSim, Id~Y1) <- function(x){1:NROW(x)}
+set.seed(10)
+d <- lava::sim(mSim, n = n, latent = FALSE)
+dL <- reshape2::melt(d, id.vars = c("Id","X1","X2","X3","Gender"),
+                     measure.vars = c("Y1","Y2","Y3","Z1","Z2","Z3"))
+dLred <- dL[dL$variable %in% c("Y1","Y2","Y3"),]
+
+## * linear regression 
+## ** model fit
+e.lvm <- estimate(lvm(Y1~X1+X2+Gender), data = d)
+
+## ** test df
+test_that("linear regression: Satterthwaite (df)", {
+    df <- c("Y1" = 50,
+            "Y1~X1" = 50,
+            "Y1~X2" = 50,
+            "Y1~GenderFemale" = 50,            
+            "Y1~~Y1" = 12.5)
+    expect_equal(as.double(df),
+                 summary2(e.lvm, ssc = "none")$coef$df)
+})
+
+test_that("linear regression: Satterthwaite + SSC (df)", {
+    df <- c("Y1" =   46,
+            "Y1~X1" =   46,
+            "Y1~X2" =   46,
+            "Y1~GenderFemale" =   46,
+            "Y1~~Y1" = 11.5)
+    expect_equal(as.double(df),
+                 summary2(e.lvm, ssc = "residuals")$coef$df)
+})
+
+## ** robust standard error
+test_that("linear regression: robust SE", {
+    ## printDF(e.lvm, bias.correct = TRUE)
+    lava.options(df.robust = 1)
+    eS0 <- summary2(e.lvm, robust = TRUE, df = "satterthwaite")$coef
+    
+    df <- c("Y1" =   46,
+            "Y1~X1" =   46,
+            "Y1~X2" =   46,
+            "Y1~GenderFemale" =   46,
+            "Y1~~Y1" = 11.5)
+    expect_equal(as.double(df),
+                 eS0$df, tol = 1e-2)
+    
+    eS1 <- summary2(e.lvm, robust = TRUE, df = "satterthwaite")$coef
+    eS2 <- summary2(e.lvm, robust = TRUE, df = "satterthwaite", cluster = 1:n)$coef
+    expect_equal(eS1,eS2)
+   
+})
+
+## * linear regression with constrains 
+## ** model fit
+e.lvm <- estimate(lvm(Y1[0:2]~X1+1*X2), data = d)
+
+e.lvm2 <- estimate(lvm(Y1~beta*X1+beta*X2), d)
+
+
+## ** test df
+test_that("linear regression with constrains: Satterthwaite (df)", {
+    expect_equal(summary2(e.lvm)$coef$df,c(Inf)) ## Inf since the variance coefficient is known
+    ## printDF(e.lvm2, bias.correct = FALSE)
+    df <- c("Y1~X1" =   50,
+            "Y1" =   50,
+            "Y1~~Y1" = 12.5)
+    expect_equal(summary2(e.lvm2, ssc = "none")$coef$df,
+                 as.double(df))
+})
+
+test_that("linear regression with constrains: Satterthwaite + SSC (df)", {
+    expect_equal(summary2(e.lvm)$coef$df,c(Inf)) ## Inf since the variance coefficient is known
+    ## printDF(e.lvm2, bias.correct = TRUE)
+    df <- c("Y1~X1" =   48,
+            "Y1" =   48,
+            "Y1~~Y1" = 12)
+    expect_equal(summary2(e.lvm2)$coef$df,
+                 as.double(df))
+})
+
+## * multiple linear regression 
+## ** model fit
+ls.lm <- list(lm(Y1~X1,d),lm(Y2~X2,d),lm(Y3~X1+X3,d))
+e.lvm <- estimate(lvm(Y1~X1,Y2~X2,Y3~X1+X3), data = d)
+
+## ** test df
+test_that("multiple linear regression: Satterthwaite (df)", {
+    ## printDF(e.lvm, bias.correct = FALSE)
+    df <- c("Y1~X1" =   50,
+            "Y2~X2" =   50,
+            "Y3~X1" =   50,
+            "Y3~X3" =   50,
+            "Y1~~Y1" = 12.5,
+            "Y2~~Y2" = 12.5,
+            "Y3~~Y3" = 12.5,
+            "Y1" =   50,
+            "Y2" =   50,
+            "Y3" =   50)
+    expect_equal(summary2(e.lvm, ssc = "none")$coef[names(df),"df"],
+                 as.double(df)) ## 
+    
+})
+
+test_that("multiple linear regression: Satterthwaite + SSC (df)", {
+    ## printDF(e.lvm, bias.correct = TRUE)
+    df <- c("Y1~X1" =    48,
+            "Y2~X2" =    48,
+            "Y3~X1" =    47,
+            "Y3~X3" =    47,
+            "Y1~~Y1" =    12,
+            "Y2~~Y2" =    12,
+            "Y3~~Y3" = 11.75,
+            "Y1" =    48,
+            "Y2" =    48,
+            "Y3" =    47)
+    expect_equal(summary2(e.lvm)$coef[names(df),"df"],
+                 as.double(df)) ## 
+    
+})
+
+## * multiple linear regression with constrains 
+## ** model fit
+e.lvm <- estimate(lvm(Y1~X1+1*X2,Y2~2*X3+2*X1,Y3~X2), data = d)
+
+## ** test df
+test_that("multiple linear regression with constrains: Satterthwaite (df)", {
+    ## printDF(e.lvm, bias.correct = FALSE)
+    df <- c("Y1~X1" =   50,
+            "Y1~X2" =   NA,
+            "Y2~X1" =   NA,
+            "Y2~X3" =   NA,
+            "Y3~X2" =   50,
+            "Y1~~Y1" = 12.5,
+            "Y2~~Y2" = 12.5,
+            "Y3~~Y3" = 12.5,
+            "Y1" =   50,
+            "Y2" =   50,
+            "Y3" =   50)
+    expect_equal(summary2(e.lvm, ssc = "none")$coef[names(df),"df"],
+                 as.double(df)) ## 
+    
+})
+
+test_that("multiple linear regression with constrains: Satterthwaite + SSC (df)", {
+    ## printDF(e.lvm, bias.correct = TRUE)
+    df <- c("Y1~X1" =    48,
+            "Y1~X2" =    NA,
+            "Y2~X1" =    NA,
+            "Y2~X3" =    NA,
+            "Y3~X2" =    48,
+            "Y1~~Y1" =    12,
+            "Y2~~Y2" = 12.25,
+            "Y3~~Y3" =    12,
+            "Y1" =    48,
+            "Y2" =    49,
+            "Y3" =    48)
+    expect_equal(summary2(e.lvm)$coef[names(df),"df"],
+                 as.double(df)) ## 
+    
+})
+
+## * multiple linear regression with covariance links 
+## ** model fit
+e.lvm <- estimate(lvm(Y1~X1+X2,Y2~X3+X1,Y3~X2,Y1~~Y2),d)
+
+## ** test df
+test_that("multiple linear regression with covariance: Satterthwaite (df)", {
+    ## printDF(e.lvm, bias.correct = FALSE)
+    df <- c("Y1~X1" = 50.0023249929247,
+            "Y1~X2" = 50.0557533452502,
+            "Y2~X1" = 50.1412333709522,
+            "Y2~X3" = 50.0557533452502,
+            "Y3~X2" =               50,
+            "Y1~~Y1" =             12.5,
+            "Y1~~Y2" = 14.4382586892588,
+            "Y2~~Y2" =             12.5,
+            "Y3~~Y3" =             12.5,
+            "Y1" = 51.0449669789772,
+            "Y2" = 50.0000667169911,
+            "Y3" =               50)
+    expect_equal(summary2(e.lvm, ssc = "none")$coef[names(df),"df"],
+                 as.double(df), tol = 1e-6) ## 
+    
+})
+
+test_that("multiple linear regression with covariance: Satterthwaite +SSC (df)", {
+    ## printDF(e.lvm, bias.correct = TRUE)
+    df <- c("Y1~X1" = 47.0021511585814,
+            "Y1~X2" = 47.0515840309539,
+            "Y2~X1" = 47.1306527469107,
+            "Y2~X3" = 47.0515840309539,
+            "Y3~X2" =               48,
+            "Y1~~Y1" =            11.75,
+            "Y1~~Y2" = 13.6588307493286,
+            "Y2~~Y2" =            11.75,
+            "Y3~~Y3" =               12,
+            "Y1" = 47.9656506208306,
+            "Y2" = 47.0000617288762,
+            "Y3" =               48)
+    expect_equal(summary2(e.lvm)$coef[names(df),"df"],
+                 as.double(df), tol = 1e-6) ## 
+    
+})
+
+
+
+## * two factors model (regression)
+m <- lvm(c(Y1~eta1,Y2~eta1,Y3~eta1+X1,
+           Z1~eta2,Z2~eta2,Z3~eta2+X3,
+           eta1~eta2))
+
+e.lvm <- lava::estimate(m, d)
+
+test_that("Two factor model: Satterthwaite +SSC (df)", {
+    GS <- model.tables(estimate2(e.lvm))
+    expect_equal(summary2(e.lvm)$coef[rownames(GS),"df"],
+                 GS$df)
+})
+######################################################################
+### test1-sCorrect-summary2.R ends here
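
Note: the new file exercises summary2()'s ssc argument, which supersedes the
old bias.correct flag: ssc = "none" yields plain Satterthwaite degrees of
freedom, ssc = "residuals" adds the residual-based small-sample correction,
and the expected values above indicate the correction is applied by default.
A minimal sketch under the same toy-data assumptions as before:

    library(lava)
    library(lavaSearch2)

    set.seed(10)
    d <- lava::sim(lvm(Y1 ~ X1 + X2), n = 50)
    e.lvm <- estimate(lvm(Y1 ~ X1 + X2), data = d)

    ## Satterthwaite df only (1.5.x: bias.correct = FALSE):
    summary2(e.lvm, ssc = "none")$coef$df

    ## Satterthwaite df plus residual correction (1.5.x: bias.correct = TRUE):
    summary2(e.lvm, ssc = "residuals")$coef$df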
diff --git a/tests/testthat/test2-IntDensTri.R b/tests/testthat/test2-IntDensTri.R
index a228222..ac78118 100644
--- a/tests/testthat/test2-IntDensTri.R
+++ b/tests/testthat/test2-IntDensTri.R
@@ -3,9 +3,9 @@
 ## author: Brice Ozenne
 ## created: aug 31 2017 (16:32) 
 ## Version: 
-## last-updated: mar 13 2018 (13:24) 
+## last-updated: Jan 12 2022 (16:33) 
 ##           By: Brice Ozenne
-##     Update #: 15
+##     Update #: 16
 #----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -16,7 +16,7 @@
 ### Code:
 
 ## * header
-rm(list = ls(all.names = TRUE))
+##rm(list = ls(all.names = TRUE))
 if(TRUE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
diff --git a/tests/testthat/test2-modelsearch2.R b/tests/testthat/test2-modelsearch2.R
index ec309f3..d3a223c 100644
--- a/tests/testthat/test2-modelsearch2.R
+++ b/tests/testthat/test2-modelsearch2.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: jan 22 2018 (11:45) 
 ## Version: 
-## Last-Updated: jun 27 2019 (14:21) 
+## Last-Updated: Jan 12 2022 (14:47) 
 ##           By: Brice Ozenne
-##     Update #: 24
+##     Update #: 25
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -16,7 +16,7 @@
 ### Code:
 
 ## * header
-rm(list = ls())
+## rm(list = ls())
 if(FALSE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
diff --git a/tests/testthat/test3-multcomp.R b/tests/testthat/test3-multcomp.R
index c890666..b24040d 100644
--- a/tests/testthat/test3-multcomp.R
+++ b/tests/testthat/test3-multcomp.R
@@ -3,9 +3,9 @@
 ## Author: Brice Ozenne
 ## Created: nov 29 2017 (15:22) 
 ## Version: 
-## Last-Updated: feb 25 2019 (09:37) 
+## Last-Updated: Jan 12 2022 (14:46) 
 ##           By: Brice Ozenne
-##     Update #: 116
+##     Update #: 130
 ##----------------------------------------------------------------------
 ## 
 ### Commentary: 
@@ -29,7 +29,7 @@
 ### Code:
 
 ## * header
-rm(list = ls())
+## rm(list = ls())
 if(FALSE){ ## already called in test-all.R
     library(testthat)
     library(lavaSearch2)
@@ -37,88 +37,107 @@ if(FALSE){ ## already called in test-all.R
 
 library(multcomp)
 library(sandwich)
+library(emmeans)
 lava.options(symbols = c("~","~~"))
 
 context("multcomp - mmm")
 
 ## * simulation
-mSim <- lvm(c(Y1,Y2,Y3,Y4)~ beta * eta, E ~ 1)
+mSim <- lvm(c(Y1,Y2,Y3,Y4)~ beta * eta,
+            E ~ 1, Y1 ~ 0.25*T1 + 0.5*T2 + 0.05*T3)
 latent(mSim) <- "eta"
 set.seed(10)
 n <- 1e2
 
 df.data <- lava::sim(mSim, n, latent = FALSE, p = c(beta = 1))
+df.data$eY1 <- exp(df.data$Y1)
+
+## * linear regressions with logical constrains
+e.lm <- lm(Y1 ~ T1 + T2 + T3, data = df.data)
+e.lvm <- estimate(lvm(Y1 ~ T1 + T2 + T3), data = df.data)
+## summary(e.lm)
+
+test_that("glht vs. glht2 (logical constrains)", {
+    e.glht <- glht(e.lm, linfct = c("T2-T1=0",
+                                    "T2-T3=0",
+                                    "T1-T3=0"))
+    ## summary(e.glht, test = adjusted("none"))
+    ## summary(e.glht, test = adjusted("bonferroni"))
+
+    e.glht2 <- glht2(e.lvm, linfct = c("Y1~T2-Y1~T1=0",
+                                       "Y1~T2-Y1~T3=0",
+                                       "Y1~T1-Y1~T3=0"))
+    
+    expect_equal(unname(e.glht$vcov),unname(e.glht2$vcov[1:4,1:4]), tol = 1e-6)
+    expect_equal(unname(e.glht$coef),unname(e.glht2$coef[1:4]), tol = 1e-6)
+
+    eS.glht <- summary(e.glht, test = adjusted("Shaffer"))
+    eS.glht2 <- summary(e.glht2, test = adjusted("Shaffer"))
+
+    expect_equivalent(eS.glht$test[c("coefficients","sigma","tstat","pvalues")],
+                      eS.glht2$test[c("coefficients","sigma","tstat","pvalues")], tol = 1e-6)
+})
+
+test_that("glht2 (back-transformation)", {
+    e.log.lvm <- estimate(lvm(log(eY1) ~ T1 + T2 + T3), data = df.data)
+
+    e.glht2 <- glht2(e.log.lvm, linfct = c("eY1~T1","eY1~T2","eY1~T3"))
+    df.glht2 <- summary(e.glht2, transform = "exp", test = adjusted("none"))$table2
+
+    e.glht2.bis <- glht2(e.log.lvm, linfct = "eY1~T3")
+    df.glht2.bis <- summary(e.glht2.bis, transform = exp, test = adjusted("none"))$table2
+
+    expect_equal(as.double(df.glht2[3,]) , as.double(df.glht2.bis[1,]))
+})
 
 ## * list of linear regressions
-name.Y <- setdiff(endogenous(mSim),"E")
+name.Y <- setdiff(endogenous(mSim),"E")[1:2]
 n.Y <- length(name.Y)
 
-ls.formula <- lapply(paste0(name.Y,"~","E"),as.formula)
-ls.lm <- lapply(ls.formula, lm, data = df.data)
+ls.lm <- lapply(name.Y, function(iY){
+    eval(parse( text = paste("lm(",iY,"~E, data = df.data)")))
+})
 names(ls.lm) <- name.Y
 class(ls.lm) <- "mmm"
 
+ls.lvm <- lapply(name.Y, function(iY){
+    eval(parse( text = paste("estimate(lvm(",iY,"~E), data = df.data)")))
+})
+names(ls.lvm) <- name.Y
+class(ls.lvm) <- "mmm"
+
 test_that("glht vs. glht2 (list lm): information std", {
     e.glht <- glht(ls.lm, mlf("E = 0"))
-
-    resC <- createContrast(ls.lm, var.test = "E", add.variance = TRUE)
-    name.all <- colnames(resC$contrast)
-    name.mean <- name.all[-grep("sigma",name.all)]
-
-    e.glht2 <- glht2(ls.lm, linfct = resC$contrast,
-                     bias.correct = FALSE, robust = FALSE)
-
-    expect_equal(e.glht$vcov,
-                 n/(n-2)*e.glht2$vcov[name.mean,name.mean])
-    expect_equal(e.glht$coef,e.glht2$coef[names(e.glht$coef)])
-    expect_equivalent(e.glht$linfct,e.glht2$linfct[,name.mean])
+    e.glht2 <- glht2(ls.lvm, linfct = "E")
+    e.glht2C <- glht2(ls.lvm, linfct = createContrast(ls.lvm, linfct = "E")$contrast)
 
     eS.glht <- summary(e.glht)
     eS.glht2 <- summary(e.glht2)
+    eS.glht2C <- summary(e.glht2C)
 
-    expect_equal(eS.glht$test$tstat, 1/sqrt(n/(n-2))*eS.glht2$test$tstat)
+    expect_equivalent(eS.glht2$test, eS.glht2C$test, tol = 1e-6)
+    expect_equal(unname(eS.glht$test$tstat), unname(eS.glht2$test$tstat), tol = 1e-6)
 })
      
 test_that("glht vs. glht2 (list ml): robust std", {
-    e.glht <- glht(ls.lm, mlf("E = 0"), vcov = sandwich)
-
-    resC <- createContrast(ls.lm, var.test = "E", add.variance = TRUE)
-    name.all <- colnames(resC$contrast)
-    name.mean <- name.all[-grep("sigma",name.all)]
-    e.glht2 <- glht2(ls.lm, linfct = resC$contrast,
-                     bias.correct = FALSE, robust = TRUE)
-
-    expect_equivalent(e.glht$vcov,
-                      e.glht2$vcov[name.mean,name.mean])
-    expect_equal(e.glht$coef,e.glht2$coef[name.mean])
-    expect_equivalent(e.glht$linfct,e.glht2$linfct[,name.mean])
-
-    eS.glht <- summary(e.glht)
-    eS.glht2 <- summary(e.glht2)
-
-    expect_equal(eS.glht$test$tstat, eS.glht2$test$tstat)
+    e.glht <- summary(glht(ls.lm, mlf("E = 0"), vcov = sandwich))
+    e.lava <- rbind(estimate(ls.lm[[1]])$coefmat[2,,drop=FALSE],
+                    estimate(ls.lm[[2]])$coefmat[2,,drop=FALSE])
+    ## no correction for the score
+    e.glht0 <- summary(glht2(ls.lvm, linfct = "E", robust = TRUE, ssc = "residuals0"))
+    ## correction for the score by inflating the residuals such that they have correct variance
+    e.glht2 <- summary(glht2(ls.lvm, linfct = "E", robust = TRUE))
+    e.glht2C <- summary(glht2(ls.lvm, linfct = createContrast(ls.lvm, linfct = "E")$contrast, robust = TRUE))
+
+    expect_equivalent(e.glht0$test$tstat, e.glht$test$tstat, tol = 1e-6)
     ## cannot compare p.values
     ## because some are based on a student law and others on a gaussian law
-})
-
-test_that("glht2 vs. lava (ml): robust std", {
-    lsRed.lm <- ls.lm[1:2]
-    class(lsRed.lm) <- "mmm"
-
-    resC <- createContrast(lsRed.lm, var.test = "E", add.variance = TRUE)
-    name.all <- colnames(resC$contrast)
-    name.mean <- name.all[-grep("sigma",name.all)]
-    e.glht2 <- glht2(lsRed.lm, linfct = resC$contrast,
-                     bias.correct = FALSE, robust = TRUE, df = FALSE)
 
-    GS <- estimate(ls.lm[[1]], cluster = 1:n)$coefmat
-    test <- summary(e.glht2, test = adjusted("none"))$test
-    
-    expect_equal(as.double(test$sigma[1]), GS["E","Std.Err"], tol = 1e-8)
-    expect_equal(as.double(test$pvalues[1]), GS["E","P-value"], tol = 1e-8)
-    ##
+    expect_equivalent(e.glht2$test, e.glht2C$test, tol = 1e-6)
+    expect_equivalent(e.glht2$test$tstat, e.glht$test$tstat*sqrt(coef(estimate2(ls.lvm[[1]], ssc = "none"))["Y1~~Y1"])/sigma(ls.lm[[1]]), tol = 1e-6)
 })
 
+
 test_that("glht vs. calcDistMaxIntegral", {
     e.glht <- glht(ls.lm, mlf("E = 0"), vcov = sandwich)
     res.GS <- summary(e.glht)
@@ -150,14 +169,14 @@ e.lvm <- estimate(m.lvm, df.data)
 
 test_that("glht vs. glht2 (lvm): information std", {
 
-    resC <- createContrast(e.lvm, par = c("eta~E","Y2=1","Y3=1"))
+    resC <- createContrast(e.lvm, linfct = c("eta~E","Y2=1","Y3=1"))
     e.glht.null <- glht(e.lvm, linfct = resC$contrast)
     e.glht.H1 <- glht(e.lvm, linfct = resC$contrast, rhs = resC$null)
 
     e.glht2.null <- glht2(e.lvm, linfct = resC$contrast, rhs = rep(0,3),
-                          bias.correct = FALSE)
+                          ssc = "none")
     e.glht2.H1 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null,
-                        bias.correct = FALSE)
+                        ssc = "none")
 
 
     eS.glht.null <- summary(e.glht.null)
@@ -180,34 +199,20 @@ mmm.lvm <- mmm(Y1 = estimate(lvm(Y1~E), data = df.data),
                Y4 = estimate(lvm(Y4~E), data = df.data)
                )
 
-test_that("glht vs. glht2 (list lvm): information std", {
+test_that("glht2 (list lvm): information std", {
 
-    ##
-    resC <- createContrast(mmm.lvm, var.test = "E")
+    ##    
+    resC <- createContrast(mmm.lvm, linfct = paste0("Y",1:4,": Y",1:4,"~E"))
     lvm2.glht <- glht2(mmm.lvm, linfct = resC$contrast,
-                       bias.correct = FALSE, robust = FALSE)
+                       ssc = NA, robust = FALSE)
     lvm2.sglht <- summary(lvm2.glht)    
-
+    expect_equal(lvm2.sglht$df,100)
+    
     lvm3.glht <- glht2(mmm.lvm, linfct = resC$contrast,
                        rhs = rnorm(4),
-                       bias.correct = FALSE, robust = FALSE)
+                       ssc = NA, robust = FALSE)
     lvm3.sglht <- summary(lvm3.glht)    
-
-    ##
-    lvm.glht <- glht(mmm.lvm, linfct = resC$contrast)
-    lvm.glht$df <- NROW(df.data)
-    lvm.sglht <- summary(lvm.glht)
-
-    ## compare
-    expect_equal(as.numeric(lvm2.sglht$test$coefficients),
-                 as.numeric(lvm.sglht$test$coefficients))
-
-    expect_equal(as.numeric(lvm2.sglht$test$sigma),
-                 as.numeric(lvm.sglht$test$sigma))
-
-    expect_equal(as.numeric(lvm2.sglht$test$pvalues),
-                 as.numeric(lvm.sglht$test$pvalues),
-                 tol = attr(lvm.sglht$test$pvalues,"error"))
+    expect_equal(lvm3.sglht$df,100)
 })
 
 ##----------------------------------------------------------------------
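
Note: the multcomp tests above show the 2.x contrast interface:
createContrast() and glht2() take a linfct argument (replacing par= and
var.test=), and glht2() accepts ssc= instead of bias.correct=. A minimal
sketch, with simulated covariates echoing the test's T1/T2/T3 but otherwise
hypothetical:

    library(lava)
    library(lavaSearch2)
    library(multcomp)

    set.seed(10)
    d <- lava::sim(lvm(Y1 ~ T1 + T2 + T3), n = 100)
    e.lvm <- estimate(lvm(Y1 ~ T1 + T2 + T3), data = d)

    ## 2.x: contrasts are written as equations on coefficient names.
    resC <- createContrast(e.lvm, linfct = c("Y1~T2-Y1~T1=0",
                                             "Y1~T2-Y1~T3=0"))
    e.glht2 <- glht2(e.lvm, linfct = resC$contrast, rhs = resC$null,
                     ssc = "none")  # ssc replaces the former bias.correct
    summary(e.glht2, test = adjusted("none"))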
diff --git a/vignettes/overview.pdf.asis b/vignettes/overview.pdf.asis
new file mode 100644
index 0000000..ff145cc
--- /dev/null
+++ b/vignettes/overview.pdf.asis
@@ -0,0 +1,5 @@
+%\VignetteIndexEntry{lavaSearch2: overview}
+%\VignetteEngine{R.rsp::asis}
+%\VignetteKeyword{PDF}
+%\VignetteKeyword{vignette}
+%\VignetteKeyword{package}
\ No newline at end of file
