psy/
psy/NAMESPACE
# Generated by roxygen2: do not edit by hand

export(ckappa)
export(cronbach)
export(fpca)
export(icc)
export(lkappa)
export(mdspca)
export(mtmm)
export(scree.plot)
export(sphpca)
export(wkappa)
importFrom(grDevices,dev.new)
importFrom(graphics,abline)
importFrom(graphics,boxplot)
importFrom(graphics,lines)
importFrom(graphics,par)
importFrom(graphics,points)
importFrom(graphics,stripchart)
importFrom(graphics,symbols)
importFrom(graphics,text)
importFrom(stats,anova)
importFrom(stats,cor)
importFrom(stats,cov)
importFrom(stats,lm)
importFrom(stats,median)
importFrom(stats,model.extract)
importFrom(stats,na.omit)
importFrom(stats,optim)
importFrom(stats,reshape)
importFrom(stats,rnorm)
importFrom(stats,sd)
importFrom(stats,var)
psy/data/
psy/data/expsy.rda
psy/data/sleep.rda
psy/data/ehd.rda
psy/man/
psy/man/wkappa.Rd
\name{wkappa}
\alias{wkappa}
\title{weighted Kappa for 2 raters}
\description{
Computes a weighted Kappa for agreement in the case of 2
raters. The diagnosis (the object of the rating) may have k possible ordered values.
}
\usage{
wkappa(r,weights="squared")
}
\arguments{
\item{r}{n*2 matrix or dataframe, n subjects and 2 raters}
\item{weights}{weights="squared" to obtain squared weights. If not, absolute weights are computed (see details)}
}
\details{
Diagnoses have to be coded by numbers (ordered naturally). For weights="squared", weights are related to squared differences between row and column indices (in this situation wkappa is close to an icc). For weights!="squared", weights are related to absolute values of differences between row and column indices.
The function is designed to deal with the case where the two raters do not use exactly the same set of rating categories.
Missing values are omitted.
}
\value{
A list with :
\item{$table}{the 2*k table of raw data (first rater in rows, second rater in columns)}
\item{$weights}{"squared" or "absolute"}
\item{$kappa}{Weighted Kappa}
}
\references{Cohen, J. Weighted kappa: nominal scale agreement with provision for scaled disagreement or partial credit. Psychological Bulletin 70 (1968): 213-220.
}
\author{Bruno Falissard}
\examples{
data(expsy)
wkappa(expsy[,c(11,13)])      # weighted kappa (squared weights)
#to obtain a 95% confidence interval:
#library(boot)
#wkappa.boot <- function(data,x) {wkappa(data[x,])[[3]]}
#res <- boot(expsy[,c(11,13)],wkappa.boot,1000)
#quantile(res$t,c(0.025,0.975)) # two-sided bootstrapped confidence interval of weighted kappa
#boot.ci(res,type="bca") # adjusted bootstrap percentile (BCa) confidence interval (better)
}
\keyword{univar}
psy/man/cronbach.Rd
\name{cronbach}
\alias{cronbach}
\title{Cronbach's coefficient alpha}
\description{
Computes Cronbach's reliability coefficient alpha. This coefficient may be applied to a series of items intended to be aggregated into a single score. It estimates reliability in the framework of the domain sampling model.
}
\usage{
cronbach(v1)
}
\arguments{
\item{v1}{n*p matrix or dataframe, n subjects and p items}
}
\details{
Missing values are omitted in a "listwise" way (all items are removed even if only one of them is missing).
}
\value{
A list with :
\item{$sample.size}{Number of subjects under study}
\item{$number.of.items}{Number of items of the scale or questionnaire}
\item{$alpha}{alpha}
}
\references{Nunnally, J.C., Bernstein, I.H. (1994), Psychometric Theory, 3rd edition, McGraw-Hill Series in Psychology.}
\author{Bruno Falissard}
\examples{
data(expsy)
cronbach(expsy[,1:10])    ## not good because item 2 is reversed (1 is high and 4 is low)
cronbach(cbind(expsy[,c(1,3:10)],-1*expsy[,2]))     ## better
#to obtain a 95% confidence interval:
#datafile <- cbind(expsy[,c(1,3:10)],-1*expsy[,2])
#library(boot)
#cronbach.boot <- function(data,x) {cronbach(data[x,])[[3]]}
#res <- boot(datafile,cronbach.boot,1000)
#quantile(res$t,c(0.025,0.975)) ## two-sided bootstrapped confidence interval of Cronbach's alpha
#boot.ci(res,type="bca") ## adjusted bootstrap percentile (BCa) confidence interval (better)
}
\keyword{univar}
psy/man/scree.plot.Rd
\name{scree.plot}
\alias{scree.plot}
\title{Screeplot of eigenvalues, simulated data are available}
\description{
Graphical representation of the eigenvalues of a correlation/covariance matrix.
Useful for determining the dimensional structure of a set of variables.
Simulations are proposed to help the interpretation.
}
\usage{
scree.plot(namefile, title = "Scree Plot", type = "R", use = "complete.obs", simu = "F")
}
\arguments{
\item{namefile}{dataset}
\item{title}{Title}
\item{type}{type="R" to obtain the eigenvalues of the correlation matrix of the dataset, type="V" for the covariance matrix, type="M" if the input data is directly the matrix, type="E" if the input data are directly the eigenvalues}
\item{use}{omit missing values by default, use="P" to analyse the pairwise correlation/covariance matrix}
\item{simu}{simu=p to add p screeplots of simulated random normal data (same number of patients and variables as in the original data set, same pattern of missing data if use="P")}
}
\details{
Simulations sometimes lead to underestimating the actual number of dimensions (as opposed to the Kaiser rule: eigenvalues greater than 1). Basically, simu=20 is enough.
}
\value{
a plot
}
\references{Horn, JL (1965) A Rationale and Test for the Number of Factors in Factor Analysis, Psychometrika, 30, 179-185.
Cattell, RB (1966) The scree test for the number of factors. Multivariate Behavioral Research, 1, 245-276.}
\author{Bruno Falissard}
\examples{
data(expsy)
scree.plot(expsy[,1:10],simu=20,use="P")
#no obvious structure with such a small sample
}
\keyword{multivariate}
psy/man/lkappa.Rd
\name{lkappa}
\alias{lkappa}
\title{Light's kappa for n raters}
\description{
Computes Light's Kappa for agreement in the case of n raters. The diagnosis (the object of the rating) may have k possible values (ordered or not).
}
\usage{
lkappa(r, type="Cohen", weights="squared")
}
\arguments{
\item{r}{m*n matrix, m subjects and n raters}
\item{type}{type="Cohen" for a categorical diagnosis. If not, the diagnosis is assumed to be ordered}
\item{weights}{weights="squared" for a weighted kappa with squared weights. If not, weights are computed from absolute values.}
}
\details{
Light's Kappa is equal to the mean of the n(n-1)/2 kappas obtained from each pair of raters. Missing values are omitted locally when considering each pair of raters.
If type="Cohen" the diagnosis is considered as a categorical variable. If not, the diagnosis is considered as an ordered variable and weighted kappas are computed. In this last situation, the type of weights that is used (squared or absolute values) is given by the argument weights.
}
\value{
kappa (mean of the n(n-1)/2 kappas obtained from each pair of raters)
}
\references{
Conger, A.J. (1980), Integration and generalisation of Kappas for multiple raters, Psychological Bulletin, 88, 322-328.
}
\author{Bruno Falissard}
\examples{
data(expsy)
lkappa(expsy[,c(11,13,15)]) # Light's kappa for non binary diagnosis
lkappa(expsy[,c(12,14,16)]) # Light's kappa for binary diagnosis
lkappa(expsy[,c(11,13,15)], type="weighted") # Light's kappa for non binary ordered diagnosis
#to obtain a 95% confidence interval:
#library(boot)
#lkappa.boot <- function(data,x) {lkappa(data[x,], type="weighted")}
#res <- boot(expsy[,c(11,13,15)],lkappa.boot,1000)
#quantile(res$t,c(0.025,0.975)) # Bootstrapped confidence interval of Light's kappa
#boot.ci(res,type="bca") # adjusted bootstrap percentile (BCa) confidence interval
}
\keyword{univar}
psy/man/mtmm.Rd
\name{mtmm}
\alias{mtmm}
\title{Multitrait Multimethod approach to scale validation}
\description{
This function is intended to assess the convergent and discriminant validity of subscales of a given scale.
Items belonging to the same subscale should correlate highly amongst themselves. Items belonging to different subscales should not correlate highly. This approach is simpler and more robust than confirmatory factor analysis (CFA). It can be interesting to verify (at least approximately) the proposed structure of an existing instrument in a new population. Most psychometricians will however prefer CFA.
}
\usage{mtmm(datafile,x,color=FALSE,itemTot=FALSE,graphItem=FALSE,stripChart=FALSE,namesDim=NULL)
}
\arguments{
\item{datafile}{name of datafile}
\item{x}{a list of variable names (as many elements as there are subscales)}
\item{color}{boxplots are drawn in colour: FALSE = no colours, just grey and white (by default), TRUE = with colours}
\item{itemTot}{if TRUE, for subscale i (i=1,...,n), boxplot of Pearson's correlations between the total score of subscale i and the items of subscale j (j=1,...,n). If j=i, the item is omitted in the computation of the total score. If FALSE, for subscale i (i=1,...,n), boxplot of Pearson's correlations between the items of subscale i and the items of subscale j (j=1,...,n)}
\item{graphItem}{if TRUE, each correlation is represented graphically}
\item{stripChart}{if TRUE, dot charts are preferred to boxplots. Useful with a small number of items}
\item{namesDim}{Labels for each boxplot}
}
\value{
For subscale i (i=1,...,n), displays the n boxplots of the distributions of the Pearson's correlations between items of subscale i and items of subscale j (j=1,...,n). If j=i, the correlation of a given item with itself is omitted. The boxplot for i=j (grey by default) should be above the boxplots for i!=j.
Likewise, the correlation of an item with the global score of its subscale should be above its correlations with the global scores of the other subscales.
}
\author{Adeline Abbe}
\examples{
data(ehd)
par(mfrow=c(1,5))
mtmm(ehd,list(c("e15","e18","e19","e20"),c("e4","e5","e6","e14","e17"),c("e11","e13","e16")
,c("e1","e10","e12"),c("e2","e3","e7","e8","e9")))
# Boxplots of the distributions of the Pearson's correlations between total score of
# subscale i and the items of subscale j
par(mfrow=c(1,5))
mtmm(ehd,list(c("e15","e18","e19","e20"),c("e4","e5","e6","e14","e17"),c("e11","e13","e16")
,c("e1","e10","e12"),c("e2","e3","e7","e8","e9")))
# Pearson's correlations between total score of subscale i and all items
par(mfrow=c(3,2))
mtmm(ehd,list(c("e15","e18","e19","e20"),c("e4","e5","e6","e14","e17"),c("e11","e13","e16")
,c("e1","e10","e12"),c("e2","e3","e7","e8","e9")),graphItem=TRUE)
}
psy/man/ehd.Rd
\name{ehd}
\alias{ehd}
\non_function{}
\title{Depressive Mood Scale}
\usage{data(ehd)}
\description{
A data frame with 269 observations on the following 20 variables.
Jouvent, R et al 1988 La clinique polydimensionnelle de humeur depressive. Nouvelle version echelle EHD : Polydimensional rating scale of depressive mood. Psychiatrie et Psychobiologie.
} \format{ This data frame contains the following columns: \describe{ \item{e1}{Observed painfull sadness} \item{e2}{Emotional hyperexpressiveness} \item{e3}{Emotional instability} \item{e4}{Observed monotony} \item{e5}{Lack spontaneous expressivity} \item{e6}{Lack affective reactivity} \item{e7}{Emotional incontinence} \item{e8}{Affective hyperesthesia} \item{e9}{Observed explosive mood} \item{e10}{Worried gesture} \item{e11}{Observed anhedonia} \item{e12}{Felt sadness} \item{e13}{Situational anhedonia} \item{e14}{Felt affective indifference} \item{e15}{Hypersensibility unpleasent events} \item{e16}{Sensory anhedonia} \item{e17}{Felt affective monotony} \item{e18}{Felt hyperemotionalism} \item{e19}{Felt irritability} \item{e20}{Felt explosive mood} } } \source{ Jouvent, R et al 1988 La clinique polydimensionnelle de humeur depressive. Nouvelle version echelle EHD : Polydimensional rating scale of depressive mood. Psychiatrie et Psychobiologie.} \examples{ data(ehd) str(ehd) } \keyword{datasets} psy/man/mdspca.Rd0000644000176200001440000000343214230252631013376 0ustar liggesusers\name{mdspca} \alias{mdspca} \title{Graphical representation of a correlation matrix using a Principal Component Analysis} \description{ Similar to many routines, the interest is in the possible representation of both variables and subjects (and by the way categorical variables) with active and supplementary points. Missing data are omitted. } \usage{ mdspca(datafile, supvar="no", supsubj="no", namesupvar=colnames(supvar,do.NULL=FALSE), namesupsubj=colnames(supsubj, do.NULL=FALSE), dimx=1, dimy=2, cx=0.75) } \arguments{ \item{datafile}{name of datafile} \item{supvar}{matrix corresponding to supplementary variables (if any), supvar="no" by default} \item{supsubj}{matrix corresponding to supplementary subjects (if any), supsubj="no" by default} \item{namesupvar}{names of the points corresponding to the supplementary variables} \item{namesupsubj}{names of the points corresponding to the supplementary subjects} \item{dimx}{rank of the component displayed on the x axis (1 by default)} \item{dimy}{rank of the component displayed on the y axis (2 by default)} \item{cx}{size of the lettering (0.75 by default, 1 for bigger letters, 0.5 for smaller)} } \value{ A diagram (two diagrams if supplementary subjects are used) } \author{Bruno Falissard} \examples{ data(sleep) mdspca(sleep[,c(2:5,7:11)]) ## three consistent groups of variables, paradoxical sleep (in other words: dream) ## is negatively correlated with danger mdspca(sleep[,c(2:5,7:11)],supvar=sleep[,6],namesupvar="Total.sleep",supsubj=sleep[,1], namesupsubj="",cx=0.5) ## Total.sleep is here a supplementary variable since it is deduced ## from Paradoxical.sleep and Slow.wave.sleep ## The variable Species is displayed in the subject plane, ## Rabbit and Cow have a high level of danger } \keyword{multivariate} psy/man/psy-package.Rd0000644000176200001440000000252411314443444014340 0ustar liggesusers\name{psy} \alias{psy} \docType{package} \title{Various procedures used in psychometry} \description{Kappa, Intra class correlation coefficient, Cronbach alpha, Scree plot, Multitraits multimethods, Spherical representation of a correlation matrix} \details{ \tabular{ll}{ Package: \tab psy\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2009-12-23\cr License: \tab free\cr LazyLoad: \tab yes\cr } } \author{Bruno Falissard } \references{ Falissard B, A spherical representation of a correlation matrix, Journal of Classification (1996), 13:2, 267-280.\cr Horn, JL (1965) A 
Rationale and Test for the Number of Factors in Factor Analysis, Psychometrika, 30, 179-185.\cr Mammals: Ecological and Constitutional Correlates, by Allison, T. and Cicchetti, D. (1976) Science, November 12, vol. 194, pp.732-734\cr Jouvent, R et al 1988 La clinique polydimensionnelle de humeur depressive. Nouvelle version echelle EHD : Polydimensional rating scale of depressive mood. Psychiatrie et Psychobiologie. } \keyword{package} \examples{ data(sleep) sphpca(sleep[,c(2:5,7:11)]) data(expsy) scree.plot(expsy[,1:10],simu=20,use="P") data(ehd) par(mfrow=c(1,5)) mtmm(ehd,list(c("e15","e18","e19","e20"),c("e4","e5","e6","e14","e17"),c("e11","e13","e16") ,c("e1","e10","e12"),c("e2","e3","e7","e8","e9"))) } psy/man/icc.Rd0000644000176200001440000000340611765364774012714 0ustar liggesusers\name{icc} \alias{icc} \title{Intraclass correlation coefficient (ICC)} \description{ Computes the ICC of several series of measurements, for example in an interrater agreement study. Two types of ICC are proposed: consistency and agreement. } \usage{ icc(data) } \arguments{ \item{data}{n*p matrix or dataframe, n subjects p raters} } \details{ Missing data are omitted in a listwise way. The "agreement" ICC is the ratio of the subject variance by the sum of the subject variance, the rater variance and the residual; it is generally prefered. The "consistency" version is the ratio of the subject variance by the sum of the subject variance and the residual; it may be of interest when estimating the reliability of pre/post variations in measurements. } \value{ A list with : \item{$nb.subjects}{number of subjects under study} \item{$nb.raters}{number of raters} \item{$subject.variance}{subject variance} \item{$rater.variance}{rater variance} \item{$residual}{residual variance} \item{$icc.consistency}{Intra class correlation coefficient, "consistency" version} \item{$icc.agreement}{Intra class correlation coefficient, "agreement" version} } \references{Shrout, P.E., Fleiss, J.L. 
(1979), Intraclass correlation: uses in assessing rater reliability, Psychological Bulletin, 86, 420-428.} \author{Bruno Falissard} \examples{ data(expsy) icc(expsy[,c(12,14,16)]) #to obtain a 95%confidence interval: #library(boot) #icc.boot <- function(data,x) {icc(data[x,])[[7]]} #res <- boot(expsy[,c(12,14,16)],icc.boot,1000) #quantile(res$t,c(0.025,0.975)) # two-sided bootstrapped confidence interval of icc (agreement) #boot.ci(res,type="bca") # adjusted bootstrap percentile (BCa) confidence interval (better) } \keyword{univar} psy/man/fpca.Rd0000644000176200001440000001066614230252437013053 0ustar liggesusers\name{fpca} \alias{fpca} \title{Focused Principal Components Analysis} \description{ Graphical representation similar to a principal components analysis but adapted to data structured with dependent/independent variables } \usage{ fpca(formula=NULL,y=NULL, x=NULL, data, cx=0.75, pvalues="No", partial="Yes", input="data", contraction="No", sample.size=1) } \arguments{ \item{formula}{"model" formula, of the form y ~ x } \item{y}{column number of the dependent variable} \item{x}{column numbers of the independent (explanatory) variables} \item{data}{name of datafile} \item{cx}{size of the lettering (0.75 by default, 1 for bigger letters, 0.5 for smaller)} \item{pvalues}{vector of prespecified pvalues (pvalues="No" by default) (see below)} \item{partial}{partial="Yes" by default, corresponds to the original method (see below)} \item{input}{input="Cor" for a correlation matrix (input="data" by default)} \item{contraction}{change the aspect of the diagram, contraction="Yes" is convenient for large data set (contraction="No" by default)} \item{sample.size}{to be specified if input="Cor"} } \details{ This representation is close to a Principal Components Analysis (PCA). Contrary to PCA, correlations between the dependent variable and the other variables are represented faithfully. The relationships between non dependent variables are interpreted like in a PCA: correlated variables are close or diametrically opposite (for negative correlations), independent variables make a right angle with the origin. The focus on the dependent variable leads formally to a partialisation of the correlations between the non dependent variables by the dependent variable (see reference). To avoid this partialisation, the option partial="No" can be used. It may be interesting to represent graphically the strength of association between the dependent variable and the other variables using p values coming from a model. A vector of pvalue may be specified in this case. } \value{ A plot (q plots in fact). } \references{Falissard B, Focused Principal Components Analysis: looking at a correlation matrix with a particular interest in a given variable. Journal of Computational and Graphical Statistics (1999), 8(4): 906-912.} \author{Bruno Falissard, Bill Morphey, Adeline Abbe} \examples{ data(sleep) fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+ Gestation.time+Predation+Sleep.exposure+Danger,data=sleep) fpca(y="Paradoxical.sleep",x=c("Body.weight","Brain.weight","Slow.wave.sleep", "Maximum.life.span","Gestation.time","Predation","Sleep.exposure","Danger"),data=sleep) ## focused PCA of the duration of paradoxical sleep (dreams, 5th column) ## against constitutional variables in mammals (columns 2, 3, 4, 7, 8, 9, 10, 11). ## Variables inside the red cercle are significantly correlated ## to the dependent variable with p<0.05. 
## Green variables are positively correlated to the dependent variable, ## yellow variables are negatively correlated. ## There are three clear clusters of independent variables. corsleep <- as.data.frame(cor(sleep[,2:11],use="pairwise.complete.obs")) fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+ Gestation.time+Predation+Sleep.exposure+Danger, data=corsleep,input="Cor",sample.size=60) ## when missing data are numerous, the representation of a pairwise correlation ## matrix may be preferred (even if mathematical properties are not so good...) numer <- c(2:4,7:11) l <- length(numer) resu <- vector(length=l) for(i in 1:l) { int <- sleep[,numer[i]] mod <- lm(sleep$Paradoxical.sleep~int) resu[i] <- summary(mod)[[4]][2,4]*sign(summary(mod)[[4]][2,1]) } fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+ Gestation.time+Predation+Sleep.exposure+Danger, data=sleep,pvalues=resu) ## A representation with p values ## When input="Cor" or pvalues="Yes" partial is turned to "No" mod <- lm(sleep$Paradoxical.sleep~sleep$Body.weight+sleep$Brain.weight+ sleep$Slow.wave.sleep+sleep$Maximum.life.span+sleep$Gestation.time+ sleep$Predation+sleep$Sleep.exposure+sleep$Danger) resu <- summary(mod)[[4]][2:9,4]*sign(summary(mod)[[4]][2:9,1]) fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+ Gestation.time+Predation+Sleep.exposure+Danger, data=sleep,pvalues=resu) ## A representation with p values which come from a multiple linear model ## (here results are difficult to interpret) } \keyword{multivariate} psy/man/expsy.Rd0000644000176200001440000000350011314423042013267 0ustar liggesusers\name{expsy} \alias{expsy} \non_function{} \title{Data set related to psychometry} \usage{data(expsy)} \description{ The \code{expsy} data frame has 30 rows and 16 columns with missing data. it1-it10 correspond to the rating of 30 patients with a 10 items scale. r1, r2, r3 to the rating of item 1 by 3 different clinicians of the same 30 patients. rb1, rb2, rb3 to the binary transformation of r1, r2, r3 (1 or 2 -> 0; and 3 or 4 -> 1) . 
} \format{ This data frame contains the following columns: \describe{ \item{it1}{a numeric vector corresponding to item 1 (rated from 1:low to 4:high)} \item{it2}{a numeric vector corresponding to item 2 (rated from 1:high to 4:low)} \item{it3}{a numeric vector corresponding to item 3 (rated from 1:low to 4:high)} \item{it4}{a numeric vector corresponding to item 4 (rated from 1:low to 4:high)} \item{it5}{a numeric vector corresponding to item 5 (rated from 1:low to 4:high)} \item{it6}{a numeric vector corresponding to item 6 (rated from 1:low to 4:high)} \item{it7}{a numeric vector corresponding to item 7 (rated from 1:low to 4:high)} \item{it8}{a numeric vector corresponding to item 8 (rated from 1:low to 4:high)} \item{it9}{a numeric vector corresponding to item 9 (rated from 1:low to 4:high)} \item{it10}{a numeric vector corresponding to item 10 (rated from 1:low to 4:high)} \item{r1}{a numeric vector corresponding to item 1 rated by rater 1} \item{rb1}{binary transformation of r1} \item{r2}{a numeric vector corresponding to item 1 rated by rater 2} \item{rb2}{binary transformation of r2} \item{r3}{a numeric vector corresponding to item 1 rated by rater 3} \item{rb3}{binary transformation of r3} } } \source{ artificial data} \examples{ data(expsy) expsy[1:4,] } \keyword{datasets} psy/man/sleep.Rd0000644000176200001440000000261411314435506013244 0ustar liggesusers\name{sleep} \alias{sleep} \non_function{} \title{ Ecological and Constitutional Data in Mammals } \usage{data(sleep)} \description{ Data from which conclusions were drawn in the article Mammals: Ecological and Constitutional Correlates, by Allison, T. and Cicchetti, D. (1976) Science, November 12, vol. 194, pp.732-734 } \format{ This data frame contains the following columns: \describe{ \item{Species}{a factor with levels} \item{Body.weight}{a numeric vector, body weight in kg} \item{Brain.weight}{a numeric vector, Brain weight in g} \item{Slow.wave.sleep}{a numeric vector, nondreaming sleep (hrs/day)} \item{Paradoxical.sleep}{a numeric vector, dreaming sleep (hrs/day)} \item{Total.sleep}{a numeric vector, nondreaming + "dreaming" (hrs/day)} \item{Maximum.life.span}{a numeric vector (in years)} \item{Gestation.time}{a numeric vector (in days)} \item{Predation}{a numeric vector, Predation index (1 min - 5 max)} \item{Sleep.exposure}{a numeric vector, Sleep exposure index (1 min - 5 max)} \item{Danger}{a numeric vector, Overall danger index (1 min - 5 max)} } } \source{ http://lib.stat.cmu.edu/datasets/sleep } \references{ Mammals: Ecological and Constitutional Correlates, by Allison, T. and Cicchetti, D. (1976) Science, November 12, vol. 194, pp.732-734 } \examples{ data(sleep) str(sleep) } \keyword{datasets} psy/man/sphpca.Rd0000644000176200001440000001164614230253632013415 0ustar liggesusers\name{sphpca} \alias{sphpca} \title{Spherical Representation of a Correlation Matrix} \description{ Graphical representation of a correlation matrix, similar to principal component analysis (PCA) but the mapping is on a sphere. The information is close to a 3d PCA, the picture is however easier to interpret since the variables are in fact on a 2d map. 
}
\usage{
sphpca(datafile, h=0, v=0, f=0, cx=0.75, nbsphere=2, back=FALSE, input="data",
method="approx", maxiter=500, output=FALSE)
}
\arguments{
\item{datafile}{name of datafile}
\item{h}{rotation of the sphere on a horizontal plane (in degrees)}
\item{v}{rotation of the sphere on a vertical plane (in degrees)}
\item{f}{rotation of the sphere on a frontal plane (in degrees)}
\item{cx}{size of the lettering (0.75 by default, 1 for bigger letters, 0.5 for smaller)}
\item{nbsphere}{two by default: front and back}
\item{back}{"FALSE" by default: the back sphere does not show through}
\item{input}{"data" by default: raw data are analysed, if not "data": a correlation matrix is expected}
\item{method}{"approx" by default: the estimation is based on a principal component analysis approximation. If "exact" the "approx" estimation is optimized (may be computationally demanding). If "rscal" a multidimensional scaling approach is used: distances between points on the sphere are optimized so that they represent the original correlations as well as possible. The scaling that is used leads to angles on the sphere proportional to the correlations between variables}
\item{maxiter}{maximum number of iterations in the optimization process}
\item{output}{FALSE by default: if TRUE and method="rscal", numerical results are returned}
}
\details{
There is an isomorphism between a correlation matrix and points on the unit hypersphere of R^n. It can be shown that a 3d spherical representation of a correlation matrix is statistically and cognitively interesting (see reference).
The default option method="approx" is based on a principal components approximation (see reference). It is fast and gives rather good results. If method="exact" the representation is slightly improved in terms of fit (the sphere minimizes the sum of squared distances between the original variables on the hypersphere and their projections on the sphere). The option method="rscal" optimizes the representation of correlations between variables with distances between points (in a least squares sense). For convenience, the scaling of points on the sphere is chosen so that angles between points are linearly related to correlations between variables (this is not the case on the hypersphere, where d=[2*(1-r)]^0.5).
For method="exact" or method="rscal" computations may be rather lengthy (and not advisable for more than 20-40 variables).
The sphere may be rotated to help in visualising most of the variables on the same side (the front, for example). By default, the back of the sphere (right plot) does not show through.
}
\value{
A plot. If method="rscal" and output=TRUE, a list with :
\item{$stress.before.optim}{Stress before optimization. The stress is equal to the sum of squares of differences between distances on the 3d sphere and distances on the hypersphere.}
\item{$stress.after.optim}{Stress after optimization.}
\item{$convergence}{If 0, convergence is OK.
If not, maxiter may be increased.}
\item{$correlations}{Correlation matrix of variables (Pearson).}
\item{$residuals}{Differences between observed correlations (hypersphere) and correlations estimated from points on the 3d sphere.}
\item{$mean.abs.resid}{Mean of absolute values of residuals.}
}
\references{Falissard B, A spherical representation of a correlation matrix, Journal of Classification (1996), 13:2, 267-280.}
\author{Bruno Falissard}
\examples{
data(sleep)
sphpca(sleep[,c(2:5,7:11)])
## spherical representation of ecological and constitutional correlates in mammals
sphpca(sleep[,c(2:5,7:11)],method="rscal",output=TRUE)
## idem, but optimizes the representation of correlations between variables with distances
## between points
corsleep <- as.data.frame(cor(sleep[,c(2:5,7:11)],use="pairwise.complete.obs"))
sphpca(corsleep,input="Cor")
sphpca(corsleep,method="rscal",input="Cor")
## when missing data are numerous, the representation of a pairwise correlation
## matrix may be preferred (even if mathematical properties are not so good...)
sphpca(corsleep,method="rscal",input="Cor",h=180,f=180,nbsphere=1,back=TRUE)
## another presentation option
##
# library(polycor)
# sleep$Predation <- as.ordered(sleep$Predation)
# sleep$Sleep.exposure <- as.ordered(sleep$Sleep.exposure)
# sleep$Danger <- as.ordered(sleep$Danger)
# corsleeph <- as.data.frame(hetcor(sleep[,c(2:5,7:11)])$correlations)
# sphpca(corsleeph,input="Cor",f=180)
# sphpca(corsleeph,method="rscal",input="Cor",f=180)
## --> Correlations between discrete variables may appear shocking to some statisticians (?)
## --> Representation of polychoric/polyserial correlations could be preferred in this situation
}
\keyword{multivariate}
psy/man/ckappa.Rd
\name{ckappa}
\alias{ckappa}
\title{Cohen's Kappa for 2 raters}
\description{Computes Cohen's Kappa for agreement in the case of 2 raters. The diagnosis (the object of the rating) may have k possible values.}
\usage{ckappa(r)}
\arguments{
\item{r}{n*2 matrix or dataframe, n subjects and 2 raters}}
\details{The function deals with the case where the two raters do not use exactly the same set of rating categories (some software packages return an error in this situation). Missing values are omitted.}
\value{A list with :
\item{$table}{the 2*k table of raw data (first rater in rows, second rater in columns)}
\item{$kappa}{Cohen's Kappa}}
\references{Cohen, J. (1960), A coefficient of agreement for nominal scales, Educational and Psychological Measurement, 20, 37-46.}
\author{Bruno Falissard}
\examples{data(expsy)
## Cohen's kappa for binary diagnosis
ckappa(expsy[,c(12,14)])
##to obtain a 95% confidence interval:
#library(boot)
#ckappa.boot <- function(data,x) {ckappa(data[x,])[[2]]}
#res <- boot(expsy[,c(12,14)],ckappa.boot,1000)
## two-sided bootstrapped confidence interval of kappa
#quantile(res$t,c(0.025,0.975))
## adjusted bootstrap percentile (BCa) confidence interval (better)
#boot.ci(res,type="bca")
##Cohen's kappa for non binary diagnosis
#ckappa(expsy[,c(11,13)])
}
\keyword{univar}
psy/DESCRIPTION
Package: psy
Title: Various Procedures Used in Psychometrics
Version: 1.2
Authors@R: person("Bruno", "Falissard", email="falissard_b@wanadoo.fr", role = c("aut","cre"))
Description: Kappa, ICC, reliability coefficient, parallel analysis,
        multi-trait multi-method, spherical representation of a
        correlation matrix.
License: GPL (>= 2) Encoding: UTF-8 RoxygenNote: 7.1.2 NeedsCompilation: no Packaged: 2022-04-21 13:20:08 UTC; Bruno Author: Bruno Falissard [aut, cre] Maintainer: Bruno Falissard Repository: CRAN Date/Publication: 2022-04-21 22:00:02 UTC psy/R/0000755000176200001440000000000014230255010011256 5ustar liggesuserspsy/R/wkappa.R0000644000176200001440000000143514230252516012677 0ustar liggesusers#' @export wkappa <- function(r, weights = "squared") { r <- na.omit(r) r1 <- r[, 1] r2 <- r[, 2] lev <- min(c(r1, r2)):max(c(r1, r2)) p <- length(lev) tab <- matrix(nrow = p, ncol = p) dimnames(tab) <- list(lev, lev) weight <- matrix(nrow = p, ncol = p) for(i in 1:p) for(j in 1:p) { tab[i,j] <- sum(r1==lev[i]&r2==lev[j]) if (weights == "squared") weight[i, j] <- 1 - (i - j)^2/(p - 1)^2 else weight[i, j] <- 1 - abs(i - j)/abs(p - 1) } tsum <- sum(tab) ttab <- tab/tsum agreeP <- sum(ttab * weight) tm1 <- apply(ttab, 1, sum) tm2 <- apply(ttab, 2, sum) ttabchance <- tm1 %*% t(tm2) chanceP <- sum(ttabchance * weight) kappa2 <- (agreeP - chanceP)/(1 - chanceP) result <- list(table = tab, weights = weights, kappa = kappa2) result } psy/R/lkappa.R0000644000176200001440000000061714227302724012670 0ustar liggesusers#' @export lkappa <- function(r, type="Cohen", weights="squared") { nrater <- dim(r)[2] kappas <- vector(length = nrater * (nrater - 1)/2) c <- 0 for (i in 2:nrater) for (j in 1:(i - 1)) { c <- c + 1 if (type == "Cohen") kappas[c] <- ckappa(r[, c(i, j)])[[2]] else kappas[c] <- wkappa(r[, c(i, j)], weights=weights)[[3]] } return(mean(kappas)) } psy/R/mtmm.R0000644000176200001440000002424714230252510012366 0ustar liggesusers#' @importFrom grDevices dev.new #' @importFrom graphics abline #' @importFrom graphics boxplot #' @importFrom graphics points #' @importFrom graphics stripchart #' @importFrom stats cov #' @importFrom stats median #' @importFrom stats optim #' @importFrom stats reshape #' @importFrom stats rnorm #' @importFrom stats sd #' @export mtmm <- function (datafile, x, color = FALSE, itemTot = FALSE, graphItem = FALSE, stripChart=FALSE, namesDim=NULL) { ############################################################################### # Graph of Correlation within and between Item of subscale # ############################################################################### par.old <- par("mfrow") k <- 1 name <- nam <- c() r2 <- c() nn <- c() nbdim <- length(x) X <- list() n <- as.data.frame(matrix(nrow = 1, ncol = nbdim)) datafile <- na.omit(datafile[,unlist(x)]) for (i in 1:nbdim) { X[[i]] <- datafile[, x[[i]]] } # Names of variables and matrix of correlations for (i in 1:nbdim) { n[i] <- length(X[[i]]) } nn <- c(nn, n) k <- 1 for (i in 1:nbdim) { nameD <- as.data.frame(matrix(nrow = n[[i]], ncol = nbdim)) nameD[i] <- names(X[[i]]) for (j in 1:nbdim) { r <- as.data.frame(matrix(nrow = n[[i]], ncol = n[[j]])) r <- cor(X[i][[1]], X[j][[1]]) r2[k] <- list(r) k <- k + 1 } name <- c(name, nameD[i]) nam <- c(nam, name[[i]]) } # Separation of both types of matrix a <- seq(from = 1, to = length(r2), by = (nbdim + 1)) X1 <- r2[a] X2 <- r2[-a] p1 <- length(X1) p2 <- length(X2) # Correlation of item with its own dimension # Initialisation correlation <- c() dimension <- c() mat <- c() V1 <- c() V2 <- c() # Number of item k1 <- c() for (q in 1:nbdim) { w <- seq(from = 1, to = nn[[q]]) k = 0 for (i in 1:(nn[[q]] - 1)) { k <- w[i] + k } k1 <- c(k1, k) } for (l in 1:p1) { mat1 <- matrix(ncol = 1, nrow = k1[l]) mat2 <- matrix(ncol = 1, nrow = k1[l]) mat3 <- matrix(ncol = 1, nrow = k1[l]) Var1 <- matrix(ncol 
= 1, nrow = k1[l]) Var2 <- matrix(ncol = 1, nrow = k1[l]) k = 1 for (i in 1:(nn[[l]] - 1)) { for (j in (i + 1):nn[[l]]) { mat1[k, 1] <- X1[[l]][j, i] mat2[k, 1] <- l mat3[k, 1] <- l Var1[k, 1] <- attributes(X1[[l]])$dimnames[[1]][i] Var2[k, 1] <- attributes(X1[[l]])$dimnames[[1]][j] k = k + 1 } } correlation <- append(correlation, mat1) dimension <- append(dimension, c(mat2)) mat <- append(mat, c(mat3)) V1 <- append(V1, c(Var1)) V2 <- append(V2, c(Var2)) } # Format er <- cbind(dimension, mat, V1, V2) er <- as.data.frame(er) er$correlation <- correlation # Correlation of item with other dimensions er1 <- c() V1 <- c() V2 <- c() for (l in 1:p2) { k <- 1 mat1 <- matrix(ncol = 1, nrow = ((dim(X2[[l]])[2]) * (dim(X2[[l]])[1]))) Var1 <- matrix(ncol = 1, nrow = ((dim(X2[[l]])[2]) * (dim(X2[[l]])[1]))) Var2 <- matrix(ncol = 1, nrow = ((dim(X2[[l]])[2]) * (dim(X2[[l]])[1]))) for (i in 1:(dim(X2[[l]])[1])) { for (j in 1:(dim(X2[[l]])[2])) { mat1[k, 1] <- X2[[l]][i, j] Var1[k, 1] <- rep(attributes(X2[[l]])$dimnames[[1]][i], each = (dim(X2[[l]])[2]))[j] Var2[k, 1] <- attributes(X2[[l]])$dimnames[[2]][j] k <- k + 1 } } er1 <- append(er1, c(mat1)) V1 <- append(V1, c(Var1)) V2 <- append(V2, c(Var2)) } # Format err <- cbind(V1, V2) err <- as.data.frame(err) err$correlation <- er1 # Names of matrix i2 <- c() i1 <- c() for (k in 1:(nrow(err))) { for (l in 1:p1) { if (err$V2[k] %in% name[[l]]) i2[k] <- l if (err$V1[k] %in% name[[l]]) i1[k] <- l } } err$mat <- i2 err$dimension <- i1 # grouping of matrix corre <- rbind(er, err) #**************************************************************************** #********************************* Colors *********************************** #**************************************************************************** col <- Y <- c() for (i in 1:nbdim) { if (color == FALSE) { col <- rep("white", times = nbdim) col[i] <- c("grey") } else col <- 2:nbdim Y[i] <- list(col) } #**************************************************************************** #******************************** plots *********************************** #**************************************************************************** if (graphItem == FALSE) { if (stripChart==FALSE) { for (i in 1:p1) { ifelse(is.null(namesDim), maintitle <- paste("Scale", i, sep = " "), maintitle <- paste(i, namesDim[i], sep = " ")) Dim <- subset(corre, (corre$dimension == i), drop = TRUE) boxplot(Dim$correlation ~ Dim$mat, col = Y[[i]], main = maintitle, xlab = "i", ylab = paste("Correlation of Items of Scale i with Items of Scale", i), ylim = c(min(corre$correlation), max(corre$correlation))) } } if (stripChart==TRUE) { for (i in 1:p1) { ifelse(is.null(namesDim), maintitle <- paste("Scale", i, sep = " "), maintitle <- paste(i, namesDim[i], sep = " ")) Dim <- subset(corre, (corre$dimension == i), drop = TRUE) stripchart(Dim$correlation ~ Dim$mat, vertical=TRUE, method="jitter", jitter=0.05, pch=1, cex=1.5, main = maintitle, xlab = "i", ylab = paste("Correlation of Items of Scale i with Items of Scale", i), ylim = c(min(corre$correlation), max(corre$correlation))) for (j in 1:p1) points(j, median(Dim$correlation[Dim$mat==j]), pch="-", cex=4) } } } ############################################################################### # Value of Correlation of Item of Scale with Scale # ############################################################################## n2 <- nrow(X[[1]]) Score <- matrix(nrow = n2, ncol = nbdim) for (i in 1:nbdim) { k <- 1 for (j in 1:n2) { Score[k, i] <- sum(X[[i]][j, ]) k <- k + 1 } } # score and 
correlation Scoreaj <- c() mat <- c() mat2 <- c() for (i in 1:nbdim) { Scorea <- matrix(nrow = n2, ncol = nn[[i]]) for (j in 1:(nn[[i]])) { for (l in 1:n2) { Scorea[l, j] <- Score[l, i] - X[[i]][l, j] } } Scoreaj[i] <- list(Scorea) mat <- rep(Score[, i], each = nn[[i]]) dim(mat) <- c(nn[[i]], n2) mat2[i] <- list(t(mat)) } # correlations Scale <- ScaleI <- Item <- co <- c <- c() t <- 0 for (i in 1:nbdim) { t <- t + as.numeric(nn[i]) for (j in 1:nbdim) { for (k in 1:nn[[i]]) { if (i == j) { Score <- Scoreaj[[i]][, k] c <- c(c, cor(Score, X[[i]][, k])) Item <- c(Item, names(X[[i]][k])) ScaleI <- c(ScaleI, i) Scale <- c(Scale, j) } else { c <- c(c, cor(X[[i]], mat2[[j]][, 1])[k]) Item <- c(Item, names(X[[i]][k])) ScaleI <- c(ScaleI, i) Scale <- c(Scale, j) } } } } co <- c(co, c) err <- as.data.frame(cbind(Item, Scale)) err$ScaleI <- ScaleI err$correlation <- co tabl <- reshape(err, direction = "wide", timevar = "Scale", idvar = "Item", v.names = c("correlation")) for (i in 1:nbdim) { colnames(tabl)[i + 2] <- ifelse(is.null(namesDim), paste("Scale", i, sep = " "), paste(i, namesDim[i], sep = " ")) } if (itemTot == TRUE & graphItem == FALSE) { dev.new() par(mfrow = par.old) if (stripChart==FALSE) { for (i in 1:nbdim) { ifelse(is.null(namesDim), maintitle <- paste("Scale", i, sep = " "), maintitle <- paste(i, namesDim[i], sep = " ")) Dim <- subset(err, (err$Scale == i), drop = TRUE) boxplot(Dim$correlation ~ Dim$ScaleI, col = Y[[i]], main = maintitle, ylim = c(min(err$correlation), max(err$correlation)), cex.axis = 0.75, xlab = "i", ylab = paste("Corrected Correlation of Item i with Total Score",i)) } } if (stripChart==TRUE) { for (i in 1:nbdim) { ifelse(is.null(namesDim), maintitle <- paste("Scale", i, sep = " "), maintitle <- paste(i, namesDim[i], sep = " ")) Dim <- subset(err, (err$Scale == i), drop = TRUE) stripchart(Dim$correlation ~ Dim$ScaleI, vertical=TRUE, method="jitter", jitter=0.05, pch=1, cex=1.5, main = maintitle, xlab = "i", ylab = paste("Corrected Correlation of Item i with Total Score", i), ylim = c(min(err$correlation), max(err$correlation))) for (j in 1:nbdim) points(j, median(Dim$correlation[Dim$ScaleI==j]), pch="-", cex=4) } } } if (graphItem == TRUE) { for (i in 1:nbdim) { Dim <- subset(err, (err$Scale == i), drop = TRUE) Dim$Item <- factor(Dim$Item, levels = unique(as.character(Dim$Item))) with(Dim, stripchart(correlation ~ Item, vertical = TRUE, add = FALSE, main = paste("Score of Scale", i, sep = " "), xlab = "Item", ylab = "Correlation", ylim = c(min(corre$correlation), max(corre$correlation)), cex.axis = 1, las = 3, pch = 20, cex = 0.75)) abline(h = median(Dim$correlation), col = "red") } } return(tabl) } psy/R/ckappa.R0000644000176200001440000000167714227300276012666 0ustar liggesusers#' @importFrom stats na.omit #' @export ckappa <- function(r) { r <- na.omit(r) r1 <- r[,1] r2 <- r[,2] n1 <- as.character(r1) n2 <- as.character(r2) lev <- levels(as.factor(c(n1,n2))) p <- length(lev) tab <- matrix(nrow=p,ncol=p) dimnames(tab) <- list(levels(as.factor(c(n1,n2))),levels(as.factor(c(n1,n2)))) dim1 <- dimnames(tab)[[1]] dim2 <- dimnames(tab)[[2]] tabi <- table(n1,n2) dimi1 <- dimnames(tabi)[[1]] dimi2 <- dimnames(tabi)[[2]] for(i in 1:p)for(j in 1:p) { if((sum(dim1[i]==dimi1)==1)&(sum(dim2[j]==dimi2)==1)) tab[i,j] <- tabi[dim1[i],dim2[j]] else tab[i,j] <- 0 } tsum <- sum(tab) ttab <- tab/tsum tm1 <- apply(ttab, 1, sum) tm2 <- apply(ttab, 2, sum) agreeP <- sum(diag(ttab)) chanceP <- sum(tm1 * tm2) kappa2 <- (agreeP - chanceP)/(1 - chanceP) result <- 
list("table"=tab,"kappa"=kappa2) result } psy/R/sphpca.R0000644000176200001440000017574514227303544012716 0ustar liggesusers#' @export sphpca <- function(datafile, h=0, v=0, f=0, cx=0.75, nbsphere=2, back=FALSE, input="data",method="approx", maxiter=500, output=FALSE) { p <- dim(datafile)[2] one <- matrix(1, nrow=p) load <- matrix(nrow=p, ncol=3) names <- attributes(datafile)$names if (input=="data") { mat <- as.matrix(na.omit(datafile)) matcorp <- cor(mat) } if (input!="data") { matcorp <- as.matrix(datafile) } decomp <- eigen(matcorp, symmetric=TRUE) eigenval <- decomp$values eigenvect <- decomp$vectors eigenval <- pmax(0.00001*one, eigenval) load <- eigenvect*sqrt(kronecker(one,t(eigenval))) load <- load[,1:3] if (method=="exact") { lo <- load dim(lo) <- c(3*p,1) fn1 <- function(tt) { dim(tt) <- c(p,3) stress <- 0 for(i in 1:p) for(k in 1:3) { int <- 0 for(m in 1:p) { int <- int + tt[m,k]*(tt[m,1]*tt[i,1]+tt[m,2]*tt[i,2]+tt[m,3]*tt[i,3]-matcorp[m,i])/sqrt(tt[m,1]^2+tt[m,2]^2+tt[m,3]^2) } stress <- stress+abs(int) } stress } opt <- optim(lo,fn1,method="BFGS",control=list(maxit=20)) convergence <- opt$convergence looptim <- opt$par stress.optim <- fn1(looptim) stress.non.optim <- fn1(lo) load <- looptim dim(load) <- c(p,3) } for(i in 1:p) load[i,] <- load[i,]/sqrt(sum(load[i,]^2)) if (method=="dist" | method=="rscal") { m.to.o <- function(w) { x <- w[1] ; y <- w[2] ; z <- w[3] theta <- acos(z) if (z==1) phi <- 0 if (z!=1 & y>=0) phi <- acos(x/sqrt(1-z^2)) if (z!=1 & y<0) phi <- 2*pi-acos(x/sqrt(1-z^2)) c(theta,phi) } o.to.m <- function(w) { theta <- w[1] phi <- w[2] z <- cos(theta) x <- sin(theta)*cos(phi) y <- sin(theta)*sin(phi) c(x,y,z) } o <- t(apply(load,1,m.to.o)) dim(o) <- c(2*p,1) if(method=="dist") d <- sqrt(2*(1-matcorp)) if(method=="rscal") d <- pi/2*(1-matcorp) fn2 <- function(ov) { dim(ov) <- c(p,2) m <- t(apply(ov,1,o.to.m)) stress <- 0 for(i in 1:p) for(j in 1:p) {if (j=0) symbols(x=load[i,2], y=load[i,3], circles=0.015, inches=FALSE, add=TRUE,fg="red",bg="red")} for(i in 1:p) {if (load[i,1]>=0) text(x=load[i,2],y=load[i,3]-0.05,labels=names[i],cex=cx)} plot(cos((1:201)*pi/100),sin((1:201)*pi/100),type="l",axes=FALSE,frame.plot=FALSE,ann=FALSE,xlim=c(-1,1),ylim=c(-1,1)) lines(mp1m[,1],mp1m[,2]) lines(mp2m[,1],mp2m[,2]) lines(mp3m[,1],mp3m[,2]) lines(mp4m[,1],mp4m[,2]) lines(mp5m[,1],mp5m[,2]) lines(-mq1m[,1],mq1m[,2]) lines(-mq2m[,1],mq2m[,2]) lines(-mq3m[,1],mq3m[,2]) lines(-mq4m[,1],mq4m[,2]) lines(-mq5m[,1],mq5m[,2]) lines(-mq6m[,1],mq6m[,2]) lines(-mq7m[,1],mq7m[,2]) lines(-mq8m[,1],mq8m[,2]) for(i in 1:p) {if (load[i,1]<=0) symbols(x=load[i,2], y=load[i,3], circles=0.015, inches=FALSE, add=TRUE,fg="red",bg="red")} for(i in 1:p) {if (load[i,1]<=0) text(x=load[i,2],y=load[i,3]-0.05,labels=names[i],cex=cx)} } if ((nbsphere!=2) & (back==TRUE)) { par(mfrow=c(1,1)) par(pty="s") par(oma=c(0,0,0,0)) par(mar=c(0,0,0,0)) plot(cos((1:201)*pi/100),sin((1:201)*pi/100),type="l",axes=FALSE,frame.plot=FALSE,ann=FALSE,xlim=c(-1,1),ylim=c(-1,1)) lines(mp1p[,1],mp1p[,2]) lines(mp2p[,1],mp2p[,2]) lines(mp3p[,1],mp3p[,2]) lines(mp4p[,1],mp4p[,2]) lines(mp5p[,1],mp5p[,2]) lines(mq1p[,1],mq1p[,2]) lines(mq2p[,1],mq2p[,2]) lines(mq3p[,1],mq3p[,2]) lines(mq4p[,1],mq4p[,2]) lines(mq5p[,1],mq5p[,2]) lines(mq6p[,1],mq6p[,2]) lines(mq7p[,1],mq7p[,2]) lines(mq8p[,1],mq8p[,2]) lines(mp1m[,1],mp1m[,2],lty=3) lines(mp2m[,1],mp2m[,2],lty=3) lines(mp3m[,1],mp3m[,2],lty=3) lines(mp4m[,1],mp4m[,2],lty=3) lines(mp5m[,1],mp5m[,2],lty=3) lines(-mq1m[,1],mq1m[,2],lty=3) lines(-mq2m[,1],mq2m[,2],lty=3) 
lines(-mq3m[,1],mq3m[,2],lty=3) lines(-mq4m[,1],mq4m[,2],lty=3) lines(-mq5m[,1],mq5m[,2],lty=3) lines(-mq6m[,1],mq6m[,2],lty=3) lines(-mq7m[,1],mq7m[,2],lty=3) lines(-mq8m[,1],mq8m[,2],lty=3) for(i in 1:p) {if (load[i,1]>=0) symbols(x=load[i,2], y=load[i,3], circles=0.015, inches=FALSE, add=TRUE,fg="red",bg="red")} for(i in 1:p) {if (load[i,1]>=0) text(x=load[i,2],y=load[i,3]-0.05,labels=names[i],cex=cx)} for(i in 1:p) {if (load[i,1]<=0) symbols(x=load[i,2], y=load[i,3], circles=0.015, inches=FALSE, add=TRUE,fg="blue",bg="white")} for(i in 1:p) {if (load[i,1]<=0) text(x=load[i,2],y=load[i,3]-0.05,labels=names[i],cex=cx)} } if ((nbsphere!=2) & (back==FALSE)) { par(mfrow=c(1,1)) par(pty="s") par(oma=c(0,0,0,0)) par(mar=c(0,0,0,0)) plot(cos((1:201)*pi/100),sin((1:201)*pi/100),type="l",axes=FALSE,frame.plot=FALSE,ann=FALSE,xlim=c(-1,1),ylim=c(-1,1)) lines(mp1p[,1],mp1p[,2]) lines(mp2p[,1],mp2p[,2]) lines(mp3p[,1],mp3p[,2]) lines(mp4p[,1],mp4p[,2]) lines(mp5p[,1],mp5p[,2]) lines(mq1p[,1],mq1p[,2]) lines(mq2p[,1],mq2p[,2]) lines(mq3p[,1],mq3p[,2]) lines(mq4p[,1],mq4p[,2]) lines(mq5p[,1],mq5p[,2]) lines(mq6p[,1],mq6p[,2]) lines(mq7p[,1],mq7p[,2]) lines(mq8p[,1],mq8p[,2]) for(i in 1:p) {if (load[i,1]>=0) symbols(x=load[i,2], y=load[i,3], circles=0.015, inches=FALSE, add=TRUE,fg="red",bg="red")} for(i in 1:p) {if (load[i,1]>=0) text(x=load[i,2],y=load[i,3]-0.05,labels=names[i],cex=cx)} for(i in 1:p) {if (load[i,1]<=0) symbols(x=load[i,2], y=load[i,3], circles=0.015, inches=FALSE, add=TRUE,fg="blue",bg="white")} for(i in 1:p) {if (load[i,1]<=0) text(x=load[i,2],y=load[i,3]-0.05,labels=names[i],cex=cx)} } if (output!=FALSE & method=="exact") list("stress.before.optim"=stress.non.optim, "stress.after.otpim"=stress.optim,"convergence"=convergence) if (output!=FALSE & method=="rscal" | method=="dist") list("stress.before.optim"=stress.non.optim, "stress.after.optim"=stress.optim,"convergence"=convergence, "correlations"=round(matcorp,3),"residuals"=round(resid.r,3),"mean.abs.resid"=resmoy) } psy/R/screeplot.R0000644000176200001440000000220114227303426013407 0ustar liggesusers#' @export scree.plot <- function(namefile, title="Scree Plot", type="R", use="complete.obs", simu="F") { mat <- namefile if (use=="complete.obs") mat <- na.omit(namefile) if (type=="R") eigenval <- eigen(cor(mat,use="pairwise.complete.obs"), symmetric=TRUE)$values if (type=="V") eigenval <- eigen(cov(mat,use="pairwise.complete.obs"), symmetric=TRUE)$values if (type=="E") eigenval <- namefile if (type=="M") eigenval <- eigen(namefile, symmetric=TRUE)$values nev <- length(eigenval) plot(eigenval, type = "b", pch = 16, bty = "o", main = title, xlab = "Dimension", ylab = "Eigenvalue") lines(c(1,nev),c(1,1),lty=2) if (is.numeric(simu) && (type=="R")) { n <- dim(mat)[1] p <- dim(mat)[2] matsimu <- matrix(nrow=n,ncol=p) int <- rep(1,n*p) attr(int,"dim") <- c(n,p) mat <- pmax(as.matrix(mat),int) for(i in 1:simu) { matnorm <- rnorm(n*p) attr(matnorm,"dim") <- c(n,p) matsimu <- (mat/mat)*matnorm eigenval <- eigen(cor(matsimu,use="pairwise.complete.obs"))$values points(eigenval,type="l") } } } psy/R/mdspca.R0000644000176200001440000001742414230252501012662 0ustar liggesusers#' @export mdspca <- function(datafile, supvar="no", supsubj="no", namesupvar=colnames(supvar,do.NULL=FALSE), namesupsubj=colnames(supsubj,do.NULL=FALSE), dimx=1, dimy=2, cx=0.75) { #*********************************************** # missing values are omitted, normalization of var and supvar #*********************************************** if ((is.na(supvar[1]) | 
(supvar[1]!="no")) & (!is.na(supsubj[1]) & (supsubj[1]=="no"))) { svar <- 1 ssubj <- 0 p <- dim(datafile)[2] supvar <- as.matrix(supvar) pp <- dim(supvar)[2] interm <- cbind(datafile,supvar) interm <- na.omit(interm) mat <- as.matrix(interm[,1:p]) matp <- as.matrix(interm[,(p+1):(p+pp)]) n <- dim(mat)[1] for(i in 1:p) mat[,i] <- (mat[,i]-mean(mat[,i]))/(sqrt(n-1)*sd(mat[,i])) for(i in 1:pp) matp[,i] <- (matp[,i]-mean(matp[,i]))/(sqrt(n-1)*sd(matp[,i])) } if ((is.na(supvar[1]) | (supvar[1]!="no")) & (is.na(supsubj[1]) | (supsubj[1]!="no"))) { svar <- 1 ssubj <- 1 p <- dim(datafile)[2] supvar <- as.matrix(supvar) pp <- dim(supvar)[2] supsubj <- as.matrix(supsubj) ppp <- dim(supsubj)[2] interm <- cbind(datafile,supvar,supsubj) interm <- na.omit(interm) mat <- as.matrix(interm[,1:p]) matp <- as.matrix(interm[,(p+1):(p+pp)]) supsubj <- as.matrix(interm[,(p+pp+1):(p+pp+ppp)]) n <- dim(mat)[1] for(i in 1:p) mat[,i] <- (mat[,i]-mean(mat[,i]))/(sqrt(n-1)*sd(mat[,i])) for(i in 1:pp) matp[,i] <- (matp[,i]-mean(matp[,i]))/(sqrt(n-1)*sd(matp[,i])) } if ((!is.na(supvar[1]) & (supvar[1]=="no")) & (is.na(supsubj[1]) | (supsubj[1]!="no"))) { svar <- 0 ssubj <- 1 p <- dim(datafile)[2] supsubj <- as.matrix(supsubj) ppp <- dim(supsubj)[2] interm <- cbind(datafile,supsubj) interm <- na.omit(interm) mat <- as.matrix(interm[,1:p]) supsubj <- as.matrix(interm[,(p+1):(p+ppp)]) n <- dim(mat)[1] for(i in 1:p) mat[,i] <- (mat[,i]-mean(mat[,i]))/(sqrt(n-1)*sd(mat[,i])) } if ((!is.na(supvar[1]) & (supvar[1]=="no")) & (!is.na(supsubj[1]) & (supsubj[1]=="no"))) { svar <- 0 ssubj <- 0 p <- dim(datafile)[2] mat <- as.matrix(na.omit(datafile)) n <- dim(mat)[1] for(i in 1:p) mat[,i] <- (mat[,i]-mean(mat[,i]))/(sqrt(n-1)*sd(mat[,i])) } #********************************************** # definitions #********************************************** n <- dim(mat)[1] names <- matrix(nrow=p+2) one2 <- matrix(1, nrow=p) load <- matrix(nrow=p, ncol=p) loady <- matrix(nrow=p+2, ncol=p) #*********************************************** # correlations and loadings #*********************************************** names[1:p] <- attributes(datafile)$names un <- matrix(1, ncol=p, nrow=n) one <- matrix(1, nrow=n) matcorp <- cor(mat) decomp <- eigen(matcorp, symmetric=TRUE) eigenval <- decomp$values eigenvect <- decomp$vectors eigenval <- pmax(0.00001*one2, eigenval) load <- eigenvect*sqrt(kronecker(one2,t(eigenval))) loady[1:p,] <- load[1:p,1:p] #*********************************************** # supplementary variables #*********************************************** if(svar==1) { one3 <- matrix(1,nrow=pp) namesp <- matrix(nrow=pp) namesp[1:pp] <- namesupvar loadp <- matrix(nrow=pp, ncol=p) loadyp <- matrix(nrow=pp, ncol=p) loadp <- (t(matp)%*%mat%*%eigenvect)*kronecker(one3,t(1/sqrt(eigenval))) loadyp[1:pp,] <- loadp[1:pp,1:p] } #*********************************************** # supplementary subjects #*********************************************** if(ssubj==1) { nn <- ppp mod <- matrix(nrow=nn) for(i in 1:nn) { factsub <- as.factor(supsubj[,i]) mod[i] <- nlevels(factsub) } nmod <- sum(mod) names2 <- matrix(nrow=nmod) mat2 <- matrix(nrow=nmod, ncol=p) load2 <- matrix(nrow=nmod, ncol=p) loady2 <- matrix(nrow=nmod, ncol=p) compt <- 0 mat <- as.data.frame(mat) for(i in 1:nn) for(j in 1:mod[i]) { compt <- compt+1 factsub <- as.factor(supsubj[,i]) names2[compt] <- paste(namesupsubj[i],levels(factsub)[j]) mat2[compt,] <- sapply(split(mat,factsub)[[j]],mean) } load2 <- (mat2%*%eigenvect)*sqrt(n/p) loady2[1:nmod,] <- load2[1:nmod,1:p] } 
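  # supplementary subjects: each point corresponds to one level of a supplementary
  # categorical variable; mat2 holds the mean of the standardized active variables
  # within that level, and load2 projects these means onto the principal axes
  # (rescaled by sqrt(n/p)) so that they can be displayed in the subject plane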
#**************************************************************************** #**************************************************************************** # Drawing #**************************************************************************** #**************************************************************************** #****************** two more points for a non truncated drawing ******** loady[p+1,] <- 1.5 loady[p+2,] <- -1.5 names[p+1] <- "." names[p+2] <- "." #**************************************************************************** #******************************** plots *********************************** #**************************************************************************** par(pty="s") if (is.na(supsubj[1]) | (supsubj[1]!="no")) { par(mfrow=c(1,2)) par(oma=c(0,0,0,0)) par(mar=c(0,0,0,0)) dimmax <- max(abs(loady2[,dimx])+0.2,abs(loady2[,dimy])+0.2,1.2) #*************** new axes (centered) ******** par(mar=rep(0,4)) plot(x=c(-1*dimmax+0.1,dimmax-0.1),y=c(0,0),type="l",axes=FALSE,frame.plot=FALSE,ann=FALSE,xlim=c(-1*dimmax,dimmax),ylim=c(-1*dimmax,dimmax),col="grey") lines(x=c(0,0),y=c(-1*dimmax+0.1,dimmax-0.1),type="l",col="grey") #****************** plot of correlations *********** symbols(x=loady2[,dimx], y=loady2[,dimy], squares=rep(.03,length(loady2[,dimy])), inches=FALSE, add=TRUE,fg="blue",bg="blue") #***************** name plot ***************** text(x=loady2[,dimx],y=loady2[,dimy]-0.05,labels=names2,cex=cx) } #*************** new axes (centered) ******** if (!is.na(supsubj[1]) & (supsubj[1]=="no")) {par(mar=rep(0,4))} plot(x=c(-1.1,1.1),y=c(0,0),type="l",axes=FALSE,frame.plot=FALSE,ann=FALSE,xlim=c(-1.2,1.2),ylim=c(-1.2,1.2),col="grey") lines(x=c(0,0),y=c(-1.1,1.1),type="l",col="grey") #************** circle (r=1)**************** symbols(x=0, y=0, circles=1, inches=FALSE, add=TRUE, lwd=2) #****************** plot of correlations *********** symbols(x=loady[,dimx], y=loady[,dimy], circles=rep(.01*cx*2,length(loady[,dimy])), inches=FALSE, add=TRUE,fg="grey",bg="red") #***************** name plot ***************** text(x=loady[,dimx],y=loady[,dimy]-0.05,labels=names,cex=cx) if (is.na(supvar[1]) | (supvar[1]!="no")) { #****************** plot of correlations sup var *********** symbols(x=loadyp[,dimx], y=loadyp[,dimy], circles=rep(.01*cx*2,length(loadyp[,dimy])), inches=FALSE, add=TRUE,fg="grey",bg="green") #***************** name plot ***************** text(x=loadyp[,dimx],y=loadyp[,dimy]-0.05,labels=namesp,cex=cx) } #****************** name of factors *********** pf1 <- floor(100*eigenval[dimx]/sum(eigenval)) pf2 <- floor(100*eigenval[dimy]/sum(eigenval)) annotate1 <- paste("x = F",dimx," : ",pf1,"% var", sep="") annotate2 <- paste("y = F",dimy," : ",pf2,"% var", sep="") text(x=1,y=1,labels=annotate1,cex=cx) text(x=1,y=1-0.05*cx*2,labels=annotate2,cex=cx) par(mfrow=c(1,1)) #**************************************************************************** #******************************** end of plots **************************** #**************************************************************************** } psy/R/fpca.R0000644000176200001440000003143314227302114012322 0ustar liggesusers#' @importFrom stats na.omit #' @importFrom stats cor #' @importFrom stats model.extract #' @importFrom stats var #' @importFrom graphics lines #' @importFrom graphics par #' @importFrom graphics symbols #' @importFrom graphics text #' @export fpca <- function(formula=NULL, y=NULL, x=NULL,data, cx=0.75, pvalues="No", partial="Yes", input="data", contraction="No", sample.size=1) 
#********************************************** # # datafile is the name of the dataframe that contains the data # y is the number of the column related to the dependant variable (ex: y = 6) # x is the vector of the number of the columns related to the independant variables # (ex: x = c(1,2,3,4,5,7,8,9,10) # # option: pvalues is a vector of determined pvalues that will replace the correlations # to the focus variable (if pvalues != "No" then partial = "No") (pvalues="No" by default) # # q <- 1 (q>1 for a future version) # # option: partial is an option to present a focused PCA that is a simple renormalization # of conventional PCA (partial="Yes" by default) # # option: input indicates wether the input correspond to data (default) or to a # correlation matrix (if input != "data" then partial = "No") (input="data" by default) # # option: contraction change the appearance of the figure # (if contraction="Yes" then pvalues="No") (contraction="No" by default) # # option: sample.size, size of the sample when input!="data" # #********************************************** { if (is.null(y)) { call <- match.call() mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) Y <- model.extract(mf, "response") z=dim(mf)[2] x <- mf[,2:z] namey=names(mf)[1] namex=names(mf[,2:z]) } datafile<-data if (pvalues[1]!="No") partial <- "No" #********************************************** # definitions #********************************************** p <- ifelse(is.null(y),ncol(x),length(x)) if (input=="data") n <-ifelse(is.null(y),nrow(x),dim(datafile)[1]) else n <- sample.size if (input=="data") {if(is.null(y)) mat<-mf else mat<-matrix(ncol=p+1, nrow=n) } names <- matrix(nrow=p+2) one2 <- matrix(1, nrow=p) load <- matrix(nrow=p, ncol=p) norm <- matrix(nrow=p, ncol=p-1) loadx <- matrix(nrow=p+2, ncol=p-1) loadyp <- matrix(nrow=p+2, ncol=p-1) loadym <- matrix(nrow=p+2, ncol=p-1) loady <- matrix(nrow=p+2, ncol=p-1) q <- 1 if (input=="data") { #*********************************************** # missing values are NOT omitted # input x (independant) and y (dependant variable) # x and y are normalized # correlations of (x,y) #*********************************************** if (is.null(formula)){ q <- min(q,p-1) mat[,1] <- datafile[,c(y)] namey<-ifelse(is.numeric(y),attributes(datafile)$names[y],y) for(i in 1:p) { mat[,i+1] <- datafile[,c(x[i])] names[i] <- ifelse(is.numeric(x[i]),attributes(datafile)$names[x[i]],x[i]) } mat <- na.omit(mat) #following command used to work for data.frames containing NA, didn't work if no NAs } n <- dim(mat)[1] xv <- matrix(ncol=p, nrow=n) yv <- matrix(nrow=n) un <- matrix(1, ncol=p, nrow=n) one <- matrix(1, nrow=n) for(i in 1:p) { xv[,i] <- (mat[,i+1]-mean(mat[,i+1]))/(sqrt(var(mat[,i+1]))*sqrt(n-1)) } yv <- mat[,1] yv <- (yv-mean(yv))/(sqrt(var(yv))*sqrt(n-1)) matcor <- cor(mat) } else { if (is.null(formula)){ namey<-ifelse(is.numeric(y),attributes(datafile)$names[y],y) for(i in 1:p) { names[i] <- ifelse(is.numeric(x[i]),attributes(datafile)$names[x[i]],x[i]) } matcor <- matrix(nrow=p+1,ncol=1) matcor[1,1] <- 1 matcor[2:(p+1),1] <- datafile[x,y] matcorp <- datafile[x,x] } else { namex=names(mf[names(mf)!=namey]) X<-as.data.frame(t(x))[,c(namex)] matcor<- t(as.data.frame(t(Y))[,c(namey,namex)]) matcorp <- X } decomp <- eigen(matcorp, symmetric=TRUE) eigenval <- decomp$values eigenvect <- decomp$vectors eigenval <- pmax(0*one2, eigenval) 
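#***********************************************
# eigenvalues are truncated at zero to guard against small negative
# values produced by rounding; the loadings computed next are the
# eigenvectors scaled by the square roots of the eigenvalues
#***********************************************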
load <- eigenvect*sqrt(kronecker(one2,t(eigenval))) } #*********************************************** #traditional FPCA #*********************************************** if (pvalues[1]=="No") { if (input=="data") { if (partial=="Yes") { #*********************************************** # xp is x related to y (projection on y orthog) #*********************************************** scal <- t(t(xv)%*%yv) xp <- xv - (un*yv)*kronecker(one,scal) }else { xp <- xv } #*********************************************** # decomposition of the covariance matrix of xp #*********************************************** #matcorp <- var(xp, na.method="omit") matcorp <- var(xp) decomp <- eigen(matcorp, symmetric=TRUE) eigenval <- decomp$values eigenvect <- decomp$vectors eigenval <- pmax(0*one2, eigenval) load <- eigenvect*sqrt(kronecker(one2,t(eigenval))) } #*********************************************** # renormalization of the loadings #*********************************************** if (contraction=="No") { for(i in 1:p) { for(j in 1:p-1) { norm[i,j] <- sqrt(load[i,1]*load[i,1] + load[i,j+1]*load[i,j+1]) loadx[i,j] <- load[i,1]*sqrt(2-2*abs(matcor[i+1,1]))/norm[i,j] if (matcor[i+1,1] > 0) loadyp[i,j] <- load[i,j+1]*sqrt(2-2*matcor[i+1,1])/norm[i,j] else loadym[i,j] <- load[i,j+1]*sqrt(2+2*matcor[i+1,1])/norm[i,j] loady[i,j] <- load[i,j+1]*sqrt(2-2*abs(matcor[i+1,1]))/norm[i,j] - 0.05 } } } if (contraction!="No") { for(i in 1:p) { for(j in 1:p-1) { norm[i,j] <- sqrt(load[i,1]*load[i,1] + load[i,j+1]*load[i,j+1]) loadx[i,j] <- 1.5*load[i,1]*(1-abs(matcor[i+1,1]))/norm[i,j] if (matcor[i+1,1] > 0) loadyp[i,j] <- 1.5*load[i,j+1]*(1-matcor[i+1,1])/norm[i,j] else loadym[i,j] <- 1.5*load[i,j+1]*(1+matcor[i+1,1])/norm[i,j] loady[i,j] <- 1.5*load[i,j+1]*(1-abs(matcor[i+1,1]))/norm[i,j] - 0.05 } } } } #*********************************************** #pvalued FPCA #*********************************************** if (pvalues[1]!="No") { matcorp <- var(xv, na.rm=TRUE) decomp <- eigen(matcorp, symmetric=TRUE) eigenval <- decomp$values eigenvect <- decomp$vectors eigenval <- pmax(0*one2, eigenval) load <- eigenvect*sqrt(kronecker(one2,t(eigenval))) #*********************************************** # renormalization of the loadings (1.5 is for drawing convenience) #*********************************************** pnorm <- matrix(nrow=p) pvaluesabs <- abs(pvalues) pvaluesabs <- pmax(pvaluesabs,0.001) for (i in 1:p) if (pvaluesabs[i]==0) pnorm[i] <- 0 else pnorm[i] <- pvaluesabs[i]^(log(pvaluesabs[i])/-50) for(i in 1:p) { for(j in 1:p-1) { norm[i,j] <- sqrt(load[i,1]*load[i,1] + load[i,j+1]*load[i,j+1]) loadx[i,j] <- 1.5*load[i,1]*pnorm[i]/norm[i,j] if (pvalues[i] > 0) loadyp[i,j] <- 1.5*load[i,j+1]*pnorm[i]/norm[i,j] else loadym[i,j] <- 1.5*load[i,j+1]*pnorm[i]/norm[i,j] loady[i,j] <- 1.5*load[i,j+1]*pnorm[i]/norm[i,j] - 0.05 } } } #**************************************************************************** #**************************************************************************** # Drawing #**************************************************************************** #**************************************************************************** #****************** two more points for a non truncated drawing ******** if (is.null(formula)){ for(j in 1:p-1) { loadx[p+1,j] <- 1.5 loady[p+1,j] <- 1.5 loadx[p+2,j] <- -1.5 loady[p+2,j] <- -1.5 } names[p+1] <- "." names[p+2] <- "." 
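#****************************************************************************
# geometry used in the default display (contraction="No", pvalues="No"):
# a variable with correlation r to the focus variable is plotted at
# distance sqrt(2 - 2*|r|) from the centre (the Euclidean distance between
# the two standardised variables), so the reference circles drawn below
# (radii 1.414, 1.265, 1.095, 0.894 and 0.632) correspond to
# r = 0, 0.2, 0.4, 0.6 and 0.8, and the red circle marks the smallest
# correlation significant at the 5% level (via Fisher's z transformation)
#****************************************************************************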
} else {names=namex} #**************************************************************************** #******************************** q plots *********************************** #**************************************************************************** j <- 1 { #*************** new axes (centered) ******** par(pty="s") par(mar=rep(0,4)) plot(x=c(-1.6,1.6),y=c(0,0),type="l",axes=FALSE,frame.plot=FALSE,ann=FALSE,xlim=c(-1.7,1.7),ylim=c(-1.7,1.7),col="grey") lines(x=c(0,0),y=c(-1.6,1.6),type="l",col="grey") #******************************************************** #traditionnal FPCA #******************************************************** if (pvalues[1]=="No") { #************** circles (r=0, r=0.2, ...)**************** radius <- matrix(nrow=5) if (contraction=="No") { radius[1] <- 1.414 radius[2] <- 1.265 radius[3] <- 1.095 radius[4] <- 0.894 radius[5] <- 0.632 } else { radius[1] <- 1.5 radius[2] <- 1.2 radius[3] <- 0.9 radius[4] <- 0.6 radius[5] <- 0.3 } symbols(x=0, y=0, circles=radius[1], inches=FALSE, add=TRUE, lwd=2) symbols(x=c(0,0,0,0), y=c(0,0,0,0), circles=radius[2:5], inches=FALSE, add=TRUE, lwd=1,fg="grey") } #******************************************************** #pvalued FPCA #******************************************************** if (pvalues[1]!="No") { #************** circles (p=0.1, p=0.05, ...)**************** symbols(x=0, y=0, circles=1.5, inches=FALSE, add=TRUE, lwd=2) symbols(x=c(0,0,0), y=c(0,0,0), circles=c(1.35,1,.557), inches=FALSE, add=TRUE, lwd=1,fg="grey") symbols(x=0, y=0, circles=1.254, inches=FALSE, add=TRUE, lwd=1,fg="red") } #************** dependant variable ***************** symbols(x=0, y=0, circles=0.03, bg="black", inches=FALSE, add=TRUE, lwd=1) #******************************************************** #traditionnal FPCA #******************************************************** if (pvalues[1]=="No") { #************** circle with p = 5% ***************** if (input=="data") { e <- exp(1.96*2/sqrt(n-3)) }else { if (sample.size<4) e <- 0 else e <- exp(1.96*2/sqrt(sample.size-3)) } if (contraction=="No") rayonsign <- sqrt(2-2*(e-1)/(1+e)) else rayonsign <- (1-(e-1)/(1+e))*1.5 symbols(x=0, y=0, circles=rayonsign, inches=FALSE, add=TRUE, lwd=1, fg="red") #**************** legends : r=0, r=0.2, ... **************** text(x=c(rep(0.01,5)),y=radius+.04, labels=c("r = 0","r = 0.2","r = 0.4","r = 0.6","r = 0.8"),cex=0.5) } #******************************************************** #pvalued FPCA #******************************************************** if (pvalues[1]!="No") { #**************** legends : p=0, p=0.1, ... 
**************** text(x=c(rep(0.01,5)),y=c(.563,.982,1.239,1.335,1.48), labels=c("p < 0.001","p = 0.01","p = 0.05","p = 0.1","p = 1"),cex=cx) } #****************** plot of positive correlations *********** symbols(x=loadx[,j], y=loadyp[,j], circles=rep(.03,length(loadyp[,j])), inches=FALSE, add=TRUE,fg="blue",bg="green") #****************** plot of negative correlations *********** symbols(x=loadx[,j], y=loadym[,j], circles=rep(.03,length(loadym[,j])), inches=FALSE, add=TRUE,fg="red",bg="yellow") #***************** names ****************** text(x=-0.18,y=-.12,labels=namey, cex=cx+0.25)#focus variable text(x=loadx[,j],y=loady[,j],labels=names,cex=cx)#other variables #****************** name of factors *********** #annotate <- paste("Factors : 1,", j, sep="") #text(x=1,y=1.3,labels=annotate,cex=cx) #**************************************************************************** #******************************** end of q plots **************************** #**************************************************************************** } #******************* end ********************** } psy/R/icc.R0000644000176200001440000000137014227302613012150 0ustar liggesusers#' @importFrom stats anova #' @importFrom stats lm #' @export icc <- function(data) { score <- as.matrix(na.omit(data)) n <- dim(score)[1] p <- dim(score)[2] data2 <- matrix(ncol=3,nrow=p*n) attr(score,"dim") <- c(p*n,1) data2[,1] <- score subject <- as.factor(rep(1:n,p)) rater <- as.factor(rep(1:p,each=n)) data2[,2] <- subject data2[,3] <- rater ms <- anova(lm(score~subject+rater))[[3]] names(ms) <- NULL v.s <- (ms[1]-ms[3])/p v.r <- (ms[2]-ms[3])/n res <- ms[3] icc.a <- v.s/(v.s+v.r+res) icc.c <- v.s/(v.s+res) result <- list("nb.subjects"=n,"nb.raters"=p,"subject.variance"=v.s,"rater.variance"=v.r,"residual"=res,"icc.consistency"=icc.c,"icc.agreement"=icc.a) result } psy/R/cronbach.R0000644000176200001440000000044514227301265013175 0ustar liggesusers#' @importFrom stats na.omit #' @export cronbach <- function(v1) { v1 <- na.omit(v1) nv1 <- ncol(v1) pv1 <- nrow(v1) alpha <- (nv1/(nv1-1))*(1 - sum(apply(v1,2,var))/var(apply(v1,1,sum))) resu <- list("sample.size"=pv1,"number.of.items"=nv1,"alpha"=alpha) resu } psy/MD50000644000176200001440000000251214230351742011376 0ustar liggesusers07ef5d1a963be48723d1e72fe4ad6768 *DESCRIPTION 25aed251bba87f67d8afd03ee328d9ea *NAMESPACE 1ed987fdd524eebc76b17ca9e5c8b6d6 *R/ckappa.R 09aa98a6a3f7ff9ff999d7fb564c2b01 *R/cronbach.R 5dab52ac34fff32772233295714ecbb9 *R/fpca.R 0fbee99ead4dc8af547b828ba314be8a *R/icc.R f425decf3ceac0cffd042073e6e06da9 *R/lkappa.R c25f0b91743c7c9ea954af0ef37c111d *R/mdspca.R ab7a8183f4e64e7886985324390b46d2 *R/mtmm.R 01a74d26b71f7d20937b5c355f6b3ec8 *R/screeplot.R 5ef03b4546057349fc1d0699afeff896 *R/sphpca.R a6cdac03d299254f73aa35497760ae37 *R/wkappa.R 10c97594b730da607a1f409f77216915 *data/ehd.rda 04d5b74fa8b1143037aab7c44e18d238 *data/expsy.rda 7163afd608ec35e19e6615872df9b8ac *data/sleep.rda a5c611ab4e39ceb2a9cd2c78e19e7a4b *man/ckappa.Rd e4c74df714f913326c8b2b97c4e1d9f1 *man/cronbach.Rd b702067118afdccbb99b271a5eea03ef *man/ehd.Rd 560bcc6a41f344be15234016f0743e9f *man/expsy.Rd 159a192fb22f368fcc7955820dbd9e55 *man/fpca.Rd 05b9682daade47e80c44c3c73cf91971 *man/icc.Rd 95b0b099a137149c36d75dc75c9e59d8 *man/lkappa.Rd 9a936be1d9d11777d93b66e531ca0c67 *man/mdspca.Rd cf9cf218c658302a1d5b0d47c60102a1 *man/mtmm.Rd ab1da1276eed0058925063a265ec3fd5 *man/psy-package.Rd 7355503a3078505369d9550bae2fd94b *man/scree.plot.Rd c764c7b2cdca8701936b1865b35506a5 *man/sleep.Rd 
d15d763bc829c2341741e1ec434295ef *man/sphpca.Rd b142de2e32d268d9ccd569865a99f76d *man/wkappa.Rd
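A minimal usage sketch of the reliability functions defined above (cronbach()
in R/cronbach.R and icc() in R/icc.R). The simulated data frame ratings and the
generating model are illustrative assumptions, not examples shipped with the
package.

library(psy)
set.seed(123)                                 # arbitrary seed, for reproducibility
true.score <- rnorm(30)                       # latent score of 30 subjects (assumed model)
ratings <- data.frame(r1 = true.score + rnorm(30, sd = 0.5),   # three parallel items/raters
                      r2 = true.score + rnorm(30, sd = 0.5),
                      r3 = true.score + rnorm(30, sd = 0.5))
cronbach(ratings)   # list with $sample.size, $number.of.items and $alpha
icc(ratings)        # subject/rater/residual variances, consistency and agreement ICCs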